Posted to commits@htrace.apache.org by iw...@apache.org on 2016/04/20 01:32:44 UTC

[1/7] incubator-htrace git commit: HTRACE-357. Rename htrace-htraced/go/src/org/apache/htrace to htrace-htraced/go/src/htrace (Colin Patrick McCabe via iwasakims)

Repository: incubator-htrace
Updated Branches:
  refs/heads/master e629995ac -> 5737e65b0


http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/rest.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/rest.go b/htrace-htraced/go/src/org/apache/htrace/htraced/rest.go
deleted file mode 100644
index eabeee7..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/rest.go
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"github.com/gorilla/mux"
-	"net"
-	"net/http"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// Set the response headers.
-func setResponseHeaders(hdr http.Header) {
-	hdr.Set("Content-Type", "application/json")
-}
-
-// Write a JSON error response.
-func writeError(lg *common.Logger, w http.ResponseWriter, errCode int,
-	errStr string) {
-	str := strings.Replace(errStr, `"`, `'`, -1)
-	lg.Info(str + "\n")
-	w.WriteHeader(errCode)
-	w.Write([]byte(`{ "error" : "` + str + `"}`))
-}
-
-type serverVersionHandler struct {
-	lg *common.Logger
-}
-
-func (hand *serverVersionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	setResponseHeaders(w.Header())
-	version := common.ServerVersion{ReleaseVersion: RELEASE_VERSION,
-		GitVersion: GIT_VERSION}
-	buf, err := json.Marshal(&version)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusInternalServerError,
-			fmt.Sprintf("error marshalling ServerVersion: %s\n", err.Error()))
-		return
-	}
-	if hand.lg.DebugEnabled() {
-		hand.lg.Debugf("Returned ServerVersion %s\n", string(buf))
-	}
-	w.Write(buf)
-}
-
-type serverDebugInfoHandler struct {
-	lg *common.Logger
-}
-
-func (hand *serverDebugInfoHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	setResponseHeaders(w.Header())
-	buf := make([]byte, 1<<20)
-	common.GetStackTraces(&buf)
-	resp := common.ServerDebugInfo{
-		StackTraces: string(buf),
-		GCStats:     common.GetGCStats(),
-	}
-	buf, err := json.Marshal(&resp)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusInternalServerError,
-			fmt.Sprintf("error marshalling ServerDebugInfo: %s\n", err.Error()))
-		return
-	}
-	w.Write(buf)
-	hand.lg.Info("Returned ServerDebugInfo\n")
-}
-
-type serverStatsHandler struct {
-	dataStoreHandler
-}
-
-func (hand *serverStatsHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	setResponseHeaders(w.Header())
-	hand.lg.Debugf("serverStatsHandler\n")
-	stats := hand.store.ServerStats()
-	buf, err := json.Marshal(&stats)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusInternalServerError,
-			fmt.Sprintf("error marshalling ServerStats: %s\n", err.Error()))
-		return
-	}
-	hand.lg.Debugf("Returned ServerStats %s\n", string(buf))
-	w.Write(buf)
-}
-
-type serverConfHandler struct {
-	cnf *conf.Config
-	lg  *common.Logger
-}
-
-func (hand *serverConfHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	setResponseHeaders(w.Header())
-	hand.lg.Debugf("serverConfHandler\n")
-	cnfMap := hand.cnf.Export()
-	buf, err := json.Marshal(&cnfMap)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusInternalServerError,
-			fmt.Sprintf("error marshalling serverConf: %s\n", err.Error()))
-		return
-	}
-	hand.lg.Debugf("Returned server configuration %s\n", string(buf))
-	w.Write(buf)
-}
-
-type dataStoreHandler struct {
-	lg    *common.Logger
-	store *dataStore
-}
-
-func (hand *dataStoreHandler) parseSid(w http.ResponseWriter,
-	str string) (common.SpanId, bool) {
-	var id common.SpanId
-	err := id.FromString(str)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusBadRequest,
-			fmt.Sprintf("Failed to parse span ID %s: %s", str, err.Error()))
-		w.Write([]byte("Error parsing : " + err.Error()))
-		return common.INVALID_SPAN_ID, false
-	}
-	return id, true
-}
-
-func (hand *dataStoreHandler) getReqField32(fieldName string, w http.ResponseWriter,
-	req *http.Request) (int32, bool) {
-	str := req.FormValue(fieldName)
-	if str == "" {
-		writeError(hand.lg, w, http.StatusBadRequest, fmt.Sprintf("No %s specified.", fieldName))
-		return -1, false
-	}
-	val, err := strconv.ParseUint(str, 16, 32)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusBadRequest,
-			fmt.Sprintf("Error parsing %s: %s.", fieldName, err.Error()))
-		return -1, false
-	}
-	return int32(val), true
-}
-
-type findSidHandler struct {
-	dataStoreHandler
-}
-
-func (hand *findSidHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	setResponseHeaders(w.Header())
-	req.ParseForm()
-	vars := mux.Vars(req)
-	stringSid := vars["id"]
-	sid, ok := hand.parseSid(w, stringSid)
-	if !ok {
-		return
-	}
-	hand.lg.Debugf("findSidHandler(sid=%s)\n", sid.String())
-	span := hand.store.FindSpan(sid)
-	if span == nil {
-		writeError(hand.lg, w, http.StatusNoContent,
-			fmt.Sprintf("No such span as %s\n", sid.String()))
-		return
-	}
-	w.Write(span.ToJson())
-}
-
-type findChildrenHandler struct {
-	dataStoreHandler
-}
-
-func (hand *findChildrenHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	setResponseHeaders(w.Header())
-	req.ParseForm()
-	vars := mux.Vars(req)
-	stringSid := vars["id"]
-	sid, ok := hand.parseSid(w, stringSid)
-	if !ok {
-		return
-	}
-	var lim int32
-	lim, ok = hand.getReqField32("lim", w, req)
-	if !ok {
-		return
-	}
-	hand.lg.Debugf("findChildrenHandler(sid=%s, lim=%d)\n", sid.String(), lim)
-	children := hand.store.FindChildren(sid, lim)
-	jbytes, err := json.Marshal(children)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusInternalServerError,
-			fmt.Sprintf("Error marshalling children: %s", err.Error()))
-		return
-	}
-	w.Write(jbytes)
-}
-
-type writeSpansHandler struct {
-	dataStoreHandler
-}
-
-func (hand *writeSpansHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	startTime := time.Now()
-	setResponseHeaders(w.Header())
-	client, _, serr := net.SplitHostPort(req.RemoteAddr)
-	if serr != nil {
-		writeError(hand.lg, w, http.StatusBadRequest,
-			fmt.Sprintf("Failed to split host and port for %s: %s\n",
-				req.RemoteAddr, serr.Error()))
-		return
-	}
-	dec := json.NewDecoder(req.Body)
-	var msg common.WriteSpansReq
-	err := dec.Decode(&msg)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusBadRequest,
-			fmt.Sprintf("Error parsing WriteSpansReq: %s", err.Error()))
-		return
-	}
-	if hand.lg.TraceEnabled() {
-		hand.lg.Tracef("%s: read WriteSpans REST message: %s\n",
-			req.RemoteAddr, asJson(&msg))
-	}
-	ing := hand.store.NewSpanIngestor(hand.lg, client, msg.DefaultTrid)
-	for spanIdx := 0; spanIdx < msg.NumSpans; spanIdx++ {
-		var span *common.Span
-		err := dec.Decode(&span)
-		if err != nil {
-			writeError(hand.lg, w, http.StatusBadRequest,
-				fmt.Sprintf("Failed to decode span %d out of %d: %s",
-					spanIdx, msg.NumSpans, err.Error()))
-			return
-		}
-		ing.IngestSpan(span)
-	}
-	ing.Close(startTime)
-	return
-}
-
-type queryHandler struct {
-	lg *common.Logger
-	dataStoreHandler
-}
-
-func (hand *queryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	setResponseHeaders(w.Header())
-	queryString := req.FormValue("query")
-	if queryString == "" {
-		writeError(hand.lg, w, http.StatusBadRequest, "No query provided.\n")
-		return
-	}
-	var query common.Query
-	reader := bytes.NewBufferString(queryString)
-	dec := json.NewDecoder(reader)
-	err := dec.Decode(&query)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusBadRequest,
-			fmt.Sprintf("Error parsing query '%s': %s", queryString, err.Error()))
-		return
-	}
-	var results []*common.Span
-	results, err, _ = hand.store.HandleQuery(&query)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusInternalServerError,
-			fmt.Sprintf("Internal error processing query %s: %s",
-				query.String(), err.Error()))
-		return
-	}
-	var jbytes []byte
-	jbytes, err = json.Marshal(results)
-	if err != nil {
-		writeError(hand.lg, w, http.StatusInternalServerError,
-			fmt.Sprintf("Error marshalling results: %s", err.Error()))
-		return
-	}
-	w.Write(jbytes)
-}
-
-type logErrorHandler struct {
-	lg *common.Logger
-}
-
-func (hand *logErrorHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	hand.lg.Errorf("Got unknown request %s\n", req.RequestURI)
-	writeError(hand.lg, w, http.StatusBadRequest, "Unknown request.")
-}
-
-type RestServer struct {
-	http.Server
-	listener net.Listener
-	lg       *common.Logger
-}
-
-func CreateRestServer(cnf *conf.Config, store *dataStore,
-	listener net.Listener) (*RestServer, error) {
-	var err error
-	rsv := &RestServer{}
-	rsv.lg = common.NewLogger("rest", cnf)
-
-	r := mux.NewRouter().StrictSlash(false)
-
-	r.Handle("/server/info", &serverVersionHandler{lg: rsv.lg}).Methods("GET")
-	r.Handle("/server/version", &serverVersionHandler{lg: rsv.lg}).Methods("GET")
-	r.Handle("/server/debugInfo", &serverDebugInfoHandler{lg: rsv.lg}).Methods("GET")
-
-	serverStatsH := &serverStatsHandler{dataStoreHandler: dataStoreHandler{
-		store: store, lg: rsv.lg}}
-	r.Handle("/server/stats", serverStatsH).Methods("GET")
-
-	serverConfH := &serverConfHandler{cnf: cnf, lg: rsv.lg}
-	r.Handle("/server/conf", serverConfH).Methods("GET")
-
-	writeSpansH := &writeSpansHandler{dataStoreHandler: dataStoreHandler{
-		store: store, lg: rsv.lg}}
-	r.Handle("/writeSpans", writeSpansH).Methods("POST")
-
-	queryH := &queryHandler{lg: rsv.lg, dataStoreHandler: dataStoreHandler{store: store}}
-	r.Handle("/query", queryH).Methods("GET")
-
-	span := r.PathPrefix("/span").Subrouter()
-	findSidH := &findSidHandler{dataStoreHandler: dataStoreHandler{store: store, lg: rsv.lg}}
-	span.Handle("/{id}", findSidH).Methods("GET")
-
-	findChildrenH := &findChildrenHandler{dataStoreHandler: dataStoreHandler{store: store,
-		lg: rsv.lg}}
-	span.Handle("/{id}/children", findChildrenH).Methods("GET")
-
-	// Default Handler. This will serve requests for static requests.
-	webdir := os.Getenv("HTRACED_WEB_DIR")
-	if webdir == "" {
-		webdir, err = filepath.Abs(filepath.Join(filepath.Dir(os.Args[0]), "..", "web"))
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	rsv.lg.Infof(`Serving static files from "%s"`+"\n", webdir)
-	r.PathPrefix("/").Handler(http.FileServer(http.Dir(webdir))).Methods("GET")
-
-	// Log an error message for unknown non-GET requests.
-	r.PathPrefix("/").Handler(&logErrorHandler{lg: rsv.lg})
-
-	rsv.listener = listener
-	rsv.Handler = r
-	rsv.ErrorLog = rsv.lg.Wrap("[REST] ", common.INFO)
-	go rsv.Serve(rsv.listener)
-	rsv.lg.Infof("Started REST server on %s\n", rsv.listener.Addr().String())
-	return rsv, nil
-}
-
-func (rsv *RestServer) Addr() net.Addr {
-	return rsv.listener.Addr()
-}
-
-func (rsv *RestServer) Close() {
-	rsv.listener.Close()
-}
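
The handlers above serve plain JSON over HTTP. For reference, here is a minimal, self-contained sketch of calling the /server/version route (which marshals a common.ServerVersion, as shown in serverVersionHandler) from an external Go program. The address 127.0.0.1:9096 is only an example; substitute the web address from your own htraced configuration.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// GET /server/version returns a JSON-encoded common.ServerVersion
	// (the release version and git version of the running daemon).
	// The host and port here are illustrative only.
	resp, err := http.Get("http://127.0.0.1:9096/server/version")
	if err != nil {
		fmt.Printf("request failed: %s\n", err.Error())
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Printf("read failed: %s\n", err.Error())
		return
	}
	fmt.Printf("status=%d body=%s\n", resp.StatusCode, body)
}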

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htracedTool/cmd.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htracedTool/cmd.go b/htrace-htraced/go/src/org/apache/htrace/htracedTool/cmd.go
deleted file mode 100644
index 2eff0a8..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htracedTool/cmd.go
+++ /dev/null
@@ -1,442 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"github.com/alecthomas/kingpin"
-	"io"
-	htrace "org/apache/htrace/client"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"os"
-	"sort"
-	"strings"
-	"text/tabwriter"
-	"time"
-)
-
-var RELEASE_VERSION string
-var GIT_VERSION string
-
-const EXIT_SUCCESS = 0
-const EXIT_FAILURE = 1
-
-var verbose bool
-
-const USAGE = `The Apache HTrace command-line tool.  This tool retrieves and modifies settings and
-other data on a running htraced daemon.
-
-If we find an ` + conf.CONFIG_FILE_NAME + ` configuration file in the list of directories
-specified in ` + conf.HTRACED_CONF_DIR + `, we will use that configuration; otherwise, 
-the defaults will be used.
-`
-
-func main() {
-	// Load htraced configuration
-	cnf, cnfLog := conf.LoadApplicationConfig("htrace.tool.")
-	lg := common.NewLogger("conf", cnf)
-	defer lg.Close()
-	scanner := bufio.NewScanner(cnfLog)
-	for scanner.Scan() {
-		lg.Debugf("%s\n", scanner.Text())
-	}
-
-	// Parse argv
-	app := kingpin.New(os.Args[0], USAGE)
-	app.Flag("Dmy.key", "Set configuration key 'my.key' to 'my.value'.  Replace 'my.key' "+
-		"with any key you want to set.").Default("my.value").String()
-	addr := app.Flag("addr", "Server address.").String()
-	verboseFlag := app.Flag("verbose", "Verbose.").Default("false").Bool()
-	version := app.Command("version", "Print the version of this program.")
-	serverVersion := app.Command("serverVersion", "Print the version of the htraced server.")
-	serverStats := app.Command("serverStats", "Print statistics retrieved from the htraced server.")
-	serverStatsJson := serverStats.Flag("json", "Display statistics as raw JSON.").Default("false").Bool()
-	serverDebugInfo := app.Command("serverDebugInfo", "Print the debug info of the htraced server.")
-	serverConf := app.Command("serverConf", "Print the server configuration retrieved from the htraced server.")
-	findSpan := app.Command("findSpan", "Print information about a trace span with a given ID.")
-	findSpanId := findSpan.Arg("id", "Span ID to find. Example: be305e54-4534-2110-a0b2-e06b9effe112").Required().String()
-	findChildren := app.Command("findChildren", "Print out the span IDs that are children of a given span ID.")
-	parentSpanId := findChildren.Arg("id", "Span ID to print children for. Example: be305e54-4534-2110-a0b2-e06b9effe112").
-		Required().String()
-	childLim := findChildren.Flag("lim", "Maximum number of child IDs to print.").Default("20").Int()
-	loadFile := app.Command("loadFile", "Write whitespace-separated JSON spans from a file to the server.")
-	loadFilePath := loadFile.Arg("path",
-		"A file containing whitespace-separated span JSON.").Required().String()
-	loadJson := app.Command("load", "Write JSON spans from the command-line to the server.")
-	loadJsonArg := loadJson.Arg("json", "A JSON span to write to the server.").Required().String()
-	dumpAll := app.Command("dumpAll", "Dump all spans from the htraced daemon.")
-	dumpAllOutPath := dumpAll.Arg("path", "The path to dump the trace spans to.").Default("-").String()
-	dumpAllLim := dumpAll.Flag("lim", "The number of spans to transfer from the server at once.").
-		Default("100").Int()
-	graph := app.Command("graph", "Visualize span JSON as a graph.")
-	graphJsonFile := graph.Arg("input", "The JSON file to load").Required().String()
-	graphDotFile := graph.Flag("output",
-		"The path to write a GraphViz dotfile to.  This file can be used as input to "+
-			"GraphViz, in order to generate a pretty picture.  See graphviz.org for more "+
-			"information about generating pictures of graphs.").Default("-").String()
-	query := app.Command("query", "Send a query to htraced.")
-	queryLim := query.Flag("lim", "Maximum number of spans to retrieve.").Default("20").Int()
-	queryArg := query.Arg("query", "The query string to send.  Query strings have the format "+
-		"[TYPE] [OPERATOR] [CONST], joined by AND statements.").Required().String()
-	rawQuery := app.Command("rawQuery", "Send a raw JSON query to htraced.")
-	rawQueryArg := rawQuery.Arg("json", "The query JSON to send.").Required().String()
-	cmd := kingpin.MustParse(app.Parse(os.Args[1:]))
-	// Flag values are only populated once the command line has been parsed.
-	verbose = *verboseFlag
-
-	// Add the command-line settings into the configuration.
-	if *addr != "" {
-		cnf = cnf.Clone(conf.HTRACE_WEB_ADDRESS, *addr)
-	}
-
-	// Handle commands that don't require an HTrace client.
-	switch cmd {
-	case version.FullCommand():
-		os.Exit(printVersion())
-	case graph.FullCommand():
-		err := jsonSpanFileToDotFile(*graphJsonFile, *graphDotFile)
-		if err != nil {
-			fmt.Printf("graphing error: %s\n", err.Error())
-			os.Exit(EXIT_FAILURE)
-		}
-		os.Exit(EXIT_SUCCESS)
-	}
-
-	// Create HTrace client
-	hcl, err := htrace.NewClient(cnf, nil)
-	if err != nil {
-		fmt.Printf("Failed to create HTrace client: %s\n", err.Error())
-		os.Exit(EXIT_FAILURE)
-	}
-
-	// Handle commands that require an HTrace client.
-	switch cmd {
-	case version.FullCommand():
-		os.Exit(printVersion())
-	case serverVersion.FullCommand():
-		os.Exit(printServerVersion(hcl))
-	case serverStats.FullCommand():
-		if *serverStatsJson {
-			os.Exit(printServerStatsJson(hcl))
-		} else {
-			os.Exit(printServerStats(hcl))
-		}
-	case serverDebugInfo.FullCommand():
-		os.Exit(printServerDebugInfo(hcl))
-	case serverConf.FullCommand():
-		os.Exit(printServerConfJson(hcl))
-	case findSpan.FullCommand():
-		var id common.SpanId
-		if err := id.FromString(*findSpanId); err != nil {
-			fmt.Printf("Failed to parse span ID %s: %s\n", *findSpanId, err.Error())
-			os.Exit(EXIT_FAILURE)
-		}
-		os.Exit(doFindSpan(hcl, id))
-	case findChildren.FullCommand():
-		var id common.SpanId
-		if err := id.FromString(*parentSpanId); err != nil {
-			fmt.Printf("Failed to parse span ID %s: %s\n", *parentSpanId, err.Error())
-			os.Exit(EXIT_FAILURE)
-		}
-		os.Exit(doFindChildren(hcl, id, *childLim))
-	case loadJson.FullCommand():
-		os.Exit(doLoadSpanJson(hcl, *loadJsonArg))
-	case loadFile.FullCommand():
-		os.Exit(doLoadSpanJsonFile(hcl, *loadFilePath))
-	case dumpAll.FullCommand():
-		err := doDumpAll(hcl, *dumpAllOutPath, *dumpAllLim)
-		if err != nil {
-			fmt.Printf("dumpAll error: %s\n", err.Error())
-			os.Exit(EXIT_FAILURE)
-		}
-		os.Exit(EXIT_SUCCESS)
-	case query.FullCommand():
-		err := doQueryFromString(hcl, *queryArg, *queryLim)
-		if err != nil {
-			fmt.Printf("query error: %s\n", err.Error())
-			os.Exit(EXIT_FAILURE)
-		}
-		os.Exit(EXIT_SUCCESS)
-	case rawQuery.FullCommand():
-		err := doRawQuery(hcl, *rawQueryArg)
-		if err != nil {
-			fmt.Printf("raw query error: %s\n", err.Error())
-			os.Exit(EXIT_FAILURE)
-		}
-		os.Exit(EXIT_SUCCESS)
-	}
-
-	app.UsageErrorf(os.Stderr, "You must supply a command to run.")
-}
-
-// Print the version of the htrace binary.
-func printVersion() int {
-	fmt.Printf("Running htracedTool %s [%s].\n", RELEASE_VERSION, GIT_VERSION)
-	return EXIT_SUCCESS
-}
-
-// Print information retrieved from an htraced server via /server/info
-func printServerVersion(hcl *htrace.Client) int {
-	ver, err := hcl.GetServerVersion()
-	if err != nil {
-		fmt.Println(err.Error())
-		return EXIT_FAILURE
-	}
-	fmt.Printf("HTraced server version %s (%s)\n", ver.ReleaseVersion, ver.GitVersion)
-	return EXIT_SUCCESS
-}
-
-// Print information retrieved from an htraced server via /server/info
-func printServerStats(hcl *htrace.Client) int {
-	stats, err := hcl.GetServerStats()
-	if err != nil {
-		fmt.Println(err.Error())
-		return EXIT_FAILURE
-	}
-	w := new(tabwriter.Writer)
-	w.Init(os.Stdout, 0, 8, 0, '\t', 0)
-	fmt.Fprintf(w, "HTRACED SERVER STATS\n")
-	fmt.Fprintf(w, "Datastore Start\t%s\n",
-		common.UnixMsToTime(stats.LastStartMs).Format(time.RFC3339))
-	fmt.Fprintf(w, "Server Time\t%s\n",
-		common.UnixMsToTime(stats.CurMs).Format(time.RFC3339))
-	fmt.Fprintf(w, "Spans reaped\t%d\n", stats.ReapedSpans)
-	fmt.Fprintf(w, "Spans ingested\t%d\n", stats.IngestedSpans)
-	fmt.Fprintf(w, "Spans written\t%d\n", stats.WrittenSpans)
-	fmt.Fprintf(w, "Spans dropped by server\t%d\n", stats.ServerDroppedSpans)
-	dur := time.Millisecond * time.Duration(stats.AverageWriteSpansLatencyMs)
-	fmt.Fprintf(w, "Average WriteSpan Latency\t%s\n", dur.String())
-	dur = time.Millisecond * time.Duration(stats.MaxWriteSpansLatencyMs)
-	fmt.Fprintf(w, "Maximum WriteSpan Latency\t%s\n", dur.String())
-	fmt.Fprintf(w, "Number of leveldb directories\t%d\n", len(stats.Dirs))
-	w.Flush()
-	fmt.Println("")
-	for i := range stats.Dirs {
-		dir := stats.Dirs[i]
-		fmt.Printf("==== %s ===\n", dir.Path)
-		fmt.Printf("Approximate number of bytes: %d\n", dir.ApproximateBytes)
-		stats := strings.Replace(dir.LevelDbStats, "\\n", "\n", -1)
-		fmt.Printf("%s\n", stats)
-	}
-	w = new(tabwriter.Writer)
-	w.Init(os.Stdout, 0, 8, 0, '\t', 0)
-	fmt.Fprintf(w, "HOST SPAN METRICS\n")
-	mtxMap := stats.HostSpanMetrics
-	keys := make(sort.StringSlice, len(mtxMap))
-	i := 0
-	for k := range mtxMap {
-		keys[i] = k
-		i++
-	}
-	sort.Sort(keys)
-	for k := range keys {
-		mtx := mtxMap[keys[k]]
-		fmt.Fprintf(w, "%s\twritten: %d\tserver dropped: %d\n",
-			keys[k], mtx.Written, mtx.ServerDropped)
-	}
-	w.Flush()
-	return EXIT_SUCCESS
-}
-
-// Print information retrieved from an htraced server via /server/info as JSON
-func printServerStatsJson(hcl *htrace.Client) int {
-	stats, err := hcl.GetServerStats()
-	if err != nil {
-		fmt.Println(err.Error())
-		return EXIT_FAILURE
-	}
-	buf, err := json.MarshalIndent(stats, "", "  ")
-	if err != nil {
-		fmt.Printf("Error marshalling server stats: %s", err.Error())
-		return EXIT_FAILURE
-	}
-	fmt.Printf("%s\n", string(buf))
-	return EXIT_SUCCESS
-}
-
-// Print information retrieved from an htraced server via /server/debugInfo
-func printServerDebugInfo(hcl *htrace.Client) int {
-	stats, err := hcl.GetServerDebugInfo()
-	if err != nil {
-		fmt.Println(err.Error())
-		return EXIT_FAILURE
-	}
-	fmt.Println("=== GOROUTINE STACKS ===")
-	fmt.Print(stats.StackTraces)
-	fmt.Println("=== END GOROUTINE STACKS ===")
-	fmt.Println("=== GC STATISTICS ===")
-	fmt.Print(stats.GCStats)
-	fmt.Println("=== END GC STATISTICS ===")
-	return EXIT_SUCCESS
-}
-
-// Print information retrieved from an htraced server via /server/conf as JSON
-func printServerConfJson(hcl *htrace.Client) int {
-	cnf, err := hcl.GetServerConf()
-	if err != nil {
-		fmt.Println(err.Error())
-		return EXIT_FAILURE
-	}
-	buf, err := json.MarshalIndent(cnf, "", "  ")
-	if err != nil {
-		fmt.Printf("Error marshalling server conf: %s", err.Error())
-		return EXIT_FAILURE
-	}
-	fmt.Printf("%s\n", string(buf))
-	return EXIT_SUCCESS
-}
-
-// Print information about a trace span.
-func doFindSpan(hcl *htrace.Client, sid common.SpanId) int {
-	span, err := hcl.FindSpan(sid)
-	if err != nil {
-		fmt.Println(err.Error())
-		return EXIT_FAILURE
-	}
-	if span == nil {
-		fmt.Printf("Span ID not found.\n")
-		return EXIT_FAILURE
-	}
-	pbuf, err := json.MarshalIndent(span, "", "  ")
-	if err != nil {
-		fmt.Printf("Error pretty-printing span to JSON: %s\n", err.Error())
-		return EXIT_FAILURE
-	}
-	fmt.Printf("%s\n", string(pbuf))
-	return EXIT_SUCCESS
-}
-
-func doLoadSpanJsonFile(hcl *htrace.Client, spanFile string) int {
-	if spanFile == "" {
-		fmt.Printf("You must specify the json file to load.\n")
-		return EXIT_FAILURE
-	}
-	file, err := OpenInputFile(spanFile)
-	if err != nil {
-		fmt.Printf("Failed to open %s: %s\n", spanFile, err.Error())
-		return EXIT_FAILURE
-	}
-	defer file.Close()
-	return doLoadSpans(hcl, bufio.NewReader(file))
-}
-
-func doLoadSpanJson(hcl *htrace.Client, spanJson string) int {
-	return doLoadSpans(hcl, bytes.NewBufferString(spanJson))
-}
-
-func doLoadSpans(hcl *htrace.Client, reader io.Reader) int {
-	dec := json.NewDecoder(reader)
-	spans := make([]*common.Span, 0, 32)
-	var err error
-	for {
-		var span common.Span
-		if err = dec.Decode(&span); err != nil {
-			if err == io.EOF {
-				break
-			}
-			fmt.Printf("Failed to decode JSON: %s\n", err.Error())
-			return EXIT_FAILURE
-		}
-		spans = append(spans, &span)
-	}
-	if verbose {
-		fmt.Printf("Writing ")
-		prefix := ""
-		for i := range spans {
-			fmt.Printf("%s%s", prefix, spans[i].ToJson())
-			prefix = ", "
-		}
-		fmt.Printf("\n")
-	}
-	err = hcl.WriteSpans(spans)
-	if err != nil {
-		fmt.Println(err.Error())
-		return EXIT_FAILURE
-	}
-	return EXIT_SUCCESS
-}
-
-// Find information about the children of a span.
-func doFindChildren(hcl *htrace.Client, sid common.SpanId, lim int) int {
-	spanIds, err := hcl.FindChildren(sid, lim)
-	if err != nil {
-		fmt.Printf("%s\n", err.Error())
-		return EXIT_FAILURE
-	}
-	pbuf, err := json.MarshalIndent(spanIds, "", "  ")
-	if err != nil {
-		fmt.Printf("Error pretty-printing span IDs to JSON: %s\n", err.Error())
-		return EXIT_FAILURE
-	}
-	fmt.Printf("%s\n", string(pbuf))
-	return EXIT_SUCCESS
-}
-
-// Dump all spans from the htraced daemon.
-func doDumpAll(hcl *htrace.Client, outPath string, lim int) error {
-	file, err := CreateOutputFile(outPath)
-	if err != nil {
-		return err
-	}
-	w := bufio.NewWriter(file)
-	defer func() {
-		if file != nil {
-			w.Flush()
-			file.Close()
-		}
-	}()
-	out := make(chan *common.Span, 50)
-	var dumpErr error
-	go func() {
-		dumpErr = hcl.DumpAll(lim, out)
-	}()
-	var numSpans int64
-	nextLogTime := time.Now().Add(time.Second * 5)
-	for {
-		span, channelOpen := <-out
-		if !channelOpen {
-			break
-		}
-		if err == nil {
-			_, err = fmt.Fprintf(w, "%s\n", span.ToJson())
-		}
-		if verbose {
-			numSpans++
-			now := time.Now()
-			if !now.Before(nextLogTime) {
-				nextLogTime = now.Add(time.Second * 5)
-				fmt.Printf("received %d span(s)...\n", numSpans)
-			}
-		}
-	}
-	if err != nil {
-		return errors.New(fmt.Sprintf("Write error %s", err.Error()))
-	}
-	if dumpErr != nil {
-		return errors.New(fmt.Sprintf("Dump error %s", dumpErr.Error()))
-	}
-	err = w.Flush()
-	if err != nil {
-		return err
-	}
-	err = file.Close()
-	file = nil
-	if err != nil {
-		return err
-	}
-	return nil
-}
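
The subcommands above share one pattern: load the application configuration, build an htrace.Client, make a single call, and print the result. Below is a minimal sketch of using that client API directly rather than through the tool, written against the pre-rename import paths this commit removes; the span contents and exit codes are purely illustrative.

package main

import (
	"fmt"
	"os"

	htrace "org/apache/htrace/client"
	"org/apache/htrace/common"
	"org/apache/htrace/conf"
)

func main() {
	// Load the same configuration the command-line tool uses.  The second
	// return value carries configuration-load log messages; it is ignored
	// in this sketch.
	cnf, _ := conf.LoadApplicationConfig("htrace.tool.")
	hcl, err := htrace.NewClient(cnf, nil)
	if err != nil {
		fmt.Printf("Failed to create HTrace client: %s\n", err.Error())
		os.Exit(1)
	}
	// Ask the daemon for its version, as the serverVersion subcommand does.
	ver, err := hcl.GetServerVersion()
	if err != nil {
		fmt.Printf("GetServerVersion failed: %s\n", err.Error())
		os.Exit(1)
	}
	fmt.Printf("htraced %s (%s)\n", ver.ReleaseVersion, ver.GitVersion)
	// Write a single span, as the load subcommand does.  The span ID,
	// timestamps, and names below are illustrative only.
	span := &common.Span{
		Id: common.TestId("b9f2a1e07b6e4f16b0c2b27303b20e79"),
		SpanData: common.SpanData{
			Begin:       1424736225037,
			End:         1424736225901,
			Description: "exampleOperation",
			TracerId:    "exampleTracer",
			Parents:     []common.SpanId{},
		},
	}
	if err := hcl.WriteSpans([]*common.Span{span}); err != nil {
		fmt.Printf("WriteSpans failed: %s\n", err.Error())
		os.Exit(1)
	}
}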

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htracedTool/file.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htracedTool/file.go b/htrace-htraced/go/src/org/apache/htrace/htracedTool/file.go
deleted file mode 100644
index ea214be..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htracedTool/file.go
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"bufio"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"org/apache/htrace/common"
-	"os"
-)
-
-// A file used for input.
-// Transparently supports using stdin for input.
-type InputFile struct {
-	*os.File
-	path string
-}
-
-// Open an input file.  Stdin will be used when path is -
-func OpenInputFile(path string) (*InputFile, error) {
-	if path == "-" {
-		return &InputFile{File: os.Stdin, path: path}, nil
-	}
-	file, err := os.Open(path)
-	if err != nil {
-		return nil, err
-	}
-	return &InputFile{File: file, path: path}, nil
-}
-
-func (file *InputFile) Close() {
-	if file.path != "-" {
-		file.File.Close()
-	}
-}
-
-// A file used for output.
-// Transparently supports using stdout for output.
-type OutputFile struct {
-	*os.File
-	path string
-}
-
-// Create an output file.  Stdout will be used when path is -
-func CreateOutputFile(path string) (*OutputFile, error) {
-	if path == "-" {
-		return &OutputFile{File: os.Stdout, path: path}, nil
-	}
-	file, err := os.Create(path)
-	if err != nil {
-		return nil, err
-	}
-	return &OutputFile{File: file, path: path}, nil
-}
-
-func (file *OutputFile) Close() error {
-	if file.path != "-" {
-		return file.File.Close()
-	}
-	return nil
-}
-
-// FailureDeferringWriter is a writer which allows us to call Printf multiple
-// times and then check if all the printfs succeeded at the very end, rather
-// than checking after each call.   We will not attempt to write more data
-// after the first write failure.
-type FailureDeferringWriter struct {
-	io.Writer
-	err error
-}
-
-func NewFailureDeferringWriter(writer io.Writer) *FailureDeferringWriter {
-	return &FailureDeferringWriter{writer, nil}
-}
-
-func (w *FailureDeferringWriter) Printf(format string, v ...interface{}) {
-	if w.err != nil {
-		return
-	}
-	str := fmt.Sprintf(format, v...)
-	_, err := w.Writer.Write([]byte(str))
-	if err != nil {
-		w.err = err
-	}
-}
-
-func (w *FailureDeferringWriter) Error() error {
-	return w.err
-}
-
-// Read a file full of whitespace-separated span JSON into a slice of spans.
-func readSpansFile(path string) (common.SpanSlice, error) {
-	file, err := OpenInputFile(path)
-	if err != nil {
-		return nil, err
-	}
-	defer file.Close()
-	return readSpans(bufio.NewReader(file))
-}
-
-// Read whitespace-separated span JSON into a slice of spans.
-func readSpans(reader io.Reader) (common.SpanSlice, error) {
-	spans := make(common.SpanSlice, 0)
-	dec := json.NewDecoder(reader)
-	for {
-		var span common.Span
-		err := dec.Decode(&span)
-		if err != nil {
-			if err != io.EOF {
-				return nil, errors.New(fmt.Sprintf("Decode error after decoding %d "+
-					"span(s): %s", len(spans), err.Error()))
-			}
-			break
-		}
-		spans = append(spans, &span)
-	}
-	return spans, nil
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htracedTool/file_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htracedTool/file_test.go b/htrace-htraced/go/src/org/apache/htrace/htracedTool/file_test.go
deleted file mode 100644
index 98e5e6c..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htracedTool/file_test.go
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"errors"
-	"io"
-	"io/ioutil"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"os"
-	"strings"
-	"testing"
-)
-
-func TestInputFileAndOutputFile(t *testing.T) {
-	tdir, err := ioutil.TempDir(os.TempDir(), "TestInputFileAndOutputFile")
-	if err != nil {
-		t.Fatalf("failed to create TempDir: %s\n", err.Error())
-	}
-	defer os.RemoveAll(tdir)
-	tpath := tdir + conf.PATH_SEP + "test"
-	var ofile *OutputFile
-	ofile, err = CreateOutputFile(tpath)
-	if err != nil {
-		t.Fatalf("failed to create OutputFile at %s: %s\n", tpath, err.Error())
-	}
-	defer func() {
-		if ofile != nil {
-			ofile.Close()
-		}
-	}()
-	w := NewFailureDeferringWriter(ofile)
-	w.Printf("Hello, world!\n")
-	w.Printf("2 + 2 = %d\n", 4)
-	if w.Error() != nil {
-		t.Fatalf("got unexpected error writing to %s: %s\n", tpath, w.Error().Error())
-	}
-	err = ofile.Close()
-	ofile = nil
-	if err != nil {
-		t.Fatalf("error on closing OutputFile for %s: %s\n", tpath, err.Error())
-	}
-	var ifile *InputFile
-	ifile, err = OpenInputFile(tpath)
-	if err != nil {
-		t.Fatalf("failed to open InputFile at %s: %s\n", tpath, err.Error())
-	}
-	defer ifile.Close()
-	expected := "Hello, world!\n2 + 2 = 4\n"
-	buf := make([]byte, len(expected))
-	_, err = io.ReadAtLeast(ifile, buf, len(buf))
-	if err != nil {
-		t.Fatalf("unexpected error on reading %s: %s\n", tpath, err.Error())
-	}
-	str := string(buf)
-	if str != expected {
-		t.Fatalf("Could not read back what we wrote to %s.\n"+
-			"Got:\n%s\nExpected:\n%s\n", tpath, str, expected)
-	}
-}
-
-type LimitedBufferWriter struct {
-	buf []byte
-	off int
-}
-
-const LIMITED_BUFFER_MESSAGE = "There isn't enough buffer to go around!"
-
-func (w *LimitedBufferWriter) Write(p []byte) (int, error) {
-	var nwritten int
-	for i := range p {
-		if w.off >= len(w.buf) {
-			return nwritten, errors.New(LIMITED_BUFFER_MESSAGE)
-		}
-		w.buf[w.off] = p[i]
-		w.off = w.off + 1
-		nwritten++
-	}
-	return nwritten, nil
-}
-
-func TestFailureDeferringWriter(t *testing.T) {
-	lw := LimitedBufferWriter{buf: make([]byte, 20), off: 0}
-	w := NewFailureDeferringWriter(&lw)
-	w.Printf("Zippity do dah #%d\n", 1)
-	w.Printf("Zippity do dah #%d\n", 2)
-	if w.Error() == nil {
-		t.Fatalf("expected FailureDeferringWriter to experience a failure due to " +
-			"limited buffer size, but it did not.")
-	}
-	if w.Error().Error() != LIMITED_BUFFER_MESSAGE {
-		t.Fatalf("expected FailureDeferringWriter to have the error message %s, but "+
-			"the message was %s\n", LIMITED_BUFFER_MESSAGE, w.Error().Error())
-	}
-	expected := "Zippity do dah #1\nZi"
-	if string(lw.buf) != expected {
-		t.Fatalf("expected LimitedBufferWriter to contain %s, but it contained %s "+
-			"instead.\n", expected, string(lw.buf))
-	}
-}
-
-func TestReadSpans(t *testing.T) {
-	SPAN_TEST_STR := `{"a":"b9f2a1e07b6e4f16b0c2b27303b20e79",` +
-		`"b":1424736225037,"e":1424736225901,"d":"ClientNamenodeProtocol#getFileInfo",` +
-		`"r":"FsShell","p":["3afebdc0a13f4feb811cc5c0e42d30b1"]}
-{"a":"3afebdc0a13f4feb811cc5c0e42d30b1","b":1424736224969,` +
-		`"e":1424736225960,"d":"getFileInfo","r":"FsShell","p":[],"n":{"path":"/"}}
-`
-	r := strings.NewReader(SPAN_TEST_STR)
-	spans, err := readSpans(r)
-	if err != nil {
-		t.Fatalf("Failed to read spans from string via readSpans: %s\n", err.Error())
-	}
-	SPAN_TEST_EXPECTED := common.SpanSlice{
-		&common.Span{
-			Id: common.TestId("b9f2a1e07b6e4f16b0c2b27303b20e79"),
-			SpanData: common.SpanData{
-				Begin:       1424736225037,
-				End:         1424736225901,
-				Description: "ClientNamenodeProtocol#getFileInfo",
-				TracerId:    "FsShell",
-				Parents:     []common.SpanId{common.TestId("3afebdc0a13f4feb811cc5c0e42d30b1")},
-			},
-		},
-		&common.Span{
-			Id: common.TestId("3afebdc0a13f4feb811cc5c0e42d30b1"),
-			SpanData: common.SpanData{
-				Begin:       1424736224969,
-				End:         1424736225960,
-				Description: "getFileInfo",
-				TracerId:    "FsShell",
-				Parents:     []common.SpanId{},
-				Info: common.TraceInfoMap{
-					"path": "/",
-				},
-			},
-		},
-	}
-	if len(spans) != len(SPAN_TEST_EXPECTED) {
-		t.Fatalf("Expected %d spans, but got %d\n",
-			len(SPAN_TEST_EXPECTED), len(spans))
-	}
-	for i := range SPAN_TEST_EXPECTED {
-		common.ExpectSpansEqual(t, spans[i], SPAN_TEST_EXPECTED[i])
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htracedTool/graph.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htracedTool/graph.go b/htrace-htraced/go/src/org/apache/htrace/htracedTool/graph.go
deleted file mode 100644
index 024d973..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htracedTool/graph.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"io"
-	"org/apache/htrace/common"
-	"os"
-	"sort"
-)
-
-// Create a dotfile from a json file.
-func jsonSpanFileToDotFile(jsonFile string, dotFile string) error {
-	spans, err := readSpansFile(jsonFile)
-	if err != nil {
-		return errors.New(fmt.Sprintf("error reading %s: %s",
-			jsonFile, err.Error()))
-	}
-	var file *OutputFile
-	file, err = CreateOutputFile(dotFile)
-	if err != nil {
-		return errors.New(fmt.Sprintf("error opening %s for write: %s",
-			dotFile, err.Error()))
-	}
-	defer func() {
-		if file != nil {
-			file.Close()
-		}
-	}()
-	writer := bufio.NewWriter(file)
-	err = spansToDot(spans, writer)
-	if err != nil {
-		return err
-	}
-	err = writer.Flush()
-	if err != nil {
-		return err
-	}
-	err = file.Close()
-	file = nil
-	return err
-}
-
-// Create output in dotfile format from a set of spans.
-func spansToDot(spans common.SpanSlice, writer io.Writer) error {
-	sort.Sort(spans)
-	idMap := make(map[[16]byte]*common.Span)
-	for i := range spans {
-		span := spans[i]
-		if idMap[span.Id.ToArray()] != nil {
-			fmt.Fprintf(os.Stderr, "There were multiple spans listed which "+
-				"had ID %s.\nFirst:%s\nOther:%s\n", span.Id.String(),
-				idMap[span.Id.ToArray()].ToJson(), span.ToJson())
-		} else {
-			idMap[span.Id.ToArray()] = span
-		}
-	}
-	childMap := make(map[[16]byte]common.SpanSlice)
-	for i := range spans {
-		child := spans[i]
-		for j := range child.Parents {
-			parent := idMap[child.Parents[j].ToArray()]
-			if parent == nil {
-				fmt.Fprintf(os.Stderr, "Can't find parent id %s for %s\n",
-					child.Parents[j].String(), child.ToJson())
-			} else {
-				children := childMap[parent.Id.ToArray()]
-				if children == nil {
-					children = make(common.SpanSlice, 0)
-				}
-				children = append(children, child)
-				childMap[parent.Id.ToArray()] = children
-			}
-		}
-	}
-	w := NewFailureDeferringWriter(writer)
-	w.Printf("digraph spans {\n")
-	// Write out the nodes with their descriptions.
-	for i := range spans {
-		w.Printf(`  "%s" [label="%s"];`+"\n",
-			spans[i].Id.String(), spans[i].Description)
-	}
-	// Write out the edges between nodes... the parent/children relationships
-	for i := range spans {
-		children := childMap[spans[i].Id.ToArray()]
-		sort.Sort(children)
-		if children != nil {
-			for c := range children {
-				w.Printf(`  "%s" -> "%s";`+"\n",
-					spans[i].Id.String(), children[c].Id.String())
-			}
-		}
-	}
-	w.Printf("}\n")
-	return w.Error()
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htracedTool/graph_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htracedTool/graph_test.go b/htrace-htraced/go/src/org/apache/htrace/htracedTool/graph_test.go
deleted file mode 100644
index 621b3dc..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htracedTool/graph_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"bytes"
-	"org/apache/htrace/common"
-	"testing"
-)
-
-func TestSpansToDot(t *testing.T) {
-	TEST_SPANS := common.SpanSlice{
-		&common.Span{
-			Id: common.TestId("814c8ee0e7984be3a8af00ac64adccb6"),
-			SpanData: common.SpanData{
-				Begin:       1424813349020,
-				End:         1424813349134,
-				Description: "newDFSInputStream",
-				TracerId:    "FsShell",
-				Parents:     []common.SpanId{},
-				Info: common.TraceInfoMap{
-					"path": "/",
-				},
-			},
-		},
-		&common.Span{
-			Id: common.TestId("cf2d5de696454548bc055d1e6024054c"),
-			SpanData: common.SpanData{
-				Begin:       1424813349025,
-				End:         1424813349133,
-				Description: "getBlockLocations",
-				TracerId:    "FsShell",
-				Parents:     []common.SpanId{common.TestId("814c8ee0e7984be3a8af00ac64adccb6")},
-			},
-		},
-		&common.Span{
-			Id: common.TestId("37623806f9c64483b834b8ea5d6b4827"),
-			SpanData: common.SpanData{
-				Begin:       1424813349027,
-				End:         1424813349073,
-				Description: "ClientNamenodeProtocol#getBlockLocations",
-				TracerId:    "FsShell",
-				Parents:     []common.SpanId{common.TestId("cf2d5de696454548bc055d1e6024054c")},
-			},
-		},
-	}
-	w := bytes.NewBuffer(make([]byte, 0, 2048))
-	err := spansToDot(TEST_SPANS, w)
-	if err != nil {
-		t.Fatalf("spansToDot failed: error %s\n", err.Error())
-	}
-	EXPECTED_STR := `digraph spans {
-  "37623806f9c64483b834b8ea5d6b4827" [label="ClientNamenodeProtocol#getBlockLocations"];
-  "814c8ee0e7984be3a8af00ac64adccb6" [label="newDFSInputStream"];
-  "cf2d5de696454548bc055d1e6024054c" [label="getBlockLocations"];
-  "814c8ee0e7984be3a8af00ac64adccb6" -> "cf2d5de696454548bc055d1e6024054c";
-  "cf2d5de696454548bc055d1e6024054c" -> "37623806f9c64483b834b8ea5d6b4827";
-}
-`
-	if w.String() != EXPECTED_STR {
-		t.Fatalf("Expected to get:\n%s\nGot:\n%s\n", EXPECTED_STR, w.String())
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htracedTool/queries.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htracedTool/queries.go b/htrace-htraced/go/src/org/apache/htrace/htracedTool/queries.go
deleted file mode 100644
index 1e6f51f..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htracedTool/queries.go
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	htrace "org/apache/htrace/client"
-	"org/apache/htrace/common"
-	"strings"
-	"unicode"
-)
-
-// Convert a string into a whitespace-separated sequence of strings.
-func tokenize(str string) []string {
-	prevQuote := rune(0)
-	f := func(c rune) bool {
-		switch {
-		case c == prevQuote:
-			prevQuote = rune(0)
-			return true
-		case prevQuote != rune(0):
-			return false
-		case unicode.In(c, unicode.Quotation_Mark):
-			prevQuote = c
-			return true
-		default:
-			return unicode.IsSpace(c)
-		}
-	}
-	return strings.FieldsFunc(str, f)
-}
-
-// Parses a query string in the format of a series of
-// [TYPE] [OPERATOR] [CONST] tuples, joined by AND statements.
-type predicateParser struct {
-	tokens   []string
-	curToken int
-}
-
-func (ps *predicateParser) Parse() (*common.Predicate, error) {
-	if ps.curToken >= len(ps.tokens) {
-		return nil, nil
-	}
-	if ps.curToken > 0 {
-		if strings.ToLower(ps.tokens[ps.curToken]) != "and" {
-			return nil, errors.New(fmt.Sprintf("Error parsing on token %d: "+
-				"expected predicates to be joined by 'and', but found '%s'",
-				ps.curToken, ps.tokens[ps.curToken]))
-		}
-		ps.curToken++
-		if ps.curToken >= len(ps.tokens) {
-			return nil, errors.New(fmt.Sprintf("Nothing found after 'and' at "+
-				"token %d", ps.curToken))
-		}
-	}
-	field := common.Field(strings.ToLower(ps.tokens[ps.curToken]))
-	if !field.IsValid() {
-		return nil, errors.New(fmt.Sprintf("Invalid field specifier at token %d.  "+
-			"Can't understand %s.  Valid field specifiers are %v", ps.curToken,
-			ps.tokens[ps.curToken], common.ValidFields()))
-	}
-	ps.curToken++
-	if ps.curToken >= len(ps.tokens) {
-		return nil, errors.New(fmt.Sprintf("Nothing found after field specifier "+
-			"at token %d", ps.curToken))
-	}
-	op := common.Op(strings.ToLower(ps.tokens[ps.curToken]))
-	if !op.IsValid() {
-		return nil, errors.New(fmt.Sprintf("Invalid operation specifier at token %d.  "+
-			"Can't understand %s.  Valid operation specifiers are %v", ps.curToken,
-			ps.tokens[ps.curToken], common.ValidOps()))
-	}
-	ps.curToken++
-	if ps.curToken >= len(ps.tokens) {
-		return nil, errors.New(fmt.Sprintf("Nothing found after field specifier "+
-			"at token %d", ps.curToken))
-	}
-	val := ps.tokens[ps.curToken]
-	ps.curToken++
-	return &common.Predicate{Op: op, Field: field, Val: val}, nil
-}
-
-func parseQueryString(str string) ([]common.Predicate, error) {
-	ps := predicateParser{tokens: tokenize(str)}
-	if verbose {
-		fmt.Printf("Running query [ ")
-		prefix := ""
-		for tokenIdx := range ps.tokens {
-			fmt.Printf("%s'%s'", prefix, ps.tokens[tokenIdx])
-			prefix = ", "
-		}
-		fmt.Printf(" ]\n")
-	}
-	preds := make([]common.Predicate, 0)
-	for {
-		pred, err := ps.Parse()
-		if err != nil {
-			return nil, err
-		}
-		if pred == nil {
-			break
-		}
-		preds = append(preds, *pred)
-	}
-	if len(preds) == 0 {
-		return nil, errors.New("Empty query string")
-	}
-	return preds, nil
-}
-
-// Send a query from a query string.
-func doQueryFromString(hcl *htrace.Client, str string, lim int) error {
-	query := &common.Query{Lim: lim}
-	var err error
-	query.Predicates, err = parseQueryString(str)
-	if err != nil {
-		return err
-	}
-	return doQuery(hcl, query)
-}
-
-// Send a query from a raw JSON string.
-func doRawQuery(hcl *htrace.Client, str string) error {
-	jsonBytes := []byte(str)
-	var query common.Query
-	err := json.Unmarshal(jsonBytes, &query)
-	if err != nil {
-		return errors.New(fmt.Sprintf("Error parsing provided JSON: %s\n", err.Error()))
-	}
-	return doQuery(hcl, &query)
-}
-
-// Send a query.
-func doQuery(hcl *htrace.Client, query *common.Query) error {
-	if verbose {
-		qbytes, err := json.Marshal(*query)
-		if err != nil {
-			qbytes = []byte("marshaling error: " + err.Error())
-		}
-		fmt.Printf("Sending query: %s\n", string(qbytes))
-	}
-	spans, err := hcl.Query(query)
-	if err != nil {
-		return err
-	}
-	if verbose {
-		fmt.Printf("%d results...\n", len(spans))
-	}
-	for i := range spans {
-		fmt.Printf("%s\n", spans[i].ToJson())
-	}
-	return nil
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htracedTool/query_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htracedTool/query_test.go b/htrace-htraced/go/src/org/apache/htrace/htracedTool/query_test.go
deleted file mode 100644
index 755d0b0..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htracedTool/query_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"encoding/json"
-	"org/apache/htrace/common"
-	"reflect"
-	"testing"
-)
-
-func predsToStr(preds []common.Predicate) string {
-	b, err := json.MarshalIndent(preds, "", "  ")
-	if err != nil {
-		return "JSON marshaling error: " + err.Error()
-	}
-	return string(b)
-}
-
-func checkParseQueryString(t *testing.T, str string, epreds []common.Predicate) {
-	preds, err := parseQueryString(str)
-	if err != nil {
-		t.Fatalf("got unexpected parseQueryString error: %s\n", err.Error())
-	}
-	if !reflect.DeepEqual(preds, epreds) {
-		t.Fatalf("Unexpected result from parseQueryString.  "+
-			"Expected: %s, got: %s\n", predsToStr(epreds), predsToStr(preds))
-	}
-}
-
-func TestParseQueryString(t *testing.T) {
-	verbose = testing.Verbose()
-	checkParseQueryString(t, "description eq ls", []common.Predicate{
-		common.Predicate{
-			Op:    common.EQUALS,
-			Field: common.DESCRIPTION,
-			Val:   "ls",
-		},
-	})
-	checkParseQueryString(t, "begin gt 123 and end le 456", []common.Predicate{
-		common.Predicate{
-			Op:    common.GREATER_THAN,
-			Field: common.BEGIN_TIME,
-			Val:   "123",
-		},
-		common.Predicate{
-			Op:    common.LESS_THAN_OR_EQUALS,
-			Field: common.END_TIME,
-			Val:   "456",
-		},
-	})
-	checkParseQueryString(t, `DESCRIPTION cn "Foo Bar" and `+
-		`BEGIN ge "999" and SPANID eq "4565d8abc4f70ac1216a3f1834c6860b"`,
-		[]common.Predicate{
-			common.Predicate{
-				Op:    common.CONTAINS,
-				Field: common.DESCRIPTION,
-				Val:   "Foo Bar",
-			},
-			common.Predicate{
-				Op:    common.GREATER_THAN_OR_EQUALS,
-				Field: common.BEGIN_TIME,
-				Val:   "999",
-			},
-			common.Predicate{
-				Op:    common.EQUALS,
-				Field: common.SPAN_ID,
-				Val:   "4565d8abc4f70ac1216a3f1834c6860b",
-			},
-		})
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/test/random.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/test/random.go b/htrace-htraced/go/src/org/apache/htrace/test/random.go
deleted file mode 100644
index 540ea14..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/test/random.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package test
-
-import (
-	"fmt"
-	"math/rand"
-	"org/apache/htrace/common"
-)
-
-func NonZeroRand64(rnd *rand.Rand) int64 {
-	for {
-		r := rnd.Int63()
-		if r == 0 {
-			continue
-		}
-		if rnd.Intn(1) != 0 {
-			return -r
-		}
-		return r
-	}
-}
-
-func NonZeroRandSpanId(rnd *rand.Rand) common.SpanId {
-	var id common.SpanId
-	id = make([]byte, 16)
-	for i := 0; i < len(id); i++ {
-		id[i] = byte(rnd.Intn(0x100))
-	}
-	return id
-}
-
-func NonZeroRand32(rnd *rand.Rand) int32 {
-	for {
-		r := rnd.Int31()
-		if r == 0 {
-			continue
-		}
-		if rnd.Intn(1) != 0 {
-			return -r
-		}
-		return r
-	}
-}
-
-// Create a random span.
-func NewRandomSpan(rnd *rand.Rand, potentialParents []*common.Span) *common.Span {
-	parents := []common.SpanId{}
-	if potentialParents != nil {
-		parentIdx := rnd.Intn(len(potentialParents) + 1)
-		if parentIdx < len(potentialParents) {
-			parents = []common.SpanId{potentialParents[parentIdx].Id}
-		}
-	}
-	return &common.Span{Id: NonZeroRandSpanId(rnd),
-		SpanData: common.SpanData{
-			Begin:       NonZeroRand64(rnd),
-			End:         NonZeroRand64(rnd),
-			Description: "getFileDescriptors",
-			Parents:     parents,
-			TracerId:    fmt.Sprintf("tracer%d", NonZeroRand32(rnd)),
-		}}
-}
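
A brief sketch, not part of the file above, of how NewRandomSpan is typically chained so that each new span may pick any earlier span as its parent. The helper name, seed, and count are illustrative, and the import path is the pre-rename one this commit removes.

package test

import (
	"math/rand"

	"org/apache/htrace/common"
)

// GenerateRandomSpans is an illustrative helper (it does not exist in the
// package above) that builds a batch of random spans in which each new span
// may choose any previously generated span as its parent.
func GenerateRandomSpans(seed int64, num int) []*common.Span {
	rnd := rand.New(rand.NewSource(seed))
	spans := make([]*common.Span, 0, num)
	for i := 0; i < num; i++ {
		spans = append(spans, NewRandomSpan(rnd, spans))
	}
	return spans
}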


[7/7] incubator-htrace git commit: HTRACE-357. Rename htrace-htraced/go/src/org/apache/htrace to htrace-htraced/go/src/htrace (Colin Patrick McCabe via iwasakims)

Posted by iw...@apache.org.
HTRACE-357. Rename htrace-htraced/go/src/org/apache/htrace to htrace-htraced/go/src/htrace (Colin Patrick McCabe via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/incubator-htrace/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-htrace/commit/5737e65b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-htrace/tree/5737e65b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-htrace/diff/5737e65b

Branch: refs/heads/master
Commit: 5737e65b04b0fe7f26ae8bf9a5a9abcf22f14f55
Parents: e629995
Author: Masatake Iwasaki <iw...@apache.org>
Authored: Wed Apr 20 00:27:32 2016 +0900
Committer: Masatake Iwasaki <iw...@apache.org>
Committed: Wed Apr 20 00:27:32 2016 +0900

----------------------------------------------------------------------
 htrace-htraced/go/gobuild.sh                    |    6 +-
 htrace-htraced/go/src/htrace/client/client.go   |  285 ++++
 htrace-htraced/go/src/htrace/client/hclient.go  |  185 +++
 htrace-htraced/go/src/htrace/common/log.go      |  332 +++++
 htrace-htraced/go/src/htrace/common/log_test.go |  170 +++
 htrace-htraced/go/src/htrace/common/process.go  |  101 ++
 .../go/src/htrace/common/process_test.go        |  116 ++
 htrace-htraced/go/src/htrace/common/query.go    |  128 ++
 .../go/src/htrace/common/query_test.go          |   50 +
 htrace-htraced/go/src/htrace/common/rpc.go      |  159 +++
 .../go/src/htrace/common/semaphore.go           |   78 +
 .../go/src/htrace/common/semaphore_test.go      |   86 ++
 htrace-htraced/go/src/htrace/common/span.go     |  217 +++
 .../go/src/htrace/common/span_test.go           |  116 ++
 .../go/src/htrace/common/test_util.go           |   91 ++
 htrace-htraced/go/src/htrace/common/time.go     |   34 +
 .../go/src/htrace/common/time_test.go           |   38 +
 htrace-htraced/go/src/htrace/conf/config.go     |  302 ++++
 .../go/src/htrace/conf/config_keys.go           |  134 ++
 .../go/src/htrace/conf/config_test.go           |  144 ++
 htrace-htraced/go/src/htrace/conf/xml.go        |   61 +
 .../go/src/htrace/htraced/client_test.go        |  484 +++++++
 .../go/src/htrace/htraced/datastore.go          | 1339 +++++++++++++++++
 .../go/src/htrace/htraced/datastore_test.go     |  761 ++++++++++
 .../go/src/htrace/htraced/heartbeater.go        |  125 ++
 .../go/src/htrace/htraced/heartbeater_test.go   |  100 ++
 htrace-htraced/go/src/htrace/htraced/hrpc.go    |  386 +++++
 htrace-htraced/go/src/htrace/htraced/htraced.go |  181 +++
 htrace-htraced/go/src/htrace/htraced/loader.go  |  511 +++++++
 htrace-htraced/go/src/htrace/htraced/metrics.go |  194 +++
 .../go/src/htrace/htraced/metrics_test.go       |  172 +++
 .../go/src/htrace/htraced/mini_htraced.go       |  193 +++
 .../go/src/htrace/htraced/reaper_test.go        |   83 ++
 htrace-htraced/go/src/htrace/htraced/rest.go    |  376 +++++
 htrace-htraced/go/src/htrace/htracedTool/cmd.go |  442 ++++++
 .../go/src/htrace/htracedTool/file.go           |  138 ++
 .../go/src/htrace/htracedTool/file_test.go      |  161 +++
 .../go/src/htrace/htracedTool/graph.go          |  116 ++
 .../go/src/htrace/htracedTool/graph_test.go     |   80 ++
 .../go/src/htrace/htracedTool/queries.go        |  172 +++
 .../go/src/htrace/htracedTool/query_test.go     |   88 ++
 htrace-htraced/go/src/htrace/test/random.go     |   80 ++
 .../go/src/org/apache/htrace/client/client.go   |  285 ----
 .../go/src/org/apache/htrace/client/hclient.go  |  185 ---
 .../go/src/org/apache/htrace/common/log.go      |  332 -----
 .../go/src/org/apache/htrace/common/log_test.go |  170 ---
 .../go/src/org/apache/htrace/common/process.go  |  101 --
 .../org/apache/htrace/common/process_test.go    |  116 --
 .../go/src/org/apache/htrace/common/query.go    |  128 --
 .../src/org/apache/htrace/common/query_test.go  |   50 -
 .../go/src/org/apache/htrace/common/rpc.go      |  159 ---
 .../src/org/apache/htrace/common/semaphore.go   |   78 -
 .../org/apache/htrace/common/semaphore_test.go  |   86 --
 .../go/src/org/apache/htrace/common/span.go     |  217 ---
 .../src/org/apache/htrace/common/span_test.go   |  116 --
 .../src/org/apache/htrace/common/test_util.go   |   91 --
 .../go/src/org/apache/htrace/common/time.go     |   34 -
 .../src/org/apache/htrace/common/time_test.go   |   38 -
 .../go/src/org/apache/htrace/conf/config.go     |  302 ----
 .../src/org/apache/htrace/conf/config_keys.go   |  134 --
 .../src/org/apache/htrace/conf/config_test.go   |  144 --
 .../go/src/org/apache/htrace/conf/xml.go        |   61 -
 .../org/apache/htrace/htraced/client_test.go    |  484 -------
 .../src/org/apache/htrace/htraced/datastore.go  | 1340 ------------------
 .../org/apache/htrace/htraced/datastore_test.go |  761 ----------
 .../org/apache/htrace/htraced/heartbeater.go    |  125 --
 .../apache/htrace/htraced/heartbeater_test.go   |  100 --
 .../go/src/org/apache/htrace/htraced/hrpc.go    |  386 -----
 .../go/src/org/apache/htrace/htraced/htraced.go |  181 ---
 .../go/src/org/apache/htrace/htraced/loader.go  |  511 -------
 .../go/src/org/apache/htrace/htraced/metrics.go |  194 ---
 .../org/apache/htrace/htraced/metrics_test.go   |  172 ---
 .../org/apache/htrace/htraced/mini_htraced.go   |  193 ---
 .../org/apache/htrace/htraced/reaper_test.go    |   83 --
 .../go/src/org/apache/htrace/htraced/rest.go    |  376 -----
 .../go/src/org/apache/htrace/htracedTool/cmd.go |  442 ------
 .../src/org/apache/htrace/htracedTool/file.go   |  138 --
 .../org/apache/htrace/htracedTool/file_test.go  |  161 ---
 .../src/org/apache/htrace/htracedTool/graph.go  |  116 --
 .../org/apache/htrace/htracedTool/graph_test.go |   80 --
 .../org/apache/htrace/htracedTool/queries.go    |  172 ---
 .../org/apache/htrace/htracedTool/query_test.go |   88 --
 .../go/src/org/apache/htrace/test/random.go     |   80 --
 83 files changed, 9012 insertions(+), 9013 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/gobuild.sh
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/gobuild.sh b/htrace-htraced/go/gobuild.sh
index de9e687..98123be 100755
--- a/htrace-htraced/go/gobuild.sh
+++ b/htrace-htraced/go/gobuild.sh
@@ -132,15 +132,15 @@ install)
     # Inject the release and git version into the htraced ldflags.
     echo "Building ${RELEASE_VERSION} [${GIT_VERSION}]"
     FLAGS="-X main.RELEASE_VERSION ${RELEASE_VERSION} -X main.GIT_VERSION ${GIT_VERSION}"
-    go install ${TAGS} -ldflags "${FLAGS}" -v org/apache/htrace/... "$@" \
+    go install ${TAGS} -ldflags "${FLAGS}" -v htrace/... "$@" \
         || die "go install failed."
     # Set the RPATH to make bundling leveldb and snappy easier.
     set_rpath "${GOBIN}/htraced"
     ;;
 bench)
-    go test org/apache/htrace/... ${TAGS} -test.bench=. "$@"
+    go test htrace/... ${TAGS} -test.bench=. "$@"
     ;;
 *)
-    go ${ACTION} org/apache/htrace/... ${TAGS} "$@"
+    go ${ACTION} htrace/... ${TAGS} "$@"
     ;;
 esac

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/client/client.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/client/client.go b/htrace-htraced/go/src/htrace/client/client.go
new file mode 100644
index 0000000..81b45d3
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/client/client.go
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"htrace/common"
+	"htrace/conf"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// A golang client for htraced.
+// TODO: fancier APIs for streaming spans in the background, optimize TCP stuff
+func NewClient(cnf *conf.Config, testHooks *TestHooks) (*Client, error) {
+	hcl := Client{testHooks: testHooks}
+	hcl.restAddr = cnf.Get(conf.HTRACE_WEB_ADDRESS)
+	if testHooks != nil && testHooks.HrpcDisabled {
+		hcl.hrpcAddr = ""
+	} else {
+		hcl.hrpcAddr = cnf.Get(conf.HTRACE_HRPC_ADDRESS)
+	}
+	return &hcl, nil
+}
+
+type TestHooks struct {
+	// If true, HRPC is disabled.
+	HrpcDisabled bool
+
+	// A function which gets called after we connect to the server and send the
+	// message frame, but before we write the message body.
+	HandleWriteRequestBody func()
+}
+
+type Client struct {
+	// REST address of the htraced server.
+	restAddr string
+
+	// HRPC address of the htraced server.
+	hrpcAddr string
+
+	// The test hooks to use, or nil if test hooks are not enabled.
+	testHooks *TestHooks
+}
+
+// Get the htraced server version information.
+func (hcl *Client) GetServerVersion() (*common.ServerVersion, error) {
+	buf, _, err := hcl.makeGetRequest("server/info")
+	if err != nil {
+		return nil, err
+	}
+	var info common.ServerVersion
+	err = json.Unmarshal(buf, &info)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("Error: error unmarshalling response "+
+			"body %s: %s", string(buf), err.Error()))
+	}
+	return &info, nil
+}
+
+// Get the htraced server debug information.
+func (hcl *Client) GetServerDebugInfo() (*common.ServerDebugInfo, error) {
+	buf, _, err := hcl.makeGetRequest("server/debugInfo")
+	if err != nil {
+		return nil, err
+	}
+	var debugInfo common.ServerDebugInfo
+	err = json.Unmarshal(buf, &debugInfo)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("Error: error unmarshalling response "+
+			"body %s: %s", string(buf), err.Error()))
+	}
+	return &debugInfo, nil
+}
+
+// Get the htraced server statistics.
+func (hcl *Client) GetServerStats() (*common.ServerStats, error) {
+	buf, _, err := hcl.makeGetRequest("server/stats")
+	if err != nil {
+		return nil, err
+	}
+	var stats common.ServerStats
+	err = json.Unmarshal(buf, &stats)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("Error: error unmarshalling response "+
+			"body %s: %s", string(buf), err.Error()))
+	}
+	return &stats, nil
+}
+
+// Get the htraced server configuration.
+func (hcl *Client) GetServerConf() (map[string]string, error) {
+	buf, _, err := hcl.makeGetRequest("server/conf")
+	if err != nil {
+		return nil, err
+	}
+	cnf := make(map[string]string)
+	err = json.Unmarshal(buf, &cnf)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("Error: error unmarshalling response "+
+			"body %s: %s", string(buf), err.Error()))
+	}
+	return cnf, nil
+}
+
+// Get information about a trace span.  Returns nil, nil if the span was not found.
+func (hcl *Client) FindSpan(sid common.SpanId) (*common.Span, error) {
+	buf, rc, err := hcl.makeGetRequest(fmt.Sprintf("span/%s", sid.String()))
+	if err != nil {
+		if rc == http.StatusNoContent {
+			return nil, nil
+		}
+		return nil, err
+	}
+	var span common.Span
+	err = json.Unmarshal(buf, &span)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("Error unmarshalling response "+
+			"body %s: %s", string(buf), err.Error()))
+	}
+	return &span, nil
+}
+
+func (hcl *Client) WriteSpans(spans []*common.Span) error {
+	if hcl.hrpcAddr == "" {
+		return hcl.writeSpansHttp(spans)
+	}
+	hcr, err := newHClient(hcl.hrpcAddr, hcl.testHooks)
+	if err != nil {
+		return err
+	}
+	defer hcr.Close()
+	return hcr.writeSpans(spans)
+}
+
+func (hcl *Client) writeSpansHttp(spans []*common.Span) error {
+	req := common.WriteSpansReq{
+		NumSpans: len(spans),
+	}
+	var w bytes.Buffer
+	enc := json.NewEncoder(&w)
+	err := enc.Encode(req)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error serializing WriteSpansReq: %s",
+			err.Error()))
+	}
+	for spanIdx := range spans {
+		err := enc.Encode(spans[spanIdx])
+		if err != nil {
+			return errors.New(fmt.Sprintf("Error serializing span %d out "+
+				"of %d: %s", spanIdx, len(spans), err.Error()))
+		}
+	}
+	_, _, err = hcl.makeRestRequest("POST", "writeSpans", &w)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Find the child IDs of a given span ID.
+func (hcl *Client) FindChildren(sid common.SpanId, lim int) ([]common.SpanId, error) {
+	buf, _, err := hcl.makeGetRequest(fmt.Sprintf("span/%s/children?lim=%d",
+		sid.String(), lim))
+	if err != nil {
+		return nil, err
+	}
+	var spanIds []common.SpanId
+	err = json.Unmarshal(buf, &spanIds)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("Error: error unmarshalling response "+
+			"body %s: %s", string(buf), err.Error()))
+	}
+	return spanIds, nil
+}
+
+// Make a query
+func (hcl *Client) Query(query *common.Query) ([]common.Span, error) {
+	in, err := json.Marshal(query)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("Error marshalling query: %s", err.Error()))
+	}
+	var out []byte
+	var url = fmt.Sprintf("query?query=%s", in)
+	out, _, err = hcl.makeGetRequest(url)
+	if err != nil {
+		return nil, err
+	}
+	var spans []common.Span
+	err = json.Unmarshal(out, &spans)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("Error unmarshalling results: %s", err.Error()))
+	}
+	return spans, nil
+}
+
+func (hcl *Client) makeGetRequest(reqName string) ([]byte, int, error) {
+	return hcl.makeRestRequest("GET", reqName, nil)
+}
+
+// Make a general JSON REST request.
+// Returns the response body, the response code, and the error.
+// Note: if the response code is non-zero, the error will also be non-nil.
+func (hcl *Client) makeRestRequest(reqType string, reqName string,
+	reqBody io.Reader) ([]byte, int, error) {
+	url := fmt.Sprintf("http://%s/%s",
+		hcl.restAddr, reqName)
+	req, err := http.NewRequest(reqType, url, reqBody)
+	req.Header.Set("Content-Type", "application/json")
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, -1, errors.New(fmt.Sprintf("Error: error making http request to %s: %s\n", url,
+			err.Error()))
+	}
+	defer resp.Body.Close()
+	body, err2 := ioutil.ReadAll(resp.Body)
+	if err2 != nil {
+		return nil, -1, errors.New(fmt.Sprintf("Error: error reading response body: %s\n", err2.Error()))
+	}
+	if resp.StatusCode != http.StatusOK {
+		return nil, resp.StatusCode,
+			errors.New(fmt.Sprintf("Error: got bad response status from "+
+				"%s: %s\n%s\n", url, resp.Status, body))
+	}
+	return body, 0, nil
+}
+
+// Dump all spans from the htraced daemon.
+func (hcl *Client) DumpAll(lim int, out chan *common.Span) error {
+	defer func() {
+		close(out)
+	}()
+	searchId := common.INVALID_SPAN_ID
+	for {
+		q := common.Query{
+			Lim: lim,
+			Predicates: []common.Predicate{
+				common.Predicate{
+					Op:    "ge",
+					Field: "spanid",
+					Val:   searchId.String(),
+				},
+			},
+		}
+		spans, err := hcl.Query(&q)
+		if err != nil {
+			return errors.New(fmt.Sprintf("Error querying spans with IDs at or after "+
+				"%s: %s", searchId.String(), err.Error()))
+		}
+		if len(spans) == 0 {
+			return nil
+		}
+		for i := range spans {
+			out <- &spans[i]
+		}
+		searchId = spans[len(spans)-1].Id.Next()
+	}
+}
+
+func (hcl *Client) Close() {
+	hcl.restAddr = ""
+	hcl.hrpcAddr = ""
+}
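
For reference, a minimal usage sketch of the renamed client package (import
paths as introduced by this commit; the address and the example query are
placeholders, not documented defaults):

    package main

    import (
        "fmt"
        "os"

        "htrace/client"
        "htrace/common"
        "htrace/conf"
    )

    func main() {
        // Point the client at an htraced REST endpoint.  The address below
        // is only an example value.
        cnfBld := conf.Builder{
            Values:   map[string]string{conf.HTRACE_WEB_ADDRESS: "127.0.0.1:9095"},
            Defaults: conf.DEFAULTS,
        }
        cnf, err := cnfBld.Build()
        if err != nil {
            fmt.Fprintf(os.Stderr, "error building configuration: %s\n", err.Error())
            os.Exit(1)
        }
        // A nil TestHooks leaves HRPC enabled and installs no test callbacks.
        hcl, err := client.NewClient(cnf, nil)
        if err != nil {
            fmt.Fprintf(os.Stderr, "error creating client: %s\n", err.Error())
            os.Exit(1)
        }
        defer hcl.Close()
        version, err := hcl.GetServerVersion()
        if err != nil {
            fmt.Fprintf(os.Stderr, "error fetching server version: %s\n", err.Error())
            os.Exit(1)
        }
        fmt.Printf("htraced %s (git %s)\n", version.ReleaseVersion, version.GitVersion)
        // Fetch up to 10 spans with IDs at or above the minimum span ID,
        // mirroring the predicate used by DumpAll.
        spans, err := hcl.Query(&common.Query{
            Lim: 10,
            Predicates: []common.Predicate{
                {
                    Op:    common.GREATER_THAN_OR_EQUALS,
                    Field: common.SPAN_ID,
                    Val:   common.INVALID_SPAN_ID.String(),
                },
            },
        })
        if err != nil {
            fmt.Fprintf(os.Stderr, "query failed: %s\n", err.Error())
            os.Exit(1)
        }
        fmt.Printf("got %d span(s)\n", len(spans))
    }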

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/client/hclient.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/client/hclient.go b/htrace-htraced/go/src/htrace/client/hclient.go
new file mode 100644
index 0000000..a196f2d
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/client/hclient.go
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package client
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"github.com/ugorji/go/codec"
+	"htrace/common"
+	"io"
+	"net"
+	"net/rpc"
+)
+
+type hClient struct {
+	rpcClient *rpc.Client
+}
+
+type HrpcClientCodec struct {
+	rwc       io.ReadWriteCloser
+	length    uint32
+	testHooks *TestHooks
+}
+
+func (cdc *HrpcClientCodec) WriteRequest(rr *rpc.Request, msg interface{}) error {
+	methodId := common.HrpcMethodNameToId(rr.ServiceMethod)
+	if methodId == common.METHOD_ID_NONE {
+		return errors.New(fmt.Sprintf("HrpcClientCodec: Unknown method name %s",
+			rr.ServiceMethod))
+	}
+	mh := new(codec.MsgpackHandle)
+	mh.WriteExt = true
+	w := bytes.NewBuffer(make([]byte, 0, 2048))
+
+	var err error
+	enc := codec.NewEncoder(w, mh)
+	if methodId == common.METHOD_ID_WRITE_SPANS {
+		spans := msg.([]*common.Span)
+		req := &common.WriteSpansReq{
+			NumSpans: len(spans),
+		}
+		err = enc.Encode(req)
+		if err != nil {
+			return errors.New(fmt.Sprintf("HrpcClientCodec: Unable to marshal "+
+				"message as msgpack: %s", err.Error()))
+		}
+		for spanIdx := range spans {
+			err = enc.Encode(spans[spanIdx])
+			if err != nil {
+				return errors.New(fmt.Sprintf("HrpcClientCodec: Unable to marshal "+
+					"span %d out of %d as msgpack: %s", spanIdx, len(spans), err.Error()))
+			}
+		}
+	} else {
+		err = enc.Encode(msg)
+		if err != nil {
+			return errors.New(fmt.Sprintf("HrpcClientCodec: Unable to marshal "+
+				"message as msgpack: %s", err.Error()))
+		}
+	}
+	buf := w.Bytes()
+	if len(buf) > common.MAX_HRPC_BODY_LENGTH {
+		return errors.New(fmt.Sprintf("HrpcClientCodec: message body is %d "+
+			"bytes, but the maximum message size is %d bytes.",
+			len(buf), common.MAX_HRPC_BODY_LENGTH))
+	}
+	hdr := common.HrpcRequestHeader{
+		Magic:    common.HRPC_MAGIC,
+		MethodId: methodId,
+		Seq:      rr.Seq,
+		Length:   uint32(len(buf)),
+	}
+	err = binary.Write(cdc.rwc, binary.LittleEndian, &hdr)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error writing header bytes: %s",
+			err.Error()))
+	}
+	if cdc.testHooks != nil && cdc.testHooks.HandleWriteRequestBody != nil {
+		cdc.testHooks.HandleWriteRequestBody()
+	}
+	_, err = cdc.rwc.Write(buf)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error writing body bytes: %s",
+			err.Error()))
+	}
+	return nil
+}
+
+func (cdc *HrpcClientCodec) ReadResponseHeader(resp *rpc.Response) error {
+	hdr := common.HrpcResponseHeader{}
+	err := binary.Read(cdc.rwc, binary.LittleEndian, &hdr)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error reading response header "+
+			"bytes: %s", err.Error()))
+	}
+	resp.ServiceMethod = common.HrpcMethodIdToMethodName(hdr.MethodId)
+	if resp.ServiceMethod == "" {
+		return errors.New(fmt.Sprintf("Error reading response header: "+
+			"invalid method ID %d.", hdr.MethodId))
+	}
+	resp.Seq = hdr.Seq
+	if hdr.ErrLength > 0 {
+		if hdr.ErrLength > common.MAX_HRPC_ERROR_LENGTH {
+			return errors.New(fmt.Sprintf("Error reading response header: "+
+				"error message was %d bytes long, but "+
+				"MAX_HRPC_ERROR_LENGTH is %d.",
+				hdr.ErrLength, common.MAX_HRPC_ERROR_LENGTH))
+		}
+		buf := make([]byte, hdr.ErrLength)
+		var nread int
+		nread, err = cdc.rwc.Read(buf)
+		if uint32(nread) != hdr.ErrLength {
+			return errors.New(fmt.Sprintf("Error reading response header: "+
+				"failed to read %d bytes of error message.", nread))
+		}
+		if err != nil {
+			return errors.New(fmt.Sprintf("Error reading response header: "+
+				"failed to read %d bytes of error message: %s",
+				nread, err.Error()))
+		}
+		resp.Error = string(buf)
+	} else {
+		resp.Error = ""
+	}
+	cdc.length = hdr.Length
+	return nil
+}
+
+func (cdc *HrpcClientCodec) ReadResponseBody(body interface{}) error {
+	mh := new(codec.MsgpackHandle)
+	mh.WriteExt = true
+	dec := codec.NewDecoder(io.LimitReader(cdc.rwc, int64(cdc.length)), mh)
+	err := dec.Decode(body)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Failed to read response body: %s",
+			err.Error()))
+	}
+	return nil
+}
+
+func (cdc *HrpcClientCodec) Close() error {
+	return cdc.rwc.Close()
+}
+
+func newHClient(hrpcAddr string, testHooks *TestHooks) (*hClient, error) {
+	hcr := hClient{}
+	conn, err := net.Dial("tcp", hrpcAddr)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("Error contacting the HRPC server "+
+			"at %s: %s", hrpcAddr, err.Error()))
+	}
+	hcr.rpcClient = rpc.NewClientWithCodec(&HrpcClientCodec{
+		rwc:       conn,
+		testHooks: testHooks,
+	})
+	return &hcr, nil
+}
+
+func (hcr *hClient) writeSpans(spans []*common.Span) error {
+	resp := common.WriteSpansResp{}
+	return hcr.rpcClient.Call(common.METHOD_NAME_WRITE_SPANS, spans, &resp)
+}
+
+func (hcr *hClient) Close() {
+	hcr.rpcClient.Close()
+}
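
The message-body encoding performed by WriteRequest above can also be sketched
on its own; the following standalone example (the helper name is hypothetical)
uses the same MsgpackHandle settings to encode a WriteSpansReq followed by its
spans:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ugorji/go/codec"
        "htrace/common"
    )

    // encodeWriteSpansBody builds an HRPC WriteSpans body the same way
    // HrpcClientCodec.WriteRequest does: a WriteSpansReq record, followed
    // by each span, all msgpack-encoded into one buffer.
    func encodeWriteSpansBody(spans []*common.Span) ([]byte, error) {
        mh := new(codec.MsgpackHandle)
        mh.WriteExt = true
        w := bytes.NewBuffer(make([]byte, 0, 2048))
        enc := codec.NewEncoder(w, mh)
        if err := enc.Encode(&common.WriteSpansReq{NumSpans: len(spans)}); err != nil {
            return nil, err
        }
        for i := range spans {
            if err := enc.Encode(spans[i]); err != nil {
                return nil, err
            }
        }
        return w.Bytes(), nil
    }

    func main() {
        body, err := encodeWriteSpansBody(nil)
        if err != nil {
            panic(err)
        }
        fmt.Printf("encoded %d byte(s)\n", len(body))
    }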

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/log.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/log.go b/htrace-htraced/go/src/htrace/common/log.go
new file mode 100644
index 0000000..16c94b4
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/log.go
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"errors"
+	"fmt"
+	"htrace/conf"
+	"log"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+// A logSink is a place logs can be written to.
+type logSink struct {
+	path     logPath
+	file     *os.File
+	lock     sync.Mutex
+	refCount int // protected by logFilesLock
+}
+
+// Write to the logSink.
+func (sink *logSink) write(str string) {
+	sink.lock.Lock()
+	defer sink.lock.Unlock()
+	_, err := sink.file.Write([]byte(str))
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error logging to '%s': %s\n", sink.path, err.Error())
+	}
+}
+
+// Unreference the logSink.  If there are no more references, and the logSink is
+// closeable, then we will close it here.
+func (sink *logSink) Unref() {
+	logFilesLock.Lock()
+	defer logFilesLock.Unlock()
+	sink.refCount--
+	if sink.refCount <= 0 {
+		if sink.path.IsCloseable() {
+			err := sink.file.Close()
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "Error closing log file %s: %s\n",
+					sink.path, err.Error())
+			}
+		}
+		logSinks[sink.path] = nil
+	}
+}
+
+type logPath string
+
+// An empty LogPath represents "stdout."
+const STDOUT_LOG_PATH = ""
+
+// Convert a path to a logPath.
+func logPathFromString(path string) logPath {
+	if path == STDOUT_LOG_PATH {
+		return logPath("")
+	}
+	absPath, err := filepath.Abs(path)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to get absolute path of %s: %s\n",
+			path, err.Error())
+		return logPath(path)
+	}
+	return logPath(absPath)
+}
+
+// Convert the path to a human-readable string.
+func (path logPath) String() string {
+	if path == "" {
+		return "(stdout)"
+	} else {
+		return string(path)
+	}
+}
+
+// Return true if the path is closeable.  stdout is not closeable.
+func (path logPath) IsCloseable() bool {
+	return path != STDOUT_LOG_PATH
+}
+
+func (path logPath) Open() *logSink {
+	if path == STDOUT_LOG_PATH {
+		return &logSink{path: path, file: os.Stdout}
+	}
+	file, err := os.OpenFile(string(path), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+	if err != nil {
+		sink := &logSink{path: STDOUT_LOG_PATH, file: os.Stdout}
+		fmt.Fprintf(os.Stderr, "Failed to open log file %s: %s\n",
+			path, err.Error())
+		return sink
+	}
+	return &logSink{path: path, file: file}
+}
+
+var logFilesLock sync.Mutex
+
+var logSinks map[logPath]*logSink = make(map[logPath]*logSink)
+
+func getOrCreateLogSink(pathStr string) *logSink {
+	path := logPathFromString(pathStr)
+	logFilesLock.Lock()
+	defer logFilesLock.Unlock()
+	sink := logSinks[path]
+	if sink == nil {
+		sink = path.Open()
+		logSinks[path] = sink
+	}
+	sink.refCount++
+	return sink
+}
+
+type Level int
+
+const (
+	TRACE Level = iota
+	DEBUG
+	INFO
+	WARN
+	ERROR
+)
+
+var levelToString map[Level]string = map[Level]string{
+	TRACE: "TRACE",
+	DEBUG: "DEBUG",
+	INFO:  "INFO",
+	WARN:  "WARN",
+	ERROR: "ERROR",
+}
+
+func (level Level) String() string {
+	return levelToString[level]
+}
+
+func (level Level) LogString() string {
+	return level.String()[0:1]
+}
+
+func LevelFromString(str string) (Level, error) {
+	for k, v := range levelToString {
+		if strings.ToLower(v) == strings.ToLower(str) {
+			return k, nil
+		}
+	}
+	var levelNames sort.StringSlice
+	levelNames = make([]string, len(levelToString))
+	var i int
+	for _, v := range levelToString {
+		levelNames[i] = v
+		i++
+	}
+	sort.Sort(levelNames)
+	return TRACE, errors.New(fmt.Sprintf("No such level as '%s'.  Valid "+
+		"levels are '%v'\n", str, levelNames))
+}
+
+type Logger struct {
+	sink  *logSink
+	Level Level
+}
+
+func NewLogger(faculty string, cnf *conf.Config) *Logger {
+	path, level := parseConf(faculty, cnf)
+	sink := getOrCreateLogSink(path)
+	return &Logger{sink: sink, Level: level}
+}
+
+func parseConf(faculty string, cnf *conf.Config) (string, Level) {
+	facultyLogPathKey := faculty + "." + conf.HTRACE_LOG_PATH
+	var facultyLogPath string
+	if cnf.Contains(facultyLogPathKey) {
+		facultyLogPath = cnf.Get(facultyLogPathKey)
+	} else {
+		facultyLogPath = cnf.Get(conf.HTRACE_LOG_PATH)
+	}
+	facultyLogLevelKey := faculty + "." + conf.HTRACE_LOG_LEVEL
+	var facultyLogLevelStr string
+	if cnf.Contains(facultyLogLevelKey) {
+		facultyLogLevelStr = cnf.Get(facultyLogLevelKey)
+	} else {
+		facultyLogLevelStr = cnf.Get(conf.HTRACE_LOG_LEVEL)
+	}
+	level, err := LevelFromString(facultyLogLevelStr)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error configuring log level: %s.  Using TRACE.\n", err.Error())
+		level = TRACE
+	}
+	return facultyLogPath, level
+}
+
+func (lg *Logger) Trace(str string) {
+	lg.Write(TRACE, str)
+}
+
+func (lg *Logger) Tracef(format string, v ...interface{}) {
+	lg.Write(TRACE, fmt.Sprintf(format, v...))
+}
+
+func (lg *Logger) Debug(str string) {
+	lg.Write(DEBUG, str)
+}
+
+func (lg *Logger) Debugf(format string, v ...interface{}) {
+	lg.Write(DEBUG, fmt.Sprintf(format, v...))
+}
+
+func (lg *Logger) Info(str string) {
+	lg.Write(INFO, str)
+}
+
+func (lg *Logger) Infof(format string, v ...interface{}) {
+	lg.Write(INFO, fmt.Sprintf(format, v...))
+}
+
+func (lg *Logger) Warn(str string) error {
+	lg.Write(WARN, str)
+	return errors.New(str)
+}
+
+func (lg *Logger) Warnf(format string, v ...interface{}) error {
+	str := fmt.Sprintf(format, v...)
+	lg.Write(WARN, str)
+	return errors.New(str)
+}
+
+func (lg *Logger) Error(str string) error {
+	lg.Write(ERROR, str)
+	return errors.New(str)
+}
+
+func (lg *Logger) Errorf(format string, v ...interface{}) error {
+	str := fmt.Sprintf(format, v...)
+	lg.Write(ERROR, str)
+	return errors.New(str)
+}
+
+func (lg *Logger) Write(level Level, str string) {
+	if level >= lg.Level {
+		lg.sink.write(time.Now().UTC().Format(time.RFC3339) + " " +
+			level.LogString() + ": " + str)
+	}
+}
+
+//
+// A few functions which can be used to determine if a certain level of tracing
+// is enabled.  These are useful in situations when evaluating the parameters
+// of a logging function is expensive.  (Note, however, that we don't pay the
+// cost of string concatenation and manipulation when a log message doesn't
+// trigger.)
+//
+
+func (lg *Logger) TraceEnabled() bool {
+	return lg.Level <= TRACE
+}
+
+func (lg *Logger) DebugEnabled() bool {
+	return lg.Level <= DEBUG
+}
+
+func (lg *Logger) InfoEnabled() bool {
+	return lg.Level <= INFO
+}
+
+func (lg *Logger) WarnEnabled() bool {
+	return lg.Level <= WARN
+}
+
+func (lg *Logger) ErrorEnabled() bool {
+	return lg.Level <= ERROR
+}
+
+func (lg *Logger) LevelEnabled(level Level) bool {
+	return lg.Level <= level
+}
+
+func (lg *Logger) Close() {
+	lg.sink.Unref()
+	lg.sink = nil
+}
+
+// Wraps an htrace logger in a golang standard logger.
+//
+// This is a bit messy because of the difference in interfaces between the
+// golang standard logger and the htrace logger.  The golang standard logger
+// doesn't support log levels directly, so you must choose up front what htrace
+// log level all messages should be treated as.  Golang standard loggers expect
+// to be able to write to an io.Writer, but make no guarantees about whether
+// they will break messages into multiple Write() calls (although this does
+// not seem to be a major problem in practice.)
+//
+// Despite these limitations, it's still useful to have this method to be able
+// to log things that come out of the go HTTP server and other standard library
+// systems.
+type WrappedLogger struct {
+	lg    *Logger
+	level Level
+}
+
+func (lg *Logger) Wrap(prefix string, level Level) *log.Logger {
+	wlg := &WrappedLogger{
+		lg:    lg,
+		level: level,
+	}
+	return log.New(wlg, prefix, 0)
+}
+
+func (wlg *WrappedLogger) Write(p []byte) (int, error) {
+	wlg.lg.Write(wlg.level, string(p))
+	return len(p), nil
+}
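
A minimal sketch of creating a Logger from configuration and wrapping it for
code that expects a standard *log.Logger (the faculty name and settings are
placeholders; as elsewhere in htraced, messages carry their own trailing
newline):

    package main

    import (
        "htrace/common"
        "htrace/conf"
    )

    func main() {
        // Log DEBUG and above; an empty log path means stdout.
        cnfBld := conf.Builder{
            Values: map[string]string{
                conf.HTRACE_LOG_LEVEL: "DEBUG",
                conf.HTRACE_LOG_PATH:  "",
            },
            Defaults: conf.DEFAULTS,
        }
        cnf, err := cnfBld.Build()
        if err != nil {
            panic(err)
        }
        lg := common.NewLogger("example", cnf)
        defer lg.Close()
        lg.Debugf("starting up\n")
        if lg.InfoEnabled() {
            lg.Info("info logging is enabled\n")
        }
        // Hand a standard-library logger to code that expects one (e.g. an
        // http.Server), treating every message it writes as WARN.
        stdLogger := lg.Wrap("http: ", common.WARN)
        stdLogger.Println("a message routed through the htrace logger")
    }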

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/log_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/log_test.go b/htrace-htraced/go/src/htrace/common/log_test.go
new file mode 100644
index 0000000..adb6a57
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/log_test.go
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"bufio"
+	"fmt"
+	"htrace/conf"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+)
+
+func newLogger(faculty string, args ...string) *Logger {
+	cnfBld := conf.Builder{Defaults: conf.DEFAULTS}
+	cnf, err := cnfBld.Build()
+	if err != nil {
+		panic(fmt.Sprintf("failed to create conf: %s", err.Error()))
+	}
+	cnf2 := cnf.Clone(args...)
+	lg := NewLogger(faculty, cnf2)
+	return lg
+}
+
+func TestNewLogger(t *testing.T) {
+	lg := newLogger("foo", "log.level", "TRACE")
+	lg.Close()
+}
+
+func verifyLines(t *testing.T, rdr io.Reader, lines []string) {
+	scanner := bufio.NewScanner(rdr)
+	lineIdx := 0
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, lines[lineIdx]) {
+			t.Fatalf("Error on line %d: didn't find substring '%s' in line '%s'\n",
+				(lineIdx + 1), lines[lineIdx], line)
+		}
+		lineIdx++
+	}
+	if err := scanner.Err(); err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestFileLogs(t *testing.T) {
+	tempDir, err := ioutil.TempDir(os.TempDir(), "testFileLogs")
+	if err != nil {
+		panic(fmt.Sprintf("error creating tempdir: %s\n", err.Error()))
+	}
+	defer os.RemoveAll(tempDir)
+	logPath := tempDir + conf.PATH_SEP + "log"
+	lg := newLogger("foo", "log.level", "DEBUG",
+		"foo.log.level", "INFO",
+		"log.path", logPath)
+	lg.Tracef("Non-important stuff, ignore this.\n")
+	lg.Infof("problem with the foobar\n")
+	lg.Tracef("More non-important stuff, also ignore this.\n")
+	lg.Infof("and another problem with the foobar\n")
+	logFile, err := os.Open(logPath)
+	if err != nil {
+		t.Fatalf("failed to open file %s: %s\n", logPath, err.Error())
+	}
+	verifyLines(t, logFile, []string{
+		"problem with the foobar",
+		"and another problem with the foobar",
+	})
+	logFile.Close()
+	lg.Close()
+}
+
+func TestMultipleFileLogs(t *testing.T) {
+	tempDir, err := ioutil.TempDir(os.TempDir(), "testMultipleFileLogs")
+	if err != nil {
+		panic(fmt.Sprintf("error creating tempdir: %s\n", err.Error()))
+	}
+	defer os.RemoveAll(tempDir)
+	logPath := tempDir + conf.PATH_SEP + "log"
+	fooLg := newLogger("foo", "log.level", "DEBUG",
+		"foo.log.level", "INFO",
+		"log.path", logPath)
+	fooLg.Infof("The foo needs maintenance.\n")
+	barLg := newLogger("bar", "log.level", "DEBUG",
+		"foo.log.level", "INFO",
+		"log.path", logPath)
+	barLg.Debugf("The bar is open\n")
+	fooLg.Errorf("Fizz buzz\n")
+	logFile, err := os.Open(logPath)
+	if err != nil {
+		t.Fatalf("failed to open file %s: %s\n", logPath, err.Error())
+	}
+	fooLg.Tracef("Fizz buzz2\n")
+	barLg.Tracef("Fizz buzz3\n")
+	verifyLines(t, logFile, []string{
+		"The foo needs maintenance.",
+		"The bar is open",
+		"Fizz buzz",
+		"Fizz buzz3",
+	})
+	logFile.Close()
+	fooLg.Close()
+	barLg.Close()
+}
+
+func TestLogLevelEnabled(t *testing.T) {
+	tempDir, err := ioutil.TempDir(os.TempDir(), "TestLogLevelEnabled")
+	if err != nil {
+		panic(fmt.Sprintf("error creating tempdir: %s\n", err.Error()))
+	}
+	defer os.RemoveAll(tempDir)
+	// set log level to DEBUG for facility "foo"
+	logPath := tempDir + conf.PATH_SEP + "log"
+	lg := newLogger("foo", "log.level", "DEBUG",
+		"foo.log.level", "INFO",
+		"log.path", logPath)
+	if lg.TraceEnabled() {
+		t.Fatalf("foo logger has TraceEnabled")
+	}
+	if lg.DebugEnabled() {
+		t.Fatalf("foo logger has DebugEnabled")
+	}
+	if !lg.InfoEnabled() {
+		t.Fatalf("foo logger does not have InfoEnabled")
+	}
+	if !lg.WarnEnabled() {
+		t.Fatalf("foo logger does not have WarnEnabled")
+	}
+	if !lg.ErrorEnabled() {
+		t.Fatalf("foo logger does not have ErrorEnabled")
+	}
+	lg.Close()
+	lg = newLogger("foo", "log.level", "WARN",
+		"foo.log.level", "INFO",
+		"log.path", logPath)
+	if lg.TraceEnabled() {
+		t.Fatalf("foo logger has TraceEnabled")
+	}
+	if lg.DebugEnabled() {
+		t.Fatalf("foo logger has DebugEnabled")
+	}
+	if !lg.InfoEnabled() {
+		t.Fatalf("foo logger does not have InfoEnabled")
+	}
+	if !lg.WarnEnabled() {
+		t.Fatalf("foo logger does not have WarnEnabled")
+	}
+	if !lg.ErrorEnabled() {
+		t.Fatalf("foo logger does not have ErrorEnabled")
+	}
+	lg.Close()
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/process.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/process.go b/htrace-htraced/go/src/htrace/common/process.go
new file mode 100644
index 0000000..8e2a415
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/process.go
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"bytes"
+	"fmt"
+	"htrace/conf"
+	"os"
+	"os/signal"
+	"runtime"
+	"runtime/debug"
+	"syscall"
+)
+
+func InstallSignalHandlers(cnf *conf.Config) {
+	fatalSigs := []os.Signal{
+		os.Interrupt,
+		os.Kill,
+		syscall.SIGINT,
+		syscall.SIGABRT,
+		syscall.SIGALRM,
+		syscall.SIGBUS,
+		syscall.SIGFPE,
+		syscall.SIGILL,
+		syscall.SIGSEGV,
+		syscall.SIGTERM,
+	}
+	fatalSigChan := make(chan os.Signal, 1)
+	signal.Notify(fatalSigChan, fatalSigs...)
+	lg := NewLogger("signal", cnf)
+	go func() {
+		sig := <-fatalSigChan
+		lg.Errorf("Terminating on signal: %v\n", sig)
+		lg.Close()
+		os.Exit(1)
+	}()
+
+	sigQuitChan := make(chan os.Signal, 1)
+	signal.Notify(sigQuitChan, syscall.SIGQUIT)
+	go func() {
+		stackTraceBuf := make([]byte, 1<<20)
+		for {
+			<-sigQuitChan
+			GetStackTraces(&stackTraceBuf)
+			lg.Info("=== received SIGQUIT ===\n")
+			lg.Info("=== GOROUTINE STACKS ===\n")
+			lg.Info(string(stackTraceBuf))
+			lg.Info("\n=== END GOROUTINE STACKS ===\n")
+			lg.Info("=== GC STATISTICS ===\n")
+			lg.Info(GetGCStats())
+			lg.Info("=== END GC STATISTICS ===\n")
+		}
+	}()
+}
+
+func GetStackTraces(buf *[]byte) {
+	*buf = (*buf)[0:cap(*buf)]
+	neededBytes := runtime.Stack(*buf, true)
+	for neededBytes > len(*buf) {
+		*buf = make([]byte, neededBytes)
+		runtime.Stack(*buf, true)
+	}
+	*buf = (*buf)[0:neededBytes]
+}
+
+func GetGCStats() string {
+	gcs := debug.GCStats{}
+	debug.ReadGCStats(&gcs)
+	var buf bytes.Buffer
+	buf.WriteString(fmt.Sprintf("LastGC: %s\n", gcs.LastGC.UTC().String()))
+	buf.WriteString(fmt.Sprintf("NumGC: %d\n", gcs.NumGC))
+	buf.WriteString(fmt.Sprintf("PauseTotal: %v\n", gcs.PauseTotal))
+	if gcs.Pause != nil {
+		pauseStr := ""
+		prefix := ""
+		for p := range gcs.Pause {
+			pauseStr += prefix + gcs.Pause[p].String()
+			prefix = ", "
+		}
+		buf.WriteString(fmt.Sprintf("Pause History: %s\n", pauseStr))
+	}
+	return buf.String()
+}
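
A short sketch of installing the signal handlers and calling the stack-trace
and GC helpers directly (configuration defaults only; purely illustrative):

    package main

    import (
        "fmt"

        "htrace/common"
        "htrace/conf"
    )

    func main() {
        cnfBld := conf.Builder{Defaults: conf.DEFAULTS}
        cnf, err := cnfBld.Build()
        if err != nil {
            panic(err)
        }
        // Install the fatal-signal and SIGQUIT handlers, as htraced does at startup.
        common.InstallSignalHandlers(cnf)

        // The same helpers can also be called directly.
        buf := make([]byte, 1<<20)
        common.GetStackTraces(&buf)
        fmt.Printf("captured %d bytes of goroutine stacks\n", len(buf))
        fmt.Print(common.GetGCStats())
    }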

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/process_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/process_test.go b/htrace-htraced/go/src/htrace/common/process_test.go
new file mode 100644
index 0000000..cbbf613
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/process_test.go
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"bufio"
+	"fmt"
+	"htrace/conf"
+	"os"
+	"os/exec"
+	"strings"
+	"syscall"
+	"testing"
+	"time"
+)
+
+const HTRACED_TEST_HELPER_PROCESS = "HTRACED_TEST_HELPER_PROCESS"
+
+// This test runs a helper process which will install our htraced signal
+// handlers.  We will send signals to the subprocess and verify that it has
+// caught them and responded appropriately.
+func TestSignals(t *testing.T) {
+	if os.Getenv(HTRACED_TEST_HELPER_PROCESS) == "1" {
+		runHelperProcess()
+		os.Exit(0)
+	}
+	helper := exec.Command(os.Args[0], "-test.run=TestSignals", "--")
+	helper.Env = []string{HTRACED_TEST_HELPER_PROCESS + "=1"}
+	stdoutPipe, err := helper.StdoutPipe()
+	if err != nil {
+		panic(fmt.Sprintf("Failed to open pipe to process stdout: %s",
+			err.Error()))
+	}
+	stderrPipe, err := helper.StderrPipe()
+	if err != nil {
+		panic(fmt.Sprintf("Failed to open pipe to process stderr: %s",
+			err.Error()))
+	}
+	err = helper.Start()
+	if err != nil {
+		t.Fatalf("Failed to start command %s: %s\n", os.Args[0], err.Error())
+	}
+	t.Logf("Started subprocess...\n")
+	done := make(chan interface{})
+	go func() {
+		scanner := bufio.NewScanner(stdoutPipe)
+		for scanner.Scan() {
+			text := scanner.Text()
+			if strings.Contains(text, "=== GOROUTINE STACKS ===") {
+				break
+			}
+		}
+		t.Logf("Saw '=== GOROUTINE STACKS ===' on stdout.  Sending SIGINT.\n")
+		helper.Process.Signal(syscall.SIGINT)
+		for scanner.Scan() {
+			text := scanner.Text()
+			if strings.Contains(text, "Terminating on signal: SIGINT") {
+				break
+			}
+		}
+		t.Logf("Saw 'Terminating on signal: SIGINT'.  " +
+			"Helper goroutine exiting.\n")
+		done <- nil
+	}()
+	scanner := bufio.NewScanner(stderrPipe)
+	for scanner.Scan() {
+		text := scanner.Text()
+		if strings.Contains(text, "Signal handler installed.") {
+			break
+		}
+	}
+	t.Logf("Saw 'Signal handler installed.'  Sending SIGQUIT.")
+	helper.Process.Signal(syscall.SIGQUIT)
+	t.Logf("Waiting for helper goroutine to exit.\n")
+	<-done
+	t.Logf("Waiting for subprocess to exit.\n")
+	helper.Wait()
+	t.Logf("Done.")
+}
+
+// Run the helper process which TestSignals spawns.
+func runHelperProcess() {
+	cnfMap := map[string]string{
+		conf.HTRACE_LOG_LEVEL: "TRACE",
+		conf.HTRACE_LOG_PATH:  "", // log to stdout
+	}
+	cnfBld := conf.Builder{Values: cnfMap, Defaults: conf.DEFAULTS}
+	cnf, err := cnfBld.Build()
+	if err != nil {
+		fmt.Printf("Error building configuration: %s\n", err.Error())
+		os.Exit(1)
+	}
+	InstallSignalHandlers(cnf)
+	fmt.Fprintf(os.Stderr, "Signal handler installed.\n")
+	// Wait for a signal to be delivered
+	for {
+		time.Sleep(time.Hour * 100)
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/query.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/query.go b/htrace-htraced/go/src/htrace/common/query.go
new file mode 100644
index 0000000..7a9e523
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/query.go
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"encoding/json"
+)
+
+//
+// Represents queries that can be sent to htraced.
+//
+// Each query consists of set of predicates that will be 'AND'ed together to
+// return a set of spans.  Predicates contain an operation, a field, and a
+// value.
+//
+// For example, a query might be "return the first 100 spans between 5:00pm
+// and 5:01pm"  This query would have two predicates: time greater than or
+// equal to 5:00pm, and time less than or equal to 5:01pm.
+// In HTrace, times are always expressed in milliseconds since the Epoch.
+// So this would become:
+// { "lim" : 100, "pred" : [
+//   { "op" : "ge", "field" : "begin", "val" : 1234 },
+//   { "op" : "le", "field" : "begin", "val" : 5678 },
+// ] }
+//
+// Where '1234' and '5678' were replaced by times since the epoch in
+// milliseconds.
+//
+
+type Op string
+
+const (
+	CONTAINS               Op = "cn"
+	EQUALS                 Op = "eq"
+	LESS_THAN_OR_EQUALS    Op = "le"
+	GREATER_THAN_OR_EQUALS Op = "ge"
+	GREATER_THAN           Op = "gt"
+)
+
+func (op Op) IsDescending() bool {
+	return op == LESS_THAN_OR_EQUALS
+}
+
+func (op Op) IsValid() bool {
+	ops := ValidOps()
+	for i := range ops {
+		if ops[i] == op {
+			return true
+		}
+	}
+	return false
+}
+
+func ValidOps() []Op {
+	return []Op{CONTAINS, EQUALS, LESS_THAN_OR_EQUALS, GREATER_THAN_OR_EQUALS,
+		GREATER_THAN}
+}
+
+type Field string
+
+const (
+	SPAN_ID     Field = "spanid"
+	DESCRIPTION Field = "description"
+	BEGIN_TIME  Field = "begin"
+	END_TIME    Field = "end"
+	DURATION    Field = "duration"
+	TRACER_ID   Field = "tracerid"
+)
+
+func (field Field) IsValid() bool {
+	fields := ValidFields()
+	for i := range fields {
+		if fields[i] == field {
+			return true
+		}
+	}
+	return false
+}
+
+func ValidFields() []Field {
+	return []Field{SPAN_ID, DESCRIPTION, BEGIN_TIME, END_TIME,
+		DURATION, TRACER_ID}
+}
+
+type Predicate struct {
+	Op    Op     `json:"op"`
+	Field Field  `json:"field"`
+	Val   string `json:"val"`
+}
+
+func (pred *Predicate) String() string {
+	buf, err := json.Marshal(pred)
+	if err != nil {
+		panic(err)
+	}
+	return string(buf)
+}
+
+type Query struct {
+	Predicates []Predicate `json:"pred"`
+	Lim        int         `json:"lim"`
+	Prev       *Span       `json:"prev"`
+}
+
+func (query *Query) String() string {
+	buf, err := json.Marshal(query)
+	if err != nil {
+		panic(err)
+	}
+	return string(buf)
+}
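
A small sketch that builds the "first 100 spans between two times" query from
the comment above using these types (the timestamps are the placeholder values
from that comment):

    package main

    import (
        "fmt"

        "htrace/common"
    )

    func main() {
        // Two predicates, ANDed together: begin >= 1234 and begin <= 5678,
        // with the result limited to 100 spans.
        q := common.Query{
            Lim: 100,
            Predicates: []common.Predicate{
                {Op: common.GREATER_THAN_OR_EQUALS, Field: common.BEGIN_TIME, Val: "1234"},
                {Op: common.LESS_THAN_OR_EQUALS, Field: common.BEGIN_TIME, Val: "5678"},
            },
        }
        // Query.String() marshals the query to the JSON form sent to htraced.
        fmt.Println(q.String())
    }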

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/query_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/query_test.go b/htrace-htraced/go/src/htrace/common/query_test.go
new file mode 100644
index 0000000..2697d9c
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/query_test.go
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"testing"
+)
+
+func TestValidOps(t *testing.T) {
+	for i := range ValidOps() {
+		op := ValidOps()[i]
+		if !op.IsValid() {
+			t.Fatalf("op %s was in ValidOps, but IsValid returned false.\n", op)
+		}
+	}
+	invalidOp := Op("completelybogus")
+	if invalidOp.IsValid() {
+		t.Fatalf("op %s was invalid, but IsValid returned true.\n", invalidOp)
+	}
+}
+
+func TestValidFields(t *testing.T) {
+	for i := range ValidFields() {
+		field := ValidFields()[i]
+		if !field.IsValid() {
+			t.Fatalf("field %s was in ValidFields, but IsValid returned false.\n", field)
+		}
+	}
+	invalidField := Field("completelybogus")
+	if invalidField.IsValid() {
+		t.Fatalf("field %s was invalid, but IsValid returned true.\n", invalidField)
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/rpc.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/rpc.go b/htrace-htraced/go/src/htrace/common/rpc.go
new file mode 100644
index 0000000..5f02db6
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/rpc.go
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+// The 4-byte magic number which is sent first in the HRPC header
+const HRPC_MAGIC = 0x43525448
+
+// Method ID codes.  Do not reorder these.
+const (
+	METHOD_ID_NONE        = 0
+	METHOD_ID_WRITE_SPANS = iota
+)
+
+const METHOD_NAME_WRITE_SPANS = "HrpcHandler.WriteSpans"
+
+// Maximum length of the error message passed in an HRPC response
+const MAX_HRPC_ERROR_LENGTH = 4 * 1024 * 1024
+
+// Maximum length of HRPC message body
+const MAX_HRPC_BODY_LENGTH = 32 * 1024 * 1024
+
+// A request to write spans to htraced.
+// This request is followed by a sequence of spans.
+type WriteSpansReq struct {
+	DefaultTrid string `json:",omitempty"`
+	NumSpans    int
+}
+
+// Info returned by /server/version
+type ServerVersion struct {
+	// The server release version.
+	ReleaseVersion string
+
+	// The git hash that this software was built with.
+	GitVersion string
+}
+
+// A response to a WriteSpansReq
+type WriteSpansResp struct {
+}
+
+// The header which is sent over the wire for HRPC
+type HrpcRequestHeader struct {
+	Magic    uint32
+	MethodId uint32
+	Seq      uint64
+	Length   uint32
+}
+
+// The response which is sent over the wire for HRPC
+type HrpcResponseHeader struct {
+	Seq       uint64
+	MethodId  uint32
+	ErrLength uint32
+	Length    uint32
+}
+
+func HrpcMethodIdToMethodName(id uint32) string {
+	switch id {
+	case METHOD_ID_WRITE_SPANS:
+		return METHOD_NAME_WRITE_SPANS
+	default:
+		return ""
+	}
+}
+
+func HrpcMethodNameToId(name string) uint32 {
+	switch name {
+	case METHOD_NAME_WRITE_SPANS:
+		return METHOD_ID_WRITE_SPANS
+	default:
+		return METHOD_ID_NONE
+	}
+}
+
+type SpanMetrics struct {
+	// The total number of spans written to HTraced.
+	Written uint64
+
+	// The total number of spans dropped by the server.
+	ServerDropped uint64
+}
+
+// A map from network address strings to SpanMetrics structures.
+type SpanMetricsMap map[string]*SpanMetrics
+
+// Info returned by /server/stats
+type ServerStats struct {
+	// Statistics for each shard (directory)
+	Dirs []StorageDirectoryStats
+
+	// Per-host Span Metrics
+	HostSpanMetrics SpanMetricsMap
+
+	// The time (in UTC milliseconds since the epoch) when the
+	// datastore was last started.
+	LastStartMs int64
+
+	// The current time (in UTC milliseconds since the epoch) on the server.
+	CurMs int64
+
+	// The total number of spans which have been reaped.
+	ReapedSpans uint64
+
+	// The total number of spans which have been ingested since the server started, by WriteSpans
+	// requests.  This number counts spans that didn't get written to persistent storage as well as
+	// those that did.
+	IngestedSpans uint64
+
+	// The total number of spans which have been written to leveldb since the server started.
+	WrittenSpans uint64
+
+	// The total number of spans dropped by the server since the server started.
+	ServerDroppedSpans uint64
+
+	// The maximum latency of a writeSpans request, in milliseconds.
+	MaxWriteSpansLatencyMs uint32
+
+	// The average latency of a writeSpans request, in milliseconds.
+	AverageWriteSpansLatencyMs uint32
+}
+
+type StorageDirectoryStats struct {
+	Path string
+
+	// The approximate number of bytes on disk present in this shard.
+	ApproximateBytes uint64
+
+	// leveldb.stats information
+	LevelDbStats string
+}
+
+type ServerDebugInfoReq struct {
+}
+
+type ServerDebugInfo struct {
+	// Stack traces from all goroutines
+	StackTraces string
+
+	// Garbage collection statistics
+	GCStats string
+}
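
A standalone sketch of the wire framing these definitions imply, matching what
the client codec does: a fixed-size little-endian request header followed by
the msgpack body (the sequence number and the empty body are placeholders):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"

        "htrace/common"
    )

    func main() {
        body := []byte{} // msgpack-encoded WriteSpansReq + spans would go here
        hdr := common.HrpcRequestHeader{
            Magic:    common.HRPC_MAGIC,
            MethodId: common.METHOD_ID_WRITE_SPANS,
            Seq:      1,
            Length:   uint32(len(body)),
        }
        var frame bytes.Buffer
        // binary.Write lays the header fields out in order, little-endian,
        // just as HrpcClientCodec.WriteRequest does before sending the body.
        if err := binary.Write(&frame, binary.LittleEndian, &hdr); err != nil {
            panic(err)
        }
        frame.Write(body)
        fmt.Printf("header: %d bytes, total frame: %d bytes\n",
            frame.Len()-len(body), frame.Len())
    }

The header comes out to 20 bytes (4 + 4 + 8 + 4) with no padding, since
binary.Write encodes each field back to back.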

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/semaphore.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/semaphore.go b/htrace-htraced/go/src/htrace/common/semaphore.go
new file mode 100644
index 0000000..1acde76
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/semaphore.go
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"sync"
+)
+
+// A simple lock-and-condition-variable based semaphore implementation.
+type Semaphore struct {
+	lock  sync.Mutex
+	cond  *sync.Cond
+	count int64
+}
+
+func NewSemaphore(count int64) *Semaphore {
+	sem := &Semaphore{
+		count: int64(count),
+	}
+	sem.cond = &sync.Cond{
+		L: &sem.lock,
+	}
+	return sem
+}
+
+func (sem *Semaphore) Post() {
+	sem.lock.Lock()
+	sem.count++
+	if sem.count > 0 {
+		sem.cond.Broadcast()
+	}
+	sem.lock.Unlock()
+}
+
+func (sem *Semaphore) Posts(amt int64) {
+	sem.lock.Lock()
+	sem.count += amt
+	if sem.count > 0 {
+		sem.cond.Broadcast()
+	}
+	sem.lock.Unlock()
+}
+
+func (sem *Semaphore) Wait() {
+	sem.lock.Lock()
+	for {
+		if sem.count > 0 {
+			sem.count--
+			sem.lock.Unlock()
+			return
+		}
+		sem.cond.Wait()
+	}
+}
+
+func (sem *Semaphore) Waits(amt int64) {
+	var i int64
+	for i = 0; i < amt; i++ {
+		sem.Wait()
+	}
+}
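
A brief sketch of using the semaphore to bound concurrency (the worker count
and limit are arbitrary):

    package main

    import (
        "fmt"

        "htrace/common"
    )

    func main() {
        // Allow at most two workers to run at once.
        sem := common.NewSemaphore(2)
        done := common.NewSemaphore(0)
        for i := 0; i < 5; i++ {
            go func(id int) {
                sem.Wait() // acquire a slot
                fmt.Printf("worker %d running\n", id)
                sem.Post() // release the slot
                done.Post()
            }(i)
        }
        done.Waits(5) // block until all five workers have finished
        fmt.Println("all workers done")
    }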

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/semaphore_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/semaphore_test.go b/htrace-htraced/go/src/htrace/common/semaphore_test.go
new file mode 100644
index 0000000..089c51b
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/semaphore_test.go
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"sync/atomic"
+	"testing"
+	"time"
+)
+
+func TestSemaphoreWake(t *testing.T) {
+	var done uint32
+	sem := NewSemaphore(0)
+	go func() {
+		time.Sleep(10 * time.Nanosecond)
+		atomic.AddUint32(&done, 1)
+		sem.Post()
+	}()
+	sem.Wait()
+	doneVal := atomic.LoadUint32(&done)
+	if doneVal != 1 {
+		t.Fatalf("sem.Wait did not wait for sem.Post")
+	}
+}
+
+func TestSemaphoreCount(t *testing.T) {
+	sem := NewSemaphore(1)
+	sem.Post()
+	sem.Wait()
+	sem.Wait()
+
+	sem = NewSemaphore(-1)
+	sem.Post()
+	sem.Post()
+	sem.Wait()
+}
+
+func TestSemaphoreMultipleGoroutines(t *testing.T) {
+	var done uint32
+	sem := NewSemaphore(0)
+	sem2 := NewSemaphore(0)
+	go func() {
+		sem.Wait()
+		atomic.AddUint32(&done, 1)
+		sem2.Post()
+	}()
+	go func() {
+		time.Sleep(10 * time.Nanosecond)
+		atomic.AddUint32(&done, 1)
+		sem.Post()
+	}()
+	go func() {
+		time.Sleep(20 * time.Nanosecond)
+		atomic.AddUint32(&done, 1)
+		sem.Post()
+	}()
+	sem.Wait()
+	go func() {
+		time.Sleep(10 * time.Nanosecond)
+		atomic.AddUint32(&done, 1)
+		sem.Post()
+	}()
+	sem.Wait()
+	sem2.Wait()
+	doneVal := atomic.LoadUint32(&done)
+	if doneVal != 4 {
+		t.Fatalf("sem.Wait did not wait for sem.Posts")
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/span.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/span.go b/htrace-htraced/go/src/htrace/common/span.go
new file mode 100644
index 0000000..1716c5a
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/span.go
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"hash/fnv"
+)
+
+//
+// Represents a trace span.
+//
+// Compatibility notes:
+// When converting to JSON, we store the 64-bit numbers as hexadecimal strings rather than as
+// integers.  This is because JavaScript cannot represent 64-bit integers exactly: integers that
+// need more than 53 bits are rounded by JavaScript.  Since the JavaScript UI is a primary consumer
+// of this JSON data, we simply pass these numbers as strings.
+//
+
+type TraceInfoMap map[string]string
+
+type TimelineAnnotation struct {
+	Time int64  `json:"t"`
+	Msg  string `json:"m"`
+}
+
+type SpanId []byte
+
+var INVALID_SPAN_ID SpanId = make([]byte, 16) // all zeroes
+
+func (id SpanId) String() string {
+	return fmt.Sprintf("%02x%02x%02x%02x"+
+		"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
+		id[0], id[1], id[2], id[3], id[4], id[5], id[6], id[7], id[8],
+		id[9], id[10], id[11], id[12], id[13], id[14], id[15])
+}
+
+func (id SpanId) Val() []byte {
+	return []byte(id)
+}
+
+func (id SpanId) FindProblem() string {
+	if id == nil {
+		return "The span ID is nil"
+	}
+	if len(id) != 16 {
+		return "The span ID is not exactly 16 bytes."
+	}
+	if bytes.Equal(id.Val(), INVALID_SPAN_ID.Val()) {
+		return "The span ID is all zeros."
+	}
+	return ""
+}
+
+func (id SpanId) ToArray() [16]byte {
+	var ret [16]byte
+	copy(ret[:], id.Val()[:])
+	return ret
+}
+
+// Return the next ID in lexicographical order.  For the maximum ID,
+// returns the minimum.
+func (id SpanId) Next() SpanId {
+	next := make([]byte, 16)
+	copy(next, id)
+	for i := len(next) - 1; i >= 0; i-- {
+		if next[i] == 0xff {
+			next[i] = 0
+		} else {
+			next[i] = next[i] + 1
+			break
+		}
+	}
+	return next
+}
+
+// Return the previous ID in lexicographical order.  For the minimum ID,
+// returns the maximum ID.
+func (id SpanId) Prev() SpanId {
+	prev := make([]byte, 16)
+	copy(prev, id)
+	for i := len(prev) - 1; i >= 0; i-- {
+		if prev[i] == 0x00 {
+			prev[i] = 0xff
+		} else {
+			prev[i] = prev[i] - 1
+			break
+		}
+	}
+	return prev
+}
+
+func (id SpanId) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + id.String() + `"`), nil
+}
+
+func (id SpanId) Compare(other SpanId) int {
+	return bytes.Compare(id.Val(), other.Val())
+}
+
+func (id SpanId) Equal(other SpanId) bool {
+	return bytes.Equal(id.Val(), other.Val())
+}
+
+func (id SpanId) Hash32() uint32 {
+	h := fnv.New32a()
+	h.Write(id.Val())
+	return h.Sum32()
+}
+
+type SpanSlice []*Span
+
+func (s SpanSlice) Len() int {
+	return len(s)
+}
+
+func (s SpanSlice) Less(i, j int) bool {
+	return s[i].Id.Compare(s[j].Id) < 0
+}
+
+func (s SpanSlice) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+type SpanIdSlice []SpanId
+
+func (s SpanIdSlice) Len() int {
+	return len(s)
+}
+
+func (s SpanIdSlice) Less(i, j int) bool {
+	return s[i].Compare(s[j]) < 0
+}
+
+func (s SpanIdSlice) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+const DOUBLE_QUOTE = 0x22
+
+func (id *SpanId) UnmarshalJSON(b []byte) error {
+	if b[0] != DOUBLE_QUOTE {
+		return errors.New("Expected spanID to start with a string quote.")
+	}
+	if b[len(b)-1] != DOUBLE_QUOTE {
+		return errors.New("Expected spanID to end with a string quote.")
+	}
+	return id.FromString(string(b[1 : len(b)-1]))
+}
+
+func (id *SpanId) FromString(str string) error {
+	i := SpanId(make([]byte, 16))
+	n, err := fmt.Sscanf(str, "%02x%02x%02x%02x"+
+		"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
+		&i[0], &i[1], &i[2], &i[3], &i[4], &i[5], &i[6], &i[7], &i[8],
+		&i[9], &i[10], &i[11], &i[12], &i[13], &i[14], &i[15])
+	if err != nil {
+		return err
+	}
+	if n != 16 {
+		return errors.New("Failed to find 16 hex digits in the SpanId")
+	}
+	*id = i
+	return nil
+}
+
+type SpanData struct {
+	Begin               int64                `json:"b"`
+	End                 int64                `json:"e"`
+	Description         string               `json:"d"`
+	Parents             []SpanId             `json:"p"`
+	Info                TraceInfoMap         `json:"n,omitempty"`
+	TracerId            string               `json:"r"`
+	TimelineAnnotations []TimelineAnnotation `json:"t,omitempty"`
+}
+
+type Span struct {
+	Id SpanId `json:"a"`
+	SpanData
+}
+
+func (span *Span) ToJson() []byte {
+	jbytes, err := json.Marshal(*span)
+	if err != nil {
+		panic(err)
+	}
+	return jbytes
+}
+
+func (span *Span) String() string {
+	return string(span.ToJson())
+}
+
+// Compute the span duration.  We ignore overflow since we never deal with negative times.
+func (span *Span) Duration() int64 {
+	return span.End - span.Begin
+}
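
A small sketch of the hex-string convention described in the compatibility notes above (import path per the rename; the ID value is arbitrary):

    package main

    import (
        "encoding/json"
        "fmt"

        "htrace/common"
    )

    func main() {
        var id common.SpanId
        if err := id.FromString("33f25a1a750a471db5bafa59309d7d6f"); err != nil {
            panic(err)
        }
        buf, err := json.Marshal(id)
        if err != nil {
            panic(err)
        }
        // The ID marshals as a quoted hex string, never as a JSON number,
        // so a JavaScript consumer cannot round it.
        fmt.Println(string(buf))        // "33f25a1a750a471db5bafa59309d7d6f"
        fmt.Println(id.Next().String()) // 33f25a1a750a471db5bafa59309d7d70
    }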

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/span_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/span_test.go b/htrace-htraced/go/src/htrace/common/span_test.go
new file mode 100644
index 0000000..7fb128d
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/span_test.go
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"github.com/ugorji/go/codec"
+	"testing"
+)
+
+func TestSpanToJson(t *testing.T) {
+	t.Parallel()
+	span := Span{Id: TestId("33f25a1a750a471db5bafa59309d7d6f"),
+		SpanData: SpanData{
+			Begin:       123,
+			End:         456,
+			Description: "getFileDescriptors",
+			Parents:     []SpanId{},
+			TracerId:    "testTracerId",
+		}}
+	ExpectStrEqual(t,
+		`{"a":"33f25a1a750a471db5bafa59309d7d6f","b":123,"e":456,"d":"getFileDescriptors","p":[],"r":"testTracerId"}`,
+		string(span.ToJson()))
+}
+
+func TestAnnotatedSpanToJson(t *testing.T) {
+	t.Parallel()
+	span := Span{Id: TestId("11eace42e6404b40a7644214cb779a08"),
+		SpanData: SpanData{
+			Begin:       1234,
+			End:         4567,
+			Description: "getFileDescriptors2",
+			Parents:     []SpanId{},
+			TracerId:    "testAnnotatedTracerId",
+			TimelineAnnotations: []TimelineAnnotation{
+				TimelineAnnotation{
+					Time: 7777,
+					Msg:  "contactedServer",
+				},
+				TimelineAnnotation{
+					Time: 8888,
+					Msg:  "passedFd",
+				},
+			},
+		}}
+	ExpectStrEqual(t,
+		`{"a":"11eace42e6404b40a7644214cb779a08","b":1234,"e":4567,"d":"getFileDescriptors2","p":[],"r":"testAnnotatedTracerId","t":[{"t":7777,"m":"contactedServer"},{"t":8888,"m":"passedFd"}]}`,
+		string(span.ToJson()))
+}
+
+func TestSpanNext(t *testing.T) {
+	ExpectStrEqual(t, TestId("00000000000000000000000000000001").String(),
+		TestId("00000000000000000000000000000000").Next().String())
+	ExpectStrEqual(t, TestId("00000000000000000000000000f00000").String(),
+		TestId("00000000000000000000000000efffff").Next().String())
+	ExpectStrEqual(t, TestId("00000000000000000000000000000000").String(),
+		TestId("ffffffffffffffffffffffffffffffff").Next().String())
+}
+
+func TestSpanPrev(t *testing.T) {
+	ExpectStrEqual(t, TestId("00000000000000000000000000000000").String(),
+		TestId("00000000000000000000000000000001").Prev().String())
+	ExpectStrEqual(t, TestId("00000000000000000000000000efffff").String(),
+		TestId("00000000000000000000000000f00000").Prev().String())
+	ExpectStrEqual(t, TestId("ffffffffffffffffffffffffffffffff").String(),
+		TestId("00000000000000000000000000000000").Prev().String())
+}
+
+func TestSpanMsgPack(t *testing.T) {
+	span := Span{Id: TestId("33f25a1a750a471db5bafa59309d7d6f"),
+		SpanData: SpanData{
+			Begin:       1234,
+			End:         5678,
+			Description: "getFileDescriptors",
+			Parents:     []SpanId{},
+			TracerId:    "testTracerId",
+		}}
+	mh := new(codec.MsgpackHandle)
+	mh.WriteExt = true
+	w := bytes.NewBuffer(make([]byte, 0, 2048))
+	enc := codec.NewEncoder(w, mh)
+	err := enc.Encode(span)
+	if err != nil {
+		t.Fatal("Error encoding span as msgpack: " + err.Error())
+	}
+	buf := w.Bytes()
+	fmt.Printf("span: %s\n", hex.EncodeToString(buf))
+	mh = new(codec.MsgpackHandle)
+	mh.WriteExt = true
+	dec := codec.NewDecoder(bytes.NewReader(buf), mh)
+	var span2 Span
+	err = dec.Decode(&span2)
+	if err != nil {
+		t.Fatal("Failed to reverse msgpack encoding for " + span.String())
+	}
+	ExpectSpansEqual(t, &span, &span2)
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/test_util.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/test_util.go b/htrace-htraced/go/src/htrace/common/test_util.go
new file mode 100644
index 0000000..740354c
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/test_util.go
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+)
+
+type Int64Slice []int64
+
+func (p Int64Slice) Len() int           { return len(p) }
+func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type SupplierFun func() bool
+
+//
+// Wait for a configurable amount of time for a precondition to become true.
+//
+// Example:
+//   WaitFor(time.Minute * 1, time.Millisecond * 1, func() bool {
+//      return ht.Store.GetStatistics().NumSpansWritten >= 3
+//  })
+//
+func WaitFor(dur time.Duration, poll time.Duration, fun SupplierFun) {
+	if poll == 0 {
+		poll = dur / 10
+	}
+	if poll <= 0 {
+		panic("Can't have a polling time of zero or less.")
+	}
+	endTime := time.Now().Add(dur)
+	for {
+		if fun() {
+			return
+		}
+		if !time.Now().Before(endTime) {
+			break
+		}
+		time.Sleep(poll)
+	}
+	panic(fmt.Sprintf("Timed out after %s", dur))
+}
+
+// Trigger a test failure if two strings are not equal.
+func ExpectStrEqual(t *testing.T, expect string, actual string) {
+	if expect != actual {
+		t.Fatalf("Expected:\n%s\nGot:\n%s\n", expect, actual)
+	}
+}
+
+// Trigger a test failure if the JSON representations of two spans are not equal.
+func ExpectSpansEqual(t *testing.T, spanA *Span, spanB *Span) {
+	ExpectStrEqual(t, string(spanA.ToJson()), string(spanB.ToJson()))
+}
+
+func TestId(str string) SpanId {
+	var spanId SpanId
+	err := spanId.FromString(str)
+	if err != nil {
+		panic(err.Error())
+	}
+	return spanId
+}
+
+func AssertErrContains(t *testing.T, err error, str string) {
+	if !strings.Contains(err.Error(), str) {
+		t.Fatalf("expected the error to contain %s, but it was %s\n",
+			str, err.Error())
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/time.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/time.go b/htrace-htraced/go/src/htrace/common/time.go
new file mode 100644
index 0000000..8b4b6b8
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/time.go
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"time"
+)
+
+func TimeToUnixMs(t time.Time) int64 {
+	return t.UnixNano() / 1000000
+}
+
+func UnixMsToTime(u int64) time.Time {
+	secs := u / 1000
+	nanos := (u - (secs * 1000)) * 1000000 // scale the leftover milliseconds to nanoseconds
+	return time.Unix(secs, nanos)
+}
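
A quick sketch of the millisecond round trip (the timestamp is arbitrary), relying on the leftover milliseconds being scaled to nanoseconds above:

    package main

    import (
        "fmt"

        "htrace/common"
    )

    func main() {
        ms := int64(1445540632123)
        t := common.UnixMsToTime(ms)
        // The 123 ms remainder is carried in the nanosecond field, so the
        // conversion round-trips exactly.
        fmt.Println(common.TimeToUnixMs(t) == ms) // true
    }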

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/common/time_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/common/time_test.go b/htrace-htraced/go/src/htrace/common/time_test.go
new file mode 100644
index 0000000..11e2733
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/common/time_test.go
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package common
+
+import (
+	"testing"
+)
+
+func testRoundTrip(t *testing.T, u int64) {
+	tme := UnixMsToTime(u)
+	u2 := TimeToUnixMs(tme)
+	if u2 != u {
+		t.Fatalf("Error taking %d on a round trip: came back as "+
+			"%d instead.\n", u, u2)
+	}
+}
+
+func TestTimeConversions(t *testing.T) {
+	testRoundTrip(t, 0)
+	testRoundTrip(t, 1445540632000)
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/conf/config.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/conf/config.go b/htrace-htraced/go/src/htrace/conf/config.go
new file mode 100644
index 0000000..24170b2
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/conf/config.go
@@ -0,0 +1,302 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package conf
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"syscall"
+)
+
+//
+// The configuration code for HTraced.
+//
+// HTraced can be configured via Hadoop-style XML configuration files, or by passing -Dkey=value
+// command line arguments.  Command-line arguments without an equals sign, such as "-Dkey", will be
+// treated as setting the key to "true".
+//
+// Configuration key constants should be defined in config_keys.go.  Each key should have a default,
+// which will be used if the user supplies no value, or supplies an invalid value.
+// For that reason, it is not necessary for the Get, GetInt, etc. functions to take a default value
+// argument.
+//
+// Configuration objects are immutable.  However, you can make a copy of a configuration which adds
+// some changes using Configuration#Clone().
+//
+
+type Config struct {
+	settings map[string]string
+	defaults map[string]string
+}
+
+type Builder struct {
+	// If non-nil, the XML configuration file to read.
+	Reader io.Reader
+
+	// If non-nil, the configuration values to use.
+	Values map[string]string
+
+	// If non-nil, the default configuration values to use.
+	Defaults map[string]string
+
+	// If non-nil, the command-line arguments to use.
+	Argv []string
+
+	// The name of the application.  Configuration keys that start with this
+	// string will be converted to their unprefixed forms.
+	AppPrefix string
+}
+
+func getDefaultHTracedConfDir() string {
+	return PATH_SEP + "etc" + PATH_SEP + "htraced" + PATH_SEP + "conf"
+}
+
+func getHTracedConfDirs(dlog io.Writer) []string {
+	confDir := os.Getenv("HTRACED_CONF_DIR")
+	paths := filepath.SplitList(confDir)
+	if len(paths) < 1 {
+		def := getDefaultHTracedConfDir()
+		io.WriteString(dlog, fmt.Sprintf("HTRACED_CONF_DIR defaulting to %s\n", def))
+		return []string{def}
+	}
+	io.WriteString(dlog, fmt.Sprintf("HTRACED_CONF_DIR=%s\n", confDir))
+	return paths
+}
+
+// Load a configuration from the application's argv, configuration file, and the standard
+// defaults.
+func LoadApplicationConfig(appPrefix string) (*Config, io.Reader) {
+	dlog := new(bytes.Buffer)
+	reader := openFile(CONFIG_FILE_NAME, getHTracedConfDirs(dlog), dlog)
+	bld := Builder{}
+	if reader != nil {
+		defer reader.Close()
+		bld.Reader = bufio.NewReader(reader)
+	}
+	bld.Argv = os.Args[1:]
+	bld.Defaults = DEFAULTS
+	bld.AppPrefix = appPrefix
+	cnf, err := bld.Build()
+	if err != nil {
+		log.Fatal("Error building configuration: " + err.Error())
+	}
+	os.Args = append(os.Args[0:1], bld.Argv...)
+	keys := make(sort.StringSlice, 0, 20)
+	for k := range cnf.settings {
+		keys = append(keys, k)
+	}
+	sort.Sort(keys)
+	prefix := ""
+	io.WriteString(dlog, "Read configuration: ")
+	for i := range keys {
+		io.WriteString(dlog, fmt.Sprintf(`%s%s = "%s"`,
+			prefix, keys[i], cnf.settings[keys[i]]))
+		prefix = ", "
+	}
+	return cnf, dlog
+}
+
+// Attempt to open a configuration file somewhere on the provided list of paths.
+func openFile(cnfName string, paths []string, dlog io.Writer) io.ReadCloser {
+	for p := range paths {
+		path := fmt.Sprintf("%s%c%s", paths[p], os.PathSeparator, cnfName)
+		file, err := os.Open(path)
+		if err == nil {
+			io.WriteString(dlog, fmt.Sprintf("Reading configuration from %s.\n", path))
+			return file
+		}
+		if e, ok := err.(*os.PathError); ok && e.Err == syscall.ENOENT {
+			continue
+		}
+		io.WriteString(dlog, fmt.Sprintf("Error opening %s for read: %s\n", path, err.Error()))
+	}
+	return nil
+}
+
+// Try to parse a command-line element as a key=value pair.
+func parseAsConfigFlag(flag string) (string, string) {
+	var confPart string
+	if strings.HasPrefix(flag, "-D") {
+		confPart = flag[2:]
+	} else if strings.HasPrefix(flag, "--D") {
+		confPart = flag[3:]
+	} else {
+		return "", ""
+	}
+	if len(confPart) == 0 {
+		return "", ""
+	}
+	idx := strings.Index(confPart, "=")
+	if idx == -1 {
+		return confPart, "true"
+	}
+	return confPart[0:idx], confPart[idx+1:]
+}
+
+// Build a new configuration object from the provided conf.Builder.
+func (bld *Builder) Build() (*Config, error) {
+	// Load values and defaults
+	cnf := Config{}
+	cnf.settings = make(map[string]string)
+	if bld.Values != nil {
+		for k, v := range bld.Values {
+			cnf.settings[k] = v
+		}
+	}
+	cnf.defaults = make(map[string]string)
+	if bld.Defaults != nil {
+		for k, v := range bld.Defaults {
+			cnf.defaults[k] = v
+		}
+	}
+
+	// Process the configuration file, if we have one
+	if bld.Reader != nil {
+		parseXml(bld.Reader, cnf.settings)
+	}
+
+	// Process command line arguments
+	var i int
+	for i < len(bld.Argv) {
+		str := bld.Argv[i]
+		key, val := parseAsConfigFlag(str)
+		if key != "" {
+			cnf.settings[key] = val
+			bld.Argv = append(bld.Argv[:i], bld.Argv[i+1:]...)
+		} else {
+			i++
+		}
+	}
+	cnf.settings = bld.removeApplicationPrefixes(cnf.settings)
+	cnf.defaults = bld.removeApplicationPrefixes(cnf.defaults)
+	return &cnf, nil
+}
+
+func (bld *Builder) removeApplicationPrefixes(in map[string]string) map[string]string {
+	out := make(map[string]string)
+	for k, v := range in {
+		if strings.HasPrefix(k, bld.AppPrefix) {
+			out[k[len(bld.AppPrefix):]] = v
+		} else {
+			out[k] = v
+		}
+	}
+	return out
+}
+
+// Returns true if the configuration has a non-default value for the given key.
+func (cnf *Config) Contains(key string) bool {
+	_, ok := cnf.settings[key]
+	return ok
+}
+
+// Get a string configuration key.
+func (cnf *Config) Get(key string) string {
+	ret, hadKey := cnf.settings[key]
+	if hadKey {
+		return ret
+	}
+	return cnf.defaults[key]
+}
+
+// Get a boolean configuration key.
+func (cnf *Config) GetBool(key string) bool {
+	str := cnf.settings[key]
+	ret, err := strconv.ParseBool(str)
+	if err == nil {
+		return ret
+	}
+	str = cnf.defaults[key]
+	ret, err = strconv.ParseBool(str)
+	if err == nil {
+		return ret
+	}
+	return false
+}
+
+// Get an integer configuration key.
+func (cnf *Config) GetInt(key string) int {
+	str := cnf.settings[key]
+	ret, err := strconv.Atoi(str)
+	if err == nil {
+		return ret
+	}
+	str = cnf.defaults[key]
+	ret, err = strconv.Atoi(str)
+	if err == nil {
+		return ret
+	}
+	return 0
+}
+
+// Get an int64 configuration key.
+func (cnf *Config) GetInt64(key string) int64 {
+	str := cnf.settings[key]
+	ret, err := strconv.ParseInt(str, 10, 64)
+	if err == nil {
+		return ret
+	}
+	str = cnf.defaults[key]
+	ret, err = strconv.ParseInt(str, 10, 64)
+	if err == nil {
+		return ret
+	}
+	return 0
+}
+
+// Make a deep copy of the given configuration.
+// Optionally, you can specify particular key/value pairs to change.
+// Example:
+// cnf2 := cnf.Copy("my.changed.key", "my.new.value")
+func (cnf *Config) Clone(args ...string) *Config {
+	if len(args)%2 != 0 {
+		panic("The arguments to Config#copy are key1, value1, " +
+			"key2, value2, and so on.  You must specify an even number of arguments.")
+	}
+	ncnf := &Config{defaults: cnf.defaults}
+	ncnf.settings = make(map[string]string)
+	for k, v := range cnf.settings {
+		ncnf.settings[k] = v
+	}
+	for i := 0; i < len(args); i += 2 {
+		ncnf.settings[args[i]] = args[i+1]
+	}
+	return ncnf
+}
+
+// Export the configuration as a map
+func (cnf *Config) Export() map[string]string {
+	m := make(map[string]string)
+	for k, v := range cnf.defaults {
+		m[k] = v
+	}
+	for k, v := range cnf.settings {
+		m[k] = v
+	}
+	return m
+}
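
A sketch of driving the Builder directly; the values, defaults, and -D flag shown are illustrative:

    package main

    import (
        "fmt"

        "htrace/conf"
    )

    func main() {
        bld := conf.Builder{
            Values:   map[string]string{"log.level": "DEBUG"},
            Defaults: conf.DEFAULTS,
            Argv:     []string{"-Dweb.address=127.0.0.1:9096"},
        }
        cnf, err := bld.Build()
        if err != nil {
            panic(err)
        }
        fmt.Println(cnf.Get(conf.HTRACE_LOG_LEVEL))   // DEBUG (explicit value)
        fmt.Println(cnf.Get(conf.HTRACE_WEB_ADDRESS)) // 127.0.0.1:9096 (from the -D flag)

        // Clone never mutates the original configuration.
        cnf2 := cnf.Clone(conf.HTRACE_LOG_LEVEL, "INFO")
        fmt.Println(cnf2.Get(conf.HTRACE_LOG_LEVEL)) // INFO
    }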

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/conf/config_keys.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/conf/config_keys.go b/htrace-htraced/go/src/htrace/conf/config_keys.go
new file mode 100644
index 0000000..08e2de4
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/conf/config_keys.go
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package conf
+
+import (
+	"fmt"
+	"os"
+)
+
+//
+// Configuration keys for HTrace.
+//
+
+// The platform-specific path separator.  Usually slash.
+var PATH_SEP string = fmt.Sprintf("%c", os.PathSeparator)
+
+// The platform-specific path list separator.  Usually colon.
+var PATH_LIST_SEP string = fmt.Sprintf("%c", os.PathListSeparator)
+
+// The name of the XML configuration file to look for.
+const CONFIG_FILE_NAME = "htraced-conf.xml"
+
+// An environment variable containing a list of paths to search for the
+// configuration file in.
+const HTRACED_CONF_DIR = "HTRACED_CONF_DIR"
+
+// The web address to start the REST server on.
+const HTRACE_WEB_ADDRESS = "web.address"
+
+// The default port for the HTrace web address.
+const HTRACE_WEB_ADDRESS_DEFAULT_PORT = 9096
+
+// The address to start the HRPC server on.
+const HTRACE_HRPC_ADDRESS = "hrpc.address"
+
+// The default port for the HTrace HRPC address.
+const HTRACE_HRPC_ADDRESS_DEFAULT_PORT = 9075
+
+// The directories to put the data store into.  Separated by PATH_LIST_SEP.
+const HTRACE_DATA_STORE_DIRECTORIES = "data.store.directories"
+
+// Boolean key which indicates whether we should clear data on startup.
+const HTRACE_DATA_STORE_CLEAR = "data.store.clear"
+
+// How many writes to buffer before applying backpressure to span senders.
+const HTRACE_DATA_STORE_SPAN_BUFFER_SIZE = "data.store.span.buffer.size"
+
+// Path to put the logs from htrace, or the empty string to use stdout.
+const HTRACE_LOG_PATH = "log.path"
+
+// The log level to use for the logs in htrace.
+const HTRACE_LOG_LEVEL = "log.level"
+
+// The period between datastore heartbeats.  This is the approximate interval at which we will
+// prune expired spans.
+const HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS = "datastore.heartbeat.period.ms"
+
+// The maximum number of addresses for which we will maintain metrics.
+const HTRACE_METRICS_MAX_ADDR_ENTRIES = "metrics.max.addr.entries"
+
+// The number of milliseconds we should keep spans before discarding them.
+const HTRACE_SPAN_EXPIRY_MS = "span.expiry.ms"
+
+// The period between updates to the span reaper.
+const HTRACE_REAPER_HEARTBEAT_PERIOD_MS = "reaper.heartbeat.period.ms"
+
+// A host:port pair to send information to on startup.  This is used in unit
+// tests to determine the (random) port of the htraced process that has been
+// started.
+const HTRACE_STARTUP_NOTIFICATION_ADDRESS = "startup.notification.address"
+
+// The maximum number of HRPC handler goroutines we will create at once.  If
+// this is too small, we won't get enough concurrency; if it's too big, we will
+// buffer too much data in memory while waiting for the datastore to process
+// requests.
+const HTRACE_NUM_HRPC_HANDLERS = "num.hrpc.handlers"
+
+// The I/O timeout HRPC will use, in milliseconds.  If it takes longer than
+// this to read or write a message, we will abort the connection.
+const HTRACE_HRPC_IO_TIMEOUT_MS = "hrpc.io.timeout.ms"
+
+// The leveldb write buffer size, or 0 to use the library default, which is 4
+// MB in leveldb 1.16.  See leveldb's options.h for more details.
+const HTRACE_LEVELDB_WRITE_BUFFER_SIZE = "leveldb.write.buffer.size"
+
+// The LRU cache size for leveldb, in bytes.
+const HTRACE_LEVELDB_CACHE_SIZE = "leveldb.cache.size"
+
+// Default values for HTrace configuration keys.
+var DEFAULTS = map[string]string{
+	HTRACE_WEB_ADDRESS:  fmt.Sprintf("0.0.0.0:%d", HTRACE_WEB_ADDRESS_DEFAULT_PORT),
+	HTRACE_HRPC_ADDRESS: fmt.Sprintf("0.0.0.0:%d", HTRACE_HRPC_ADDRESS_DEFAULT_PORT),
+	HTRACE_DATA_STORE_DIRECTORIES: PATH_SEP + "tmp" + PATH_SEP + "htrace1" +
+		PATH_LIST_SEP + PATH_SEP + "tmp" + PATH_SEP + "htrace2",
+	HTRACE_DATA_STORE_CLEAR:              "false",
+	HTRACE_DATA_STORE_SPAN_BUFFER_SIZE:   "100",
+	HTRACE_LOG_PATH:                      "",
+	HTRACE_LOG_LEVEL:                     "INFO",
+	HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: fmt.Sprintf("%d", 45*1000),
+	HTRACE_METRICS_MAX_ADDR_ENTRIES:      "100000",
+	HTRACE_SPAN_EXPIRY_MS:                "0",
+	HTRACE_REAPER_HEARTBEAT_PERIOD_MS:    fmt.Sprintf("%d", 90*1000),
+	HTRACE_NUM_HRPC_HANDLERS:             "20",
+	HTRACE_HRPC_IO_TIMEOUT_MS:            "60000",
+	HTRACE_LEVELDB_WRITE_BUFFER_SIZE:     "0",
+	HTRACE_LEVELDB_CACHE_SIZE:            fmt.Sprintf("%d", 100*1024*1024),
+}
+
+// Values to be used when creating test configurations
+func TEST_VALUES() map[string]string {
+	return map[string]string{
+		HTRACE_HRPC_ADDRESS:   ":0",    // use a random port for the HRPC server
+		HTRACE_LOG_LEVEL:      "TRACE", // show all log messages in tests
+		HTRACE_WEB_ADDRESS:    ":0",    // use a random port for the REST server
+		HTRACE_SPAN_EXPIRY_MS: "0",     // never time out spans (unless testing the reaper)
+	}
+}
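
For tests, the helpers above combine as in this sketch; the typed getters fall back to DEFAULTS for keys that TEST_VALUES does not override:

    package main

    import (
        "fmt"

        "htrace/conf"
    )

    func main() {
        bld := conf.Builder{Values: conf.TEST_VALUES(), Defaults: conf.DEFAULTS}
        cnf, err := bld.Build()
        if err != nil {
            panic(err)
        }
        fmt.Println(cnf.Get(conf.HTRACE_WEB_ADDRESS))          // :0  (random port)
        fmt.Println(cnf.GetInt64(conf.HTRACE_SPAN_EXPIRY_MS))  // 0   (spans never expire)
        fmt.Println(cnf.GetInt(conf.HTRACE_NUM_HRPC_HANDLERS)) // 20  (from DEFAULTS)
    }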



[4/7] incubator-htrace git commit: HTRACE-357. Rename htrace-htraced/go/src/org/apache/htrace to htrace-htraced/go/src/htrace (Colin Patrick McCabe via iwasakims)

Posted by iw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htracedTool/file_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htracedTool/file_test.go b/htrace-htraced/go/src/htrace/htracedTool/file_test.go
new file mode 100644
index 0000000..f955add
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htracedTool/file_test.go
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"errors"
+	"htrace/common"
+	"htrace/conf"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+)
+
+func TestInputFileAndOutputFile(t *testing.T) {
+	tdir, err := ioutil.TempDir(os.TempDir(), "TestInputFileAndOutputFile")
+	if err != nil {
+		t.Fatalf("failed to create TempDir: %s\n", err.Error())
+	}
+	defer os.RemoveAll(tdir)
+	tpath := tdir + conf.PATH_SEP + "test"
+	var ofile *OutputFile
+	ofile, err = CreateOutputFile(tpath)
+	if err != nil {
+		t.Fatalf("failed to create OutputFile at %s: %s\n", tpath, err.Error())
+	}
+	defer func() {
+		if ofile != nil {
+			ofile.Close()
+		}
+	}()
+	w := NewFailureDeferringWriter(ofile)
+	w.Printf("Hello, world!\n")
+	w.Printf("2 + 2 = %d\n", 4)
+	if w.Error() != nil {
+		t.Fatalf("got unexpected error writing to %s: %s\n", tpath, w.Error().Error())
+	}
+	err = ofile.Close()
+	ofile = nil
+	if err != nil {
+		t.Fatalf("error on closing OutputFile for %s: %s\n", tpath, err.Error())
+	}
+	var ifile *InputFile
+	ifile, err = OpenInputFile(tpath)
+	if err != nil {
+		t.Fatalf("failed to open InputFile at %s: %s\n", tpath, err.Error())
+	}
+	defer ifile.Close()
+	expected := "Hello, world!\n2 + 2 = 4\n"
+	buf := make([]byte, len(expected))
+	_, err = io.ReadAtLeast(ifile, buf, len(buf))
+	if err != nil {
+		t.Fatalf("unexpected error on reading %s: %s\n", tpath, err.Error())
+	}
+	str := string(buf)
+	if str != expected {
+		t.Fatalf("Could not read back what we wrote to %s.\n"+
+			"Got:\n%s\nExpected:\n%s\n", tpath, str, expected)
+	}
+}
+
+type LimitedBufferWriter struct {
+	buf []byte
+	off int
+}
+
+const LIMITED_BUFFER_MESSAGE = "There isn't enough buffer to go around!"
+
+func (w *LimitedBufferWriter) Write(p []byte) (int, error) {
+	var nwritten int
+	for i := range p {
+		if w.off >= len(w.buf) {
+			return nwritten, errors.New(LIMITED_BUFFER_MESSAGE)
+		}
+		w.buf[w.off] = p[i]
+		w.off = w.off + 1
+		nwritten++
+	}
+	return nwritten, nil
+}
+
+func TestFailureDeferringWriter(t *testing.T) {
+	lw := LimitedBufferWriter{buf: make([]byte, 20), off: 0}
+	w := NewFailureDeferringWriter(&lw)
+	w.Printf("Zippity do dah #%d\n", 1)
+	w.Printf("Zippity do dah #%d\n", 2)
+	if w.Error() == nil {
+		t.Fatalf("expected FailureDeferringWriter to experience a failure due to " +
+			"limited buffer size, but it did not.")
+	}
+	if w.Error().Error() != LIMITED_BUFFER_MESSAGE {
+		t.Fatalf("expected FailureDeferringWriter to have the error message %s, but "+
+			"the message was %s\n", LIMITED_BUFFER_MESSAGE, w.Error().Error())
+	}
+	expected := "Zippity do dah #1\nZi"
+	if string(lw.buf) != expected {
+		t.Fatalf("expected LimitedBufferWriter to contain %s, but it contained %s "+
+			"instead.\n", expected, string(lw.buf))
+	}
+}
+
+func TestReadSpans(t *testing.T) {
+	SPAN_TEST_STR := `{"a":"b9f2a1e07b6e4f16b0c2b27303b20e79",` +
+		`"b":1424736225037,"e":1424736225901,"d":"ClientNamenodeProtocol#getFileInfo",` +
+		`"r":"FsShell","p":["3afebdc0a13f4feb811cc5c0e42d30b1"]}
+{"a":"3afebdc0a13f4feb811cc5c0e42d30b1","b":1424736224969,` +
+		`"e":1424736225960,"d":"getFileInfo","r":"FsShell","p":[],"n":{"path":"/"}}
+`
+	r := strings.NewReader(SPAN_TEST_STR)
+	spans, err := readSpans(r)
+	if err != nil {
+		t.Fatalf("Failed to read spans from string via readSpans: %s\n", err.Error())
+	}
+	SPAN_TEST_EXPECTED := common.SpanSlice{
+		&common.Span{
+			Id: common.TestId("b9f2a1e07b6e4f16b0c2b27303b20e79"),
+			SpanData: common.SpanData{
+				Begin:       1424736225037,
+				End:         1424736225901,
+				Description: "ClientNamenodeProtocol#getFileInfo",
+				TracerId:    "FsShell",
+				Parents:     []common.SpanId{common.TestId("3afebdc0a13f4feb811cc5c0e42d30b1")},
+			},
+		},
+		&common.Span{
+			Id: common.TestId("3afebdc0a13f4feb811cc5c0e42d30b1"),
+			SpanData: common.SpanData{
+				Begin:       1424736224969,
+				End:         1424736225960,
+				Description: "getFileInfo",
+				TracerId:    "FsShell",
+				Parents:     []common.SpanId{},
+				Info: common.TraceInfoMap{
+					"path": "/",
+				},
+			},
+		},
+	}
+	if len(spans) != len(SPAN_TEST_EXPECTED) {
+		t.Fatalf("Expected %d spans, but got %d\n",
+			len(SPAN_TEST_EXPECTED), len(spans))
+	}
+	for i := range SPAN_TEST_EXPECTED {
+		common.ExpectSpansEqual(t, spans[i], SPAN_TEST_EXPECTED[i])
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htracedTool/graph.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htracedTool/graph.go b/htrace-htraced/go/src/htrace/htracedTool/graph.go
new file mode 100644
index 0000000..b238f11
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htracedTool/graph.go
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"htrace/common"
+	"io"
+	"os"
+	"sort"
+)
+
+// Create a dotfile from a json file.
+func jsonSpanFileToDotFile(jsonFile string, dotFile string) error {
+	spans, err := readSpansFile(jsonFile)
+	if err != nil {
+		return errors.New(fmt.Sprintf("error reading %s: %s",
+			jsonFile, err.Error()))
+	}
+	var file *OutputFile
+	file, err = CreateOutputFile(dotFile)
+	if err != nil {
+		return errors.New(fmt.Sprintf("error opening %s for write: %s",
+			dotFile, err.Error()))
+	}
+	defer func() {
+		if file != nil {
+			file.Close()
+		}
+	}()
+	writer := bufio.NewWriter(file)
+	err = spansToDot(spans, writer)
+	if err != nil {
+		return err
+	}
+	err = writer.Flush()
+	if err != nil {
+		return err
+	}
+	err = file.Close()
+	file = nil
+	return err
+}
+
+// Create output in dotfile format from a set of spans.
+func spansToDot(spans common.SpanSlice, writer io.Writer) error {
+	sort.Sort(spans)
+	idMap := make(map[[16]byte]*common.Span)
+	for i := range spans {
+		span := spans[i]
+		if idMap[span.Id.ToArray()] != nil {
+			fmt.Fprintf(os.Stderr, "There were multiple spans listed which "+
+				"had ID %s.\nFirst:%s\nOther:%s\n", span.Id.String(),
+				idMap[span.Id.ToArray()].ToJson(), span.ToJson())
+		} else {
+			idMap[span.Id.ToArray()] = span
+		}
+	}
+	childMap := make(map[[16]byte]common.SpanSlice)
+	for i := range spans {
+		child := spans[i]
+		for j := range child.Parents {
+			parent := idMap[child.Parents[j].ToArray()]
+			if parent == nil {
+				fmt.Fprintf(os.Stderr, "Can't find parent id %s for %s\n",
+					child.Parents[j].String(), child.ToJson())
+			} else {
+				children := childMap[parent.Id.ToArray()]
+				if children == nil {
+					children = make(common.SpanSlice, 0)
+				}
+				children = append(children, child)
+				childMap[parent.Id.ToArray()] = children
+			}
+		}
+	}
+	w := NewFailureDeferringWriter(writer)
+	w.Printf("digraph spans {\n")
+	// Write out the nodes with their descriptions.
+	for i := range spans {
+		w.Printf(fmt.Sprintf(`  "%s" [label="%s"];`+"\n",
+			spans[i].Id.String(), spans[i].Description))
+	}
+	// Write out the edges between nodes... the parent/children relationships
+	for i := range spans {
+		children := childMap[spans[i].Id.ToArray()]
+		sort.Sort(children)
+		if children != nil {
+			for c := range children {
+				w.Printf(fmt.Sprintf(`  "%s" -> "%s";`+"\n",
+					spans[i].Id.String(), children[c].Id.String()))
+			}
+		}
+	}
+	w.Printf("}\n")
+	return w.Error()
+}
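
A sketch of how these helpers compose inside the tool; dumpDot is a hypothetical in-package wrapper, and the output is ordinary Graphviz dot that the dot command can render:

    // dumpDot is a hypothetical helper living in this package: it reads a
    // span file (as readSpansFile does for jsonSpanFileToDotFile) and prints
    // the dot graph on stdout instead of writing it to a file.
    func dumpDot(jsonFile string) error {
        spans, err := readSpansFile(jsonFile)
        if err != nil {
            return fmt.Errorf("error reading %s: %s", jsonFile, err.Error())
        }
        return spansToDot(spans, os.Stdout)
    }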

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htracedTool/graph_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htracedTool/graph_test.go b/htrace-htraced/go/src/htrace/htracedTool/graph_test.go
new file mode 100644
index 0000000..af6d262
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htracedTool/graph_test.go
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"bytes"
+	"htrace/common"
+	"testing"
+)
+
+func TestSpansToDot(t *testing.T) {
+	TEST_SPANS := common.SpanSlice{
+		&common.Span{
+			Id: common.TestId("814c8ee0e7984be3a8af00ac64adccb6"),
+			SpanData: common.SpanData{
+				Begin:       1424813349020,
+				End:         1424813349134,
+				Description: "newDFSInputStream",
+				TracerId:    "FsShell",
+				Parents:     []common.SpanId{},
+				Info: common.TraceInfoMap{
+					"path": "/",
+				},
+			},
+		},
+		&common.Span{
+			Id: common.TestId("cf2d5de696454548bc055d1e6024054c"),
+			SpanData: common.SpanData{
+				Begin:       1424813349025,
+				End:         1424813349133,
+				Description: "getBlockLocations",
+				TracerId:    "FsShell",
+				Parents:     []common.SpanId{common.TestId("814c8ee0e7984be3a8af00ac64adccb6")},
+			},
+		},
+		&common.Span{
+			Id: common.TestId("37623806f9c64483b834b8ea5d6b4827"),
+			SpanData: common.SpanData{
+				Begin:       1424813349027,
+				End:         1424813349073,
+				Description: "ClientNamenodeProtocol#getBlockLocations",
+				TracerId:    "FsShell",
+				Parents:     []common.SpanId{common.TestId("cf2d5de696454548bc055d1e6024054c")},
+			},
+		},
+	}
+	w := bytes.NewBuffer(make([]byte, 0, 2048))
+	err := spansToDot(TEST_SPANS, w)
+	if err != nil {
+		t.Fatalf("spansToDot failed: error %s\n", err.Error())
+	}
+	EXPECTED_STR := `digraph spans {
+  "37623806f9c64483b834b8ea5d6b4827" [label="ClientNamenodeProtocol#getBlockLocations"];
+  "814c8ee0e7984be3a8af00ac64adccb6" [label="newDFSInputStream"];
+  "cf2d5de696454548bc055d1e6024054c" [label="getBlockLocations"];
+  "814c8ee0e7984be3a8af00ac64adccb6" -> "cf2d5de696454548bc055d1e6024054c";
+  "cf2d5de696454548bc055d1e6024054c" -> "37623806f9c64483b834b8ea5d6b4827";
+}
+`
+	if w.String() != EXPECTED_STR {
+		t.Fatalf("Expected to get:\n%s\nGot:\n%s\n", EXPECTED_STR, w.String())
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htracedTool/queries.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htracedTool/queries.go b/htrace-htraced/go/src/htrace/htracedTool/queries.go
new file mode 100644
index 0000000..3111c62
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htracedTool/queries.go
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	htrace "htrace/client"
+	"htrace/common"
+	"strings"
+	"unicode"
+)
+
+// Convert a string into a whitespace-separated sequence of strings.
+func tokenize(str string) []string {
+	prevQuote := rune(0)
+	f := func(c rune) bool {
+		switch {
+		case c == prevQuote:
+			prevQuote = rune(0)
+			return true
+		case prevQuote != rune(0):
+			return false
+		case unicode.In(c, unicode.Quotation_Mark):
+			prevQuote = c
+			return true
+		default:
+			return unicode.IsSpace(c)
+		}
+	}
+	return strings.FieldsFunc(str, f)
+}
+
+// Parses a query string in the format of a series of
+// [TYPE] [OPERATOR] [CONST] tuples, joined by AND statements.
+type predicateParser struct {
+	tokens   []string
+	curToken int
+}
+
+func (ps *predicateParser) Parse() (*common.Predicate, error) {
+	if ps.curToken >= len(ps.tokens) {
+		return nil, nil
+	}
+	if ps.curToken > 0 {
+		if strings.ToLower(ps.tokens[ps.curToken]) != "and" {
+			return nil, errors.New(fmt.Sprintf("Error parsing on token %d: "+
+				"expected predicates to be joined by 'and', but found '%s'",
+				ps.curToken, ps.tokens[ps.curToken]))
+		}
+		ps.curToken++
+		if ps.curToken >= len(ps.tokens) {
+			return nil, errors.New(fmt.Sprintf("Nothing found after 'and' at "+
+				"token %d", ps.curToken))
+		}
+	}
+	field := common.Field(strings.ToLower(ps.tokens[ps.curToken]))
+	if !field.IsValid() {
+		return nil, errors.New(fmt.Sprintf("Invalid field specifier at token %d.  "+
+			"Can't understand %s.  Valid field specifiers are %v", ps.curToken,
+			ps.tokens[ps.curToken], common.ValidFields()))
+	}
+	ps.curToken++
+	if ps.curToken >= len(ps.tokens) {
+		return nil, errors.New(fmt.Sprintf("Nothing found after field specifier "+
+			"at token %d", ps.curToken))
+	}
+	op := common.Op(strings.ToLower(ps.tokens[ps.curToken]))
+	if !op.IsValid() {
+		return nil, errors.New(fmt.Sprintf("Invalid operation specifier at token %d.  "+
+			"Can't understand %s.  Valid operation specifiers are %v", ps.curToken,
+			ps.tokens[ps.curToken], common.ValidOps()))
+	}
+	ps.curToken++
+	if ps.curToken >= len(ps.tokens) {
+		return nil, errors.New(fmt.Sprintf("Nothing found after operation specifier "+
+			"at token %d", ps.curToken))
+	}
+	val := ps.tokens[ps.curToken]
+	ps.curToken++
+	return &common.Predicate{Op: op, Field: field, Val: val}, nil
+}
+
+func parseQueryString(str string) ([]common.Predicate, error) {
+	ps := predicateParser{tokens: tokenize(str)}
+	if verbose {
+		fmt.Printf("Running query [ ")
+		prefix := ""
+		for tokenIdx := range ps.tokens {
+			fmt.Printf("%s'%s'", prefix, ps.tokens[tokenIdx])
+			prefix = ", "
+		}
+		fmt.Printf(" ]\n")
+	}
+	preds := make([]common.Predicate, 0)
+	for {
+		pred, err := ps.Parse()
+		if err != nil {
+			return nil, err
+		}
+		if pred == nil {
+			break
+		}
+		preds = append(preds, *pred)
+	}
+	if len(preds) == 0 {
+		return nil, errors.New("Empty query string")
+	}
+	return preds, nil
+}
+
+// Send a query from a query string.
+func doQueryFromString(hcl *htrace.Client, str string, lim int) error {
+	query := &common.Query{Lim: lim}
+	var err error
+	query.Predicates, err = parseQueryString(str)
+	if err != nil {
+		return err
+	}
+	return doQuery(hcl, query)
+}
+
+// Send a query from a raw JSON string.
+func doRawQuery(hcl *htrace.Client, str string) error {
+	jsonBytes := []byte(str)
+	var query common.Query
+	err := json.Unmarshal(jsonBytes, &query)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error parsing provided JSON: %s\n", err.Error()))
+	}
+	return doQuery(hcl, &query)
+}
+
+// Send a query.
+func doQuery(hcl *htrace.Client, query *common.Query) error {
+	if verbose {
+		qbytes, err := json.Marshal(*query)
+		if err != nil {
+			qbytes = []byte("marshaling error: " + err.Error())
+		}
+		fmt.Printf("Sending query: %s\n", string(qbytes))
+	}
+	spans, err := hcl.Query(query)
+	if err != nil {
+		return err
+	}
+	if verbose {
+		fmt.Printf("%d results...\n", len(spans))
+	}
+	for i := range spans {
+		fmt.Printf("%s\n", spans[i].ToJson())
+	}
+	return nil
+}
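
A sketch of the grammar the parser above accepts, written as a hypothetical in-package helper; the description and timestamp are arbitrary:

    // queryExample is a hypothetical helper: given an already constructed
    // client, it runs a small predicate query.
    func queryExample(hcl *htrace.Client) error {
        // "cn" is CONTAINS and "ge" is GREATER_THAN_OR_EQUALS; predicates are
        // joined with "and", and the result set is capped at 20 spans.
        return doQueryFromString(hcl,
            `description cn getFileInfo and begin ge 1424736224969`, 20)
    }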

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htracedTool/query_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htracedTool/query_test.go b/htrace-htraced/go/src/htrace/htracedTool/query_test.go
new file mode 100644
index 0000000..55aff91
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htracedTool/query_test.go
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"encoding/json"
+	"htrace/common"
+	"reflect"
+	"testing"
+)
+
+func predsToStr(preds []common.Predicate) string {
+	b, err := json.MarshalIndent(preds, "", "  ")
+	if err != nil {
+		return "JSON marshaling error: " + err.Error()
+	}
+	return string(b)
+}
+
+func checkParseQueryString(t *testing.T, str string, epreds []common.Predicate) {
+	preds, err := parseQueryString(str)
+	if err != nil {
+		t.Fatalf("got unexpected parseQueryString error: %s\n", err.Error())
+	}
+	if !reflect.DeepEqual(preds, epreds) {
+		t.Fatalf("Unexpected result from parseQueryString.  "+
+			"Expected: %s, got: %s\n", predsToStr(epreds), predsToStr(preds))
+	}
+}
+
+func TestParseQueryString(t *testing.T) {
+	verbose = testing.Verbose()
+	checkParseQueryString(t, "description eq ls", []common.Predicate{
+		common.Predicate{
+			Op:    common.EQUALS,
+			Field: common.DESCRIPTION,
+			Val:   "ls",
+		},
+	})
+	checkParseQueryString(t, "begin gt 123 and end le 456", []common.Predicate{
+		common.Predicate{
+			Op:    common.GREATER_THAN,
+			Field: common.BEGIN_TIME,
+			Val:   "123",
+		},
+		common.Predicate{
+			Op:    common.LESS_THAN_OR_EQUALS,
+			Field: common.END_TIME,
+			Val:   "456",
+		},
+	})
+	checkParseQueryString(t, `DESCRIPTION cn "Foo Bar" and `+
+		`BEGIN ge "999" and SPANID eq "4565d8abc4f70ac1216a3f1834c6860b"`,
+		[]common.Predicate{
+			common.Predicate{
+				Op:    common.CONTAINS,
+				Field: common.DESCRIPTION,
+				Val:   "Foo Bar",
+			},
+			common.Predicate{
+				Op:    common.GREATER_THAN_OR_EQUALS,
+				Field: common.BEGIN_TIME,
+				Val:   "999",
+			},
+			common.Predicate{
+				Op:    common.EQUALS,
+				Field: common.SPAN_ID,
+				Val:   "4565d8abc4f70ac1216a3f1834c6860b",
+			},
+		})
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/test/random.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/test/random.go b/htrace-htraced/go/src/htrace/test/random.go
new file mode 100644
index 0000000..ad3104c
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/test/random.go
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package test
+
+import (
+	"fmt"
+	"htrace/common"
+	"math/rand"
+)
+
+func NonZeroRand64(rnd *rand.Rand) int64 {
+	for {
+		r := rnd.Int63()
+		if r == 0 {
+			continue
+		}
+		if rnd.Intn(1) != 0 {
+			return -r
+		}
+		return r
+	}
+}
+
+func NonZeroRandSpanId(rnd *rand.Rand) common.SpanId {
+	var id common.SpanId
+	id = make([]byte, 16)
+	for i := 0; i < len(id); i++ {
+		id[i] = byte(rnd.Intn(0x100))
+	}
+	return id
+}
+
+func NonZeroRand32(rnd *rand.Rand) int32 {
+	for {
+		r := rnd.Int31()
+		if r == 0 {
+			continue
+		}
+		if rnd.Intn(1) != 0 {
+			return -r
+		}
+		return r
+	}
+}
+
+// Create a random span.
+func NewRandomSpan(rnd *rand.Rand, potentialParents []*common.Span) *common.Span {
+	parents := []common.SpanId{}
+	if potentialParents != nil {
+		parentIdx := rnd.Intn(len(potentialParents) + 1)
+		if parentIdx < len(potentialParents) {
+			parents = []common.SpanId{potentialParents[parentIdx].Id}
+		}
+	}
+	return &common.Span{Id: NonZeroRandSpanId(rnd),
+		SpanData: common.SpanData{
+			Begin:       NonZeroRand64(rnd),
+			End:         NonZeroRand64(rnd),
+			Description: "getFileDescriptors",
+			Parents:     parents,
+			TracerId:    fmt.Sprintf("tracer%d", NonZeroRand32(rnd)),
+		}}
+}
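
A sketch of generating a small, reproducible set of spans with the helpers above; the seed and count are arbitrary:

    package main

    import (
        "fmt"
        "math/rand"

        "htrace/common"
        "htrace/test"
    )

    func main() {
        rnd := rand.New(rand.NewSource(1)) // fixed seed => reproducible spans
        spans := make([]*common.Span, 0, 4)
        for i := 0; i < 4; i++ {
            // Each new span may pick one of the earlier spans as its parent.
            spans = append(spans, test.NewRandomSpan(rnd, spans))
        }
        for _, span := range spans {
            fmt.Println(span.String())
        }
    }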

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/client/client.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/client/client.go b/htrace-htraced/go/src/org/apache/htrace/client/client.go
deleted file mode 100644
index a2a6f8b..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/client/client.go
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package client
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-)
-
-// A golang client for htraced.
-// TODO: fancier APIs for streaming spans in the background, optimize TCP stuff
-func NewClient(cnf *conf.Config, testHooks *TestHooks) (*Client, error) {
-	hcl := Client{testHooks: testHooks}
-	hcl.restAddr = cnf.Get(conf.HTRACE_WEB_ADDRESS)
-	if testHooks != nil && testHooks.HrpcDisabled {
-		hcl.hrpcAddr = ""
-	} else {
-		hcl.hrpcAddr = cnf.Get(conf.HTRACE_HRPC_ADDRESS)
-	}
-	return &hcl, nil
-}
-
-type TestHooks struct {
-	// If true, HRPC is disabled.
-	HrpcDisabled bool
-
-	// A function which gets called after we connect to the server and send the
-	// message frame, but before we write the message body.
-	HandleWriteRequestBody func()
-}
-
-type Client struct {
-	// REST address of the htraced server.
-	restAddr string
-
-	// HRPC address of the htraced server.
-	hrpcAddr string
-
-	// The test hooks to use, or nil if test hooks are not enabled.
-	testHooks *TestHooks
-}
-
-// Get the htraced server version information.
-func (hcl *Client) GetServerVersion() (*common.ServerVersion, error) {
-	buf, _, err := hcl.makeGetRequest("server/info")
-	if err != nil {
-		return nil, err
-	}
-	var info common.ServerVersion
-	err = json.Unmarshal(buf, &info)
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("Error: error unmarshalling response "+
-			"body %s: %s", string(buf), err.Error()))
-	}
-	return &info, nil
-}
-
-// Get the htraced server debug information.
-func (hcl *Client) GetServerDebugInfo() (*common.ServerDebugInfo, error) {
-	buf, _, err := hcl.makeGetRequest("server/debugInfo")
-	if err != nil {
-		return nil, err
-	}
-	var debugInfo common.ServerDebugInfo
-	err = json.Unmarshal(buf, &debugInfo)
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("Error: error unmarshalling response "+
-			"body %s: %s", string(buf), err.Error()))
-	}
-	return &debugInfo, nil
-}
-
-// Get the htraced server statistics.
-func (hcl *Client) GetServerStats() (*common.ServerStats, error) {
-	buf, _, err := hcl.makeGetRequest("server/stats")
-	if err != nil {
-		return nil, err
-	}
-	var stats common.ServerStats
-	err = json.Unmarshal(buf, &stats)
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("Error: error unmarshalling response "+
-			"body %s: %s", string(buf), err.Error()))
-	}
-	return &stats, nil
-}
-
-// Get the htraced server configuration.
-func (hcl *Client) GetServerConf() (map[string]string, error) {
-	buf, _, err := hcl.makeGetRequest("server/conf")
-	if err != nil {
-		return nil, err
-	}
-	cnf := make(map[string]string)
-	err = json.Unmarshal(buf, &cnf)
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("Error: error unmarshalling response "+
-			"body %s: %s", string(buf), err.Error()))
-	}
-	return cnf, nil
-}
-
-// Get information about a trace span.  Returns nil, nil if the span was not found.
-func (hcl *Client) FindSpan(sid common.SpanId) (*common.Span, error) {
-	buf, rc, err := hcl.makeGetRequest(fmt.Sprintf("span/%s", sid.String()))
-	if err != nil {
-		if rc == http.StatusNoContent {
-			return nil, nil
-		}
-		return nil, err
-	}
-	var span common.Span
-	err = json.Unmarshal(buf, &span)
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("Error unmarshalling response "+
-			"body %s: %s", string(buf), err.Error()))
-	}
-	return &span, nil
-}
-
-func (hcl *Client) WriteSpans(spans []*common.Span) error {
-	if hcl.hrpcAddr == "" {
-		return hcl.writeSpansHttp(spans)
-	}
-	hcr, err := newHClient(hcl.hrpcAddr, hcl.testHooks)
-	if err != nil {
-		return err
-	}
-	defer hcr.Close()
-	return hcr.writeSpans(spans)
-}
-
-func (hcl *Client) writeSpansHttp(spans []*common.Span) error {
-	req := common.WriteSpansReq {
-		NumSpans: len(spans),
-	}
-	var w bytes.Buffer
-	enc := json.NewEncoder(&w)
-	err := enc.Encode(req)
-	if err != nil {
-		return errors.New(fmt.Sprintf("Error serializing WriteSpansReq: %s",
-			err.Error()))
-	}
-	for spanIdx := range(spans) {
-		err := enc.Encode(spans[spanIdx])
-		if err != nil {
-			return errors.New(fmt.Sprintf("Error serializing span %d out " +
-				"of %d: %s", spanIdx, len(spans), err.Error()))
-		}
-	}
-	_, _, err = hcl.makeRestRequest("POST", "writeSpans", &w)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Find the child IDs of a given span ID.
-func (hcl *Client) FindChildren(sid common.SpanId, lim int) ([]common.SpanId, error) {
-	buf, _, err := hcl.makeGetRequest(fmt.Sprintf("span/%s/children?lim=%d",
-		sid.String(), lim))
-	if err != nil {
-		return nil, err
-	}
-	var spanIds []common.SpanId
-	err = json.Unmarshal(buf, &spanIds)
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("Error: error unmarshalling response "+
-			"body %s: %s", string(buf), err.Error()))
-	}
-	return spanIds, nil
-}
-
-// Run a query against htraced and return the matching spans.
-func (hcl *Client) Query(query *common.Query) ([]common.Span, error) {
-	in, err := json.Marshal(query)
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("Error marshalling query: %s", err.Error()))
-	}
-	var out []byte
-	var url = fmt.Sprintf("query?query=%s", in)
-	out, _, err = hcl.makeGetRequest(url)
-	if err != nil {
-		return nil, err
-	}
-	var spans []common.Span
-	err = json.Unmarshal(out, &spans)
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("Error unmarshalling results: %s", err.Error()))
-	}
-	return spans, nil
-}
-
-func (hcl *Client) makeGetRequest(reqName string) ([]byte, int, error) {
-	return hcl.makeRestRequest("GET", reqName, nil)
-}
-
-// Make a general JSON REST request.
-// Returns the response body, the HTTP status code, and the error.
-// Note: if the returned status code is non-zero, the error will also be non-nil.
-func (hcl *Client) makeRestRequest(reqType string, reqName string,
-	reqBody io.Reader) ([]byte, int, error) {
-	url := fmt.Sprintf("http://%s/%s",
-		hcl.restAddr, reqName)
-	req, err := http.NewRequest(reqType, url, reqBody)
-	if err != nil {
-		return nil, -1, errors.New(fmt.Sprintf("Error: error creating http "+
-			"request for %s: %s\n", url, err.Error()))
-	}
-	req.Header.Set("Content-Type", "application/json")
-	client := &http.Client{}
-	resp, err := client.Do(req)
-	if err != nil {
-		return nil, -1, errors.New(fmt.Sprintf("Error: error making http request to %s: %s\n", url,
-			err.Error()))
-	}
-	defer resp.Body.Close()
-	body, err2 := ioutil.ReadAll(resp.Body)
-	if err2 != nil {
-		return nil, -1, errors.New(fmt.Sprintf("Error: error reading response body: %s\n", err2.Error()))
-	}
-	if resp.StatusCode != http.StatusOK {
-		return nil, resp.StatusCode,
-			errors.New(fmt.Sprintf("Error: got bad response status from " +
-				"%s: %s\n%s\n", url, resp.Status, body))
-	}
-	return body, 0, nil
-}
-
-// Dump all spans from the htraced daemon.
-func (hcl *Client) DumpAll(lim int, out chan *common.Span) error {
-	defer func() {
-		close(out)
-	}()
-	searchId := common.INVALID_SPAN_ID
-	for {
-		q := common.Query{
-			Lim: lim,
-			Predicates: []common.Predicate{
-				common.Predicate{
-					Op:    "ge",
-					Field: "spanid",
-					Val:   searchId.String(),
-				},
-			},
-		}
-		spans, err := hcl.Query(&q)
-		if err != nil {
-			return errors.New(fmt.Sprintf("Error querying spans with IDs at or after "+
-				"%s: %s", searchId.String(), err.Error()))
-		}
-		if len(spans) == 0 {
-			return nil
-		}
-		for i := range spans {
-			out <- &spans[i]
-		}
-		searchId = spans[len(spans)-1].Id.Next()
-	}
-}
-
-func (hcl *Client) Close() {
-	hcl.restAddr = ""
-	hcl.hrpcAddr = ""
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/client/hclient.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/client/hclient.go b/htrace-htraced/go/src/org/apache/htrace/client/hclient.go
deleted file mode 100644
index 43f0c6c..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/client/hclient.go
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package client
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"github.com/ugorji/go/codec"
-	"io"
-	"net"
-	"net/rpc"
-	"org/apache/htrace/common"
-)
-
-type hClient struct {
-	rpcClient *rpc.Client
-}
-
-type HrpcClientCodec struct {
-	rwc       io.ReadWriteCloser
-	length    uint32
-	testHooks *TestHooks
-}
-
-func (cdc *HrpcClientCodec) WriteRequest(rr *rpc.Request, msg interface{}) error {
-	methodId := common.HrpcMethodNameToId(rr.ServiceMethod)
-	if methodId == common.METHOD_ID_NONE {
-		return errors.New(fmt.Sprintf("HrpcClientCodec: Unknown method name %s",
-			rr.ServiceMethod))
-	}
-	mh := new(codec.MsgpackHandle)
-	mh.WriteExt = true
-	w := bytes.NewBuffer(make([]byte, 0, 2048))
-
-	var err error
-	enc := codec.NewEncoder(w, mh)
-	if methodId == common.METHOD_ID_WRITE_SPANS {
-		spans := msg.([]*common.Span)
-		req := &common.WriteSpansReq {
-			NumSpans: len(spans),
-		}
-		err = enc.Encode(req)
-		if err != nil {
-			return errors.New(fmt.Sprintf("HrpcClientCodec: Unable to marshal "+
-				"message as msgpack: %s", err.Error()))
-		}
-		for spanIdx := range(spans) {
-			err = enc.Encode(spans[spanIdx])
-			if err != nil {
-				return errors.New(fmt.Sprintf("HrpcClientCodec: Unable to marshal "+
-					"span %d out of %d as msgpack: %s", spanIdx, len(spans), err.Error()))
-			}
-		}
-	} else {
-		err = enc.Encode(msg)
-		if err != nil {
-			return errors.New(fmt.Sprintf("HrpcClientCodec: Unable to marshal "+
-				"message as msgpack: %s", err.Error()))
-		}
-	}
-	buf := w.Bytes()
-	if len(buf) > common.MAX_HRPC_BODY_LENGTH {
-		return errors.New(fmt.Sprintf("HrpcClientCodec: message body is %d "+
-			"bytes, but the maximum message size is %d bytes.",
-			len(buf), common.MAX_HRPC_BODY_LENGTH))
-	}
-	hdr := common.HrpcRequestHeader{
-		Magic:    common.HRPC_MAGIC,
-		MethodId: methodId,
-		Seq:      rr.Seq,
-		Length:   uint32(len(buf)),
-	}
-	err = binary.Write(cdc.rwc, binary.LittleEndian, &hdr)
-	if err != nil {
-		return errors.New(fmt.Sprintf("Error writing header bytes: %s",
-			err.Error()))
-	}
-	if cdc.testHooks != nil && cdc.testHooks.HandleWriteRequestBody != nil {
-		cdc.testHooks.HandleWriteRequestBody()
-	}
-	_, err = cdc.rwc.Write(buf)
-	if err != nil {
-		return errors.New(fmt.Sprintf("Error writing body bytes: %s",
-			err.Error()))
-	}
-	return nil
-}
-
-func (cdc *HrpcClientCodec) ReadResponseHeader(resp *rpc.Response) error {
-	hdr := common.HrpcResponseHeader{}
-	err := binary.Read(cdc.rwc, binary.LittleEndian, &hdr)
-	if err != nil {
-		return errors.New(fmt.Sprintf("Error reading response header "+
-			"bytes: %s", err.Error()))
-	}
-	resp.ServiceMethod = common.HrpcMethodIdToMethodName(hdr.MethodId)
-	if resp.ServiceMethod == "" {
-		return errors.New(fmt.Sprintf("Error reading response header: "+
-			"invalid method ID %d.", hdr.MethodId))
-	}
-	resp.Seq = hdr.Seq
-	if hdr.ErrLength > 0 {
-		if hdr.ErrLength > common.MAX_HRPC_ERROR_LENGTH {
-			return errors.New(fmt.Sprintf("Error reading response header: "+
-				"error message was %d bytes long, but "+
-				"MAX_HRPC_ERROR_LENGTH is %d.",
-				hdr.ErrLength, common.MAX_HRPC_ERROR_LENGTH))
-		}
-		buf := make([]byte, hdr.ErrLength)
-		var nread int
-		nread, err = io.ReadFull(cdc.rwc, buf)
-		if uint32(nread) != hdr.ErrLength {
-			return errors.New(fmt.Sprintf("Error reading response header: "+
-				"failed to read %d bytes of error message.", nread))
-		}
-		if err != nil {
-			return errors.New(fmt.Sprintf("Error reading response header: "+
-				"failed to read %d bytes of error message: %s",
-				nread, err.Error()))
-		}
-		resp.Error = string(buf)
-	} else {
-		resp.Error = ""
-	}
-	cdc.length = hdr.Length
-	return nil
-}
-
-func (cdc *HrpcClientCodec) ReadResponseBody(body interface{}) error {
-	mh := new(codec.MsgpackHandle)
-	mh.WriteExt = true
-	dec := codec.NewDecoder(io.LimitReader(cdc.rwc, int64(cdc.length)), mh)
-	err := dec.Decode(body)
-	if err != nil {
-		return errors.New(fmt.Sprintf("Failed to read response body: %s",
-			err.Error()))
-	}
-	return nil
-}
-
-func (cdc *HrpcClientCodec) Close() error {
-	return cdc.rwc.Close()
-}
-
-func newHClient(hrpcAddr string, testHooks *TestHooks) (*hClient, error) {
-	hcr := hClient{}
-	conn, err := net.Dial("tcp", hrpcAddr)
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("Error contacting the HRPC server "+
-			"at %s: %s", hrpcAddr, err.Error()))
-	}
-	hcr.rpcClient = rpc.NewClientWithCodec(&HrpcClientCodec{
-		rwc:       conn,
-		testHooks: testHooks,
-	})
-	return &hcr, nil
-}
-
-func (hcr *hClient) writeSpans(spans []*common.Span) error {
-	resp := common.WriteSpansResp{}
-	return hcr.rpcClient.Call(common.METHOD_NAME_WRITE_SPANS, spans, &resp)
-}
-
-func (hcr *hClient) Close() {
-	hcr.rpcClient.Close()
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/log.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/log.go b/htrace-htraced/go/src/org/apache/htrace/common/log.go
deleted file mode 100644
index 8cb3953..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/log.go
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"errors"
-	"fmt"
-	"log"
-	"org/apache/htrace/conf"
-	"os"
-	"path/filepath"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-)
-
-// A logSink is a place logs can be written to.
-type logSink struct {
-	path     logPath
-	file     *os.File
-	lock     sync.Mutex
-	refCount int // protected by logFilesLock
-}
-
-// Write to the logSink.
-func (sink *logSink) write(str string) {
-	sink.lock.Lock()
-	defer sink.lock.Unlock()
-	_, err := sink.file.Write([]byte(str))
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error logging to '%s': %s\n", sink.path, err.Error())
-	}
-}
-
-// Unreference the logSink.  If there are no more references, and the logSink is
-// closeable, then we will close it here.
-func (sink *logSink) Unref() {
-	logFilesLock.Lock()
-	defer logFilesLock.Unlock()
-	sink.refCount--
-	if sink.refCount <= 0 {
-		if sink.path.IsCloseable() {
-			err := sink.file.Close()
-			if err != nil {
-				fmt.Fprintf(os.Stderr, "Error closing log file %s: %s\n",
-					sink.path, err.Error())
-			}
-		}
-		logSinks[sink.path] = nil
-	}
-}
-
-type logPath string
-
-// An empty logPath represents stdout.
-const STDOUT_LOG_PATH = ""
-
-// Convert a path to a logPath.
-func logPathFromString(path string) logPath {
-	if path == STDOUT_LOG_PATH {
-		return logPath("")
-	}
-	absPath, err := filepath.Abs(path)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Failed to get absolute path of %s: %s\n",
-			path, err.Error())
-		return logPath(path)
-	}
-	return logPath(absPath)
-}
-
-// Convert the path to a human-readable string.
-func (path logPath) String() string {
-	if path == "" {
-		return "(stdout)"
-	} else {
-		return string(path)
-	}
-}
-
-// Return true if the path is closeable.  stdout is not closeable.
-func (path logPath) IsCloseable() bool {
-	return path != STDOUT_LOG_PATH
-}
-
-func (path logPath) Open() *logSink {
-	if path == STDOUT_LOG_PATH {
-		return &logSink{path: path, file: os.Stdout}
-	}
-	file, err := os.OpenFile(string(path), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
-	if err != nil {
-		sink := &logSink{path: STDOUT_LOG_PATH, file: os.Stdout}
-		fmt.Fprintf(os.Stderr, "Failed to open log file %s: %s\n",
-			path, err.Error())
-		return sink
-	}
-	return &logSink{path: path, file: file}
-}
-
-var logFilesLock sync.Mutex
-
-var logSinks map[logPath]*logSink = make(map[logPath]*logSink)
-
-func getOrCreateLogSink(pathStr string) *logSink {
-	path := logPathFromString(pathStr)
-	logFilesLock.Lock()
-	defer logFilesLock.Unlock()
-	sink := logSinks[path]
-	if sink == nil {
-		sink = path.Open()
-		logSinks[path] = sink
-	}
-	sink.refCount++
-	return sink
-}
-
-type Level int
-
-const (
-	TRACE Level = iota
-	DEBUG
-	INFO
-	WARN
-	ERROR
-)
-
-var levelToString map[Level]string = map[Level]string{
-	TRACE: "TRACE",
-	DEBUG: "DEBUG",
-	INFO:  "INFO",
-	WARN:  "WARN",
-	ERROR: "ERROR",
-}
-
-func (level Level) String() string {
-	return levelToString[level]
-}
-
-func (level Level) LogString() string {
-	return level.String()[0:1]
-}
-
-func LevelFromString(str string) (Level, error) {
-	for k, v := range levelToString {
-		if strings.ToLower(v) == strings.ToLower(str) {
-			return k, nil
-		}
-	}
-	var levelNames sort.StringSlice
-	levelNames = make([]string, len(levelToString))
-	var i int
-	for _, v := range levelToString {
-		levelNames[i] = v
-		i++
-	}
-	sort.Sort(levelNames)
-	return TRACE, errors.New(fmt.Sprintf("No such level as '%s'.  Valid "+
-		"levels are '%v'\n", str, levelNames))
-}
-
-type Logger struct {
-	sink  *logSink
-	Level Level
-}
-
-func NewLogger(faculty string, cnf *conf.Config) *Logger {
-	path, level := parseConf(faculty, cnf)
-	sink := getOrCreateLogSink(path)
-	return &Logger{sink: sink, Level: level}
-}
-
-func parseConf(faculty string, cnf *conf.Config) (string, Level) {
-	facultyLogPathKey := faculty + "." + conf.HTRACE_LOG_PATH
-	var facultyLogPath string
-	if cnf.Contains(facultyLogPathKey) {
-		facultyLogPath = cnf.Get(facultyLogPathKey)
-	} else {
-		facultyLogPath = cnf.Get(conf.HTRACE_LOG_PATH)
-	}
-	facultyLogLevelKey := faculty + "." + conf.HTRACE_LOG_LEVEL
-	var facultyLogLevelStr string
-	if cnf.Contains(facultyLogLevelKey) {
-		facultyLogLevelStr = cnf.Get(facultyLogLevelKey)
-	} else {
-		facultyLogLevelStr = cnf.Get(conf.HTRACE_LOG_LEVEL)
-	}
-	level, err := LevelFromString(facultyLogLevelStr)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error configuring log level: %s.  Using TRACE.\n")
-		level = TRACE
-	}
-	return facultyLogPath, level
-}
-
-func (lg *Logger) Trace(str string) {
-	lg.Write(TRACE, str)
-}
-
-func (lg *Logger) Tracef(format string, v ...interface{}) {
-	lg.Write(TRACE, fmt.Sprintf(format, v...))
-}
-
-func (lg *Logger) Debug(str string) {
-	lg.Write(DEBUG, str)
-}
-
-func (lg *Logger) Debugf(format string, v ...interface{}) {
-	lg.Write(DEBUG, fmt.Sprintf(format, v...))
-}
-
-func (lg *Logger) Info(str string) {
-	lg.Write(INFO, str)
-}
-
-func (lg *Logger) Infof(format string, v ...interface{}) {
-	lg.Write(INFO, fmt.Sprintf(format, v...))
-}
-
-func (lg *Logger) Warn(str string) error {
-	lg.Write(WARN, str)
-	return errors.New(str)
-}
-
-func (lg *Logger) Warnf(format string, v ...interface{}) error {
-	str := fmt.Sprintf(format, v...)
-	lg.Write(WARN, str)
-	return errors.New(str)
-}
-
-func (lg *Logger) Error(str string) error {
-	lg.Write(ERROR, str)
-	return errors.New(str)
-}
-
-func (lg *Logger) Errorf(format string, v ...interface{}) error {
-	str := fmt.Sprintf(format, v...)
-	lg.Write(ERROR, str)
-	return errors.New(str)
-}
-
-func (lg *Logger) Write(level Level, str string) {
-	if level >= lg.Level {
-		lg.sink.write(time.Now().UTC().Format(time.RFC3339) + " " +
-			level.LogString() + ": " + str)
-	}
-}
-
-//
-// A few functions which can be used to determine if a certain level of tracing
-// is enabled.  These are useful in situations when evaluating the parameters
-// of a logging function is expensive.  (Note, however, that we don't pay the
-// cost of string concatenation and manipulation when a log message doesn't
-// trigger.)
-//
-
-func (lg *Logger) TraceEnabled() bool {
-	return lg.Level <= TRACE
-}
-
-func (lg *Logger) DebugEnabled() bool {
-	return lg.Level <= DEBUG
-}
-
-func (lg *Logger) InfoEnabled() bool {
-	return lg.Level <= INFO
-}
-
-func (lg *Logger) WarnEnabled() bool {
-	return lg.Level <= WARN
-}
-
-func (lg *Logger) ErrorEnabled() bool {
-	return lg.Level <= ERROR
-}
-
-func (lg *Logger) LevelEnabled(level Level) bool {
-	return lg.Level <= level
-}
-
-func (lg *Logger) Close() {
-	lg.sink.Unref()
-	lg.sink = nil
-}
-
-// Wraps an htrace logger in a golang standard logger.
-//
-// This is a bit messy because of the difference in interfaces between the
-// golang standard logger and the htrace logger.  The golang standard logger
-// doesn't support log levels directly, so you must choose up front what htrace
-// log level all messages should be treated as.  Golang standard loggers expect
-// to be able to write to an io.Writer, but make no guarantees about whether
-// they will break messages into multiple Write() calls (although this does
-// not seem to be a major problem in practice.)
-//
-// Despite these limitations, it's still useful to have this method to be able
-// to log things that come out of the go HTTP server and other standard library
-// systems.
-type WrappedLogger struct {
-	lg    *Logger
-	level Level
-}
-
-func (lg *Logger) Wrap(prefix string, level Level) *log.Logger {
-	wlg := &WrappedLogger{
-		lg:    lg,
-		level: level,
-	}
-	return log.New(wlg, prefix, 0)
-}
-
-func (wlg *WrappedLogger) Write(p []byte) (int, error) {
-	wlg.lg.Write(wlg.level, string(p))
-	return len(p), nil
-}
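
A sketch of constructing and using the logger above (the faculty name and address are placeholders; the configuration keys are the ones read by parseConf):

package main

import (
	"net/http"

	"org/apache/htrace/common"
	"org/apache/htrace/conf"
)

func main() {
	// Per-faculty overrides use "<faculty>.log.level" and "<faculty>.log.path";
	// an empty log.path means stdout.
	cnfBld := conf.Builder{
		Values: map[string]string{
			conf.HTRACE_LOG_LEVEL: "INFO",
			conf.HTRACE_LOG_PATH:  "",
		},
		Defaults: conf.DEFAULTS,
	}
	cnf, err := cnfBld.Build()
	if err != nil {
		panic(err)
	}
	lg := common.NewLogger("rest", cnf)
	defer lg.Close()
	if lg.DebugEnabled() {
		lg.Debugf("debug logging is enabled\n")
	}
	lg.Infof("starting up\n")

	// Wrap adapts the htrace logger to the standard library's *log.Logger,
	// e.g. for http.Server.ErrorLog.
	srv := &http.Server{
		Addr:     "127.0.0.1:0",
		ErrorLog: lg.Wrap("[http] ", common.WARN),
	}
	_ = srv
}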

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/log_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/log_test.go b/htrace-htraced/go/src/org/apache/htrace/common/log_test.go
deleted file mode 100644
index f0b1cde..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/log_test.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"org/apache/htrace/conf"
-	"os"
-	"strings"
-	"testing"
-)
-
-func newLogger(faculty string, args ...string) *Logger {
-	cnfBld := conf.Builder{Defaults: conf.DEFAULTS}
-	cnf, err := cnfBld.Build()
-	if err != nil {
-		panic(fmt.Sprintf("failed to create conf: %s", err.Error()))
-	}
-	cnf2 := cnf.Clone(args...)
-	lg := NewLogger(faculty, cnf2)
-	return lg
-}
-
-func TestNewLogger(t *testing.T) {
-	lg := newLogger("foo", "log.level", "TRACE")
-	lg.Close()
-}
-
-func verifyLines(t *testing.T, rdr io.Reader, lines []string) {
-	scanner := bufio.NewScanner(rdr)
-	lineIdx := 0
-	for scanner.Scan() {
-		line := scanner.Text()
-		if !strings.Contains(line, lines[lineIdx]) {
-			t.Fatalf("Error on line %d: didn't find substring '%s' in line '%s'\n",
-				(lineIdx + 1), lines[lineIdx], line)
-		}
-		lineIdx++
-	}
-	if err := scanner.Err(); err != nil {
-		t.Fatal(err.Error())
-	}
-}
-
-func TestFileLogs(t *testing.T) {
-	tempDir, err := ioutil.TempDir(os.TempDir(), "testFileLogs")
-	if err != nil {
-		panic(fmt.Sprintf("error creating tempdir: %s\n", err.Error()))
-	}
-	defer os.RemoveAll(tempDir)
-	logPath := tempDir + conf.PATH_SEP + "log"
-	lg := newLogger("foo", "log.level", "DEBUG",
-		"foo.log.level", "INFO",
-		"log.path", logPath)
-	lg.Tracef("Non-important stuff, ignore this.\n")
-	lg.Infof("problem with the foobar\n")
-	lg.Tracef("More non-important stuff, also ignore this.\n")
-	lg.Infof("and another problem with the foobar\n")
-	logFile, err := os.Open(logPath)
-	if err != nil {
-		t.Fatalf("failed to open file %s: %s\n", logPath, err.Error())
-	}
-	verifyLines(t, logFile, []string{
-		"problem with the foobar",
-		"and another problem with the foobar",
-	})
-	logFile.Close()
-	lg.Close()
-}
-
-func TestMultipleFileLogs(t *testing.T) {
-	tempDir, err := ioutil.TempDir(os.TempDir(), "testMultipleFileLogs")
-	if err != nil {
-		panic(fmt.Sprintf("error creating tempdir: %s\n", err.Error()))
-	}
-	defer os.RemoveAll(tempDir)
-	logPath := tempDir + conf.PATH_SEP + "log"
-	fooLg := newLogger("foo", "log.level", "DEBUG",
-		"foo.log.level", "INFO",
-		"log.path", logPath)
-	fooLg.Infof("The foo needs maintenance.\n")
-	barLg := newLogger("bar", "log.level", "DEBUG",
-		"foo.log.level", "INFO",
-		"log.path", logPath)
-	barLg.Debugf("The bar is open\n")
-	fooLg.Errorf("Fizz buzz\n")
-	logFile, err := os.Open(logPath)
-	if err != nil {
-		t.Fatalf("failed to open file %s: %s\n", logPath, err.Error())
-	}
-	fooLg.Tracef("Fizz buzz2\n")
-	barLg.Tracef("Fizz buzz3\n")
-	verifyLines(t, logFile, []string{
-		"The foo needs maintenance.",
-		"The bar is open",
-		"Fizz buzz",
-		"Fizz buzz3",
-	})
-	logFile.Close()
-	fooLg.Close()
-	barLg.Close()
-}
-
-func TestLogLevelEnabled(t *testing.T) {
-	tempDir, err := ioutil.TempDir(os.TempDir(), "TestLogLevelEnabled")
-	if err != nil {
-		panic(fmt.Sprintf("error creating tempdir: %s\n", err.Error()))
-	}
-	defer os.RemoveAll(tempDir)
-	// set log level to DEBUG for facility "foo"
-	logPath := tempDir + conf.PATH_SEP + "log"
-	lg := newLogger("foo", "log.level", "DEBUG",
-		"foo.log.level", "INFO",
-		"log.path", logPath)
-	if lg.TraceEnabled() {
-		t.Fatalf("foo logger has TraceEnabled")
-	}
-	if lg.DebugEnabled() {
-		t.Fatalf("foo logger have DebugEnabled")
-	}
-	if !lg.InfoEnabled() {
-		t.Fatalf("foo logger does not have InfoEnabled")
-	}
-	if !lg.WarnEnabled() {
-		t.Fatalf("foo logger does not have WarnEnabled")
-	}
-	if !lg.ErrorEnabled() {
-		t.Fatalf("foo logger does not have ErrorEnabled")
-	}
-	lg.Close()
-	lg = newLogger("foo", "log.level", "WARN",
-		"foo.log.level", "INFO",
-		"log.path", logPath)
-	if lg.TraceEnabled() {
-		t.Fatalf("foo logger has TraceEnabled")
-	}
-	if lg.DebugEnabled() {
-		t.Fatalf("foo logger has DebugEnabled")
-	}
-	if !lg.InfoEnabled() {
-		t.Fatalf("foo logger does not have InfoEnabled")
-	}
-	if !lg.WarnEnabled() {
-		t.Fatalf("foo logger does not have WarnEnabled")
-	}
-	if !lg.ErrorEnabled() {
-		t.Fatalf("foo logger does not have ErrorEnabled")
-	}
-	lg.Close()
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/process.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/process.go b/htrace-htraced/go/src/org/apache/htrace/common/process.go
deleted file mode 100644
index ce32067..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/process.go
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"bytes"
-	"fmt"
-	"org/apache/htrace/conf"
-	"os"
-	"os/signal"
-	"runtime"
-	"runtime/debug"
-	"syscall"
-)
-
-func InstallSignalHandlers(cnf *conf.Config) {
-	fatalSigs := []os.Signal{
-		os.Interrupt,
-		os.Kill,
-		syscall.SIGINT,
-		syscall.SIGABRT,
-		syscall.SIGALRM,
-		syscall.SIGBUS,
-		syscall.SIGFPE,
-		syscall.SIGILL,
-		syscall.SIGSEGV,
-		syscall.SIGTERM,
-	}
-	fatalSigChan := make(chan os.Signal, 1)
-	signal.Notify(fatalSigChan, fatalSigs...)
-	lg := NewLogger("signal", cnf)
-	go func() {
-		sig := <-fatalSigChan
-		lg.Errorf("Terminating on signal: %v\n", sig)
-		lg.Close()
-		os.Exit(1)
-	}()
-
-	sigQuitChan := make(chan os.Signal, 1)
-	signal.Notify(sigQuitChan, syscall.SIGQUIT)
-	go func() {
-		stackTraceBuf := make([]byte, 1<<20)
-		for {
-			<-sigQuitChan
-			GetStackTraces(&stackTraceBuf)
-			lg.Info("=== received SIGQUIT ===\n")
-			lg.Info("=== GOROUTINE STACKS ===\n")
-			lg.Info(string(stackTraceBuf))
-			lg.Info("\n=== END GOROUTINE STACKS ===\n")
-			lg.Info("=== GC STATISTICS ===\n")
-			lg.Info(GetGCStats())
-			lg.Info("=== END GC STATISTICS ===\n")
-		}
-	}()
-}
-
-func GetStackTraces(buf *[]byte) {
-	*buf = (*buf)[0:cap(*buf)]
-	neededBytes := runtime.Stack(*buf, true)
-	for neededBytes > len(*buf) {
-		*buf = make([]byte, neededBytes)
-		runtime.Stack(*buf, true)
-	}
-	*buf = (*buf)[0:neededBytes]
-}
-
-func GetGCStats() string {
-	gcs := debug.GCStats{}
-	debug.ReadGCStats(&gcs)
-	var buf bytes.Buffer
-	buf.WriteString(fmt.Sprintf("LastGC: %s\n", gcs.LastGC.UTC().String()))
-	buf.WriteString(fmt.Sprintf("NumGC: %d\n", gcs.NumGC))
-	buf.WriteString(fmt.Sprintf("PauseTotal: %v\n", gcs.PauseTotal))
-	if gcs.Pause != nil {
-		pauseStr := ""
-		prefix := ""
-		for p := range gcs.Pause {
-			pauseStr += prefix + gcs.Pause[p].String()
-			prefix = ", "
-		}
-		buf.WriteString(fmt.Sprintf("Pause History: %s\n", pauseStr))
-	}
-	return buf.String()
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/process_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/process_test.go b/htrace-htraced/go/src/org/apache/htrace/common/process_test.go
deleted file mode 100644
index d3f5a56..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/process_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"bufio"
-	"fmt"
-	"org/apache/htrace/conf"
-	"os"
-	"os/exec"
-	"strings"
-	"syscall"
-	"testing"
-	"time"
-)
-
-const HTRACED_TEST_HELPER_PROCESS = "HTRACED_TEST_HELPER_PROCESS"
-
-// This test runs a helper process which will install our htraced signal
-// handlers.  We will send signals to the subprocess and verify that it has
-// caught them and responded appropriately.
-func TestSignals(t *testing.T) {
-	if os.Getenv(HTRACED_TEST_HELPER_PROCESS) == "1" {
-		runHelperProcess()
-		os.Exit(0)
-	}
-	helper := exec.Command(os.Args[0], "-test.run=TestSignals", "--")
-	helper.Env = []string{HTRACED_TEST_HELPER_PROCESS + "=1"}
-	stdoutPipe, err := helper.StdoutPipe()
-	if err != nil {
-		panic(fmt.Sprintf("Failed to open pipe to process stdout: %s",
-			err.Error()))
-	}
-	stderrPipe, err := helper.StderrPipe()
-	if err != nil {
-		panic(fmt.Sprintf("Failed to open pipe to process stderr: %s",
-			err.Error()))
-	}
-	err = helper.Start()
-	if err != nil {
-		t.Fatal("Failed to start command %s: %s\n", os.Args[0], err.Error())
-	}
-	t.Logf("Started suprocess...\n")
-	done := make(chan interface{})
-	go func() {
-		scanner := bufio.NewScanner(stdoutPipe)
-		for scanner.Scan() {
-			text := scanner.Text()
-			if strings.Contains(text, "=== GOROUTINE STACKS ===") {
-				break
-			}
-		}
-		t.Logf("Saw 'GOROUTINE STACKS on stdout.'  Sending SIGINT.\n")
-		helper.Process.Signal(syscall.SIGINT)
-		for scanner.Scan() {
-			text := scanner.Text()
-			if strings.Contains(text, "Terminating on signal: SIGINT") {
-				break
-			}
-		}
-		t.Logf("Saw 'Terminating on signal: SIGINT'.  " +
-			"Helper goroutine exiting.\n")
-		done <- nil
-	}()
-	scanner := bufio.NewScanner(stderrPipe)
-	for scanner.Scan() {
-		text := scanner.Text()
-		if strings.Contains(text, "Signal handler installed.") {
-			break
-		}
-	}
-	t.Logf("Saw 'Signal handler installed.'  Sending SIGINT.")
-	helper.Process.Signal(syscall.SIGQUIT)
-	t.Logf("Waiting for helper goroutine to exit.\n")
-	<-done
-	t.Logf("Waiting for subprocess to exit.\n")
-	helper.Wait()
-	t.Logf("Done.")
-}
-
-// Run the helper process which TestSignals spawns.
-func runHelperProcess() {
-	cnfMap := map[string]string{
-		conf.HTRACE_LOG_LEVEL: "TRACE",
-		conf.HTRACE_LOG_PATH:  "", // log to stdout
-	}
-	cnfBld := conf.Builder{Values: cnfMap, Defaults: conf.DEFAULTS}
-	cnf, err := cnfBld.Build()
-	if err != nil {
-		fmt.Printf("Error building configuration: %s\n", err.Error())
-		os.Exit(1)
-	}
-	InstallSignalHandlers(cnf)
-	fmt.Fprintf(os.Stderr, "Signal handler installed.\n")
-	// Wait for a signal to be delivered
-	for {
-		time.Sleep(time.Hour * 100)
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/query.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/query.go b/htrace-htraced/go/src/org/apache/htrace/common/query.go
deleted file mode 100644
index 7a9e523..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/query.go
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"encoding/json"
-)
-
-//
-// Represents queries that can be sent to htraced.
-//
-// Each query consists of set of predicates that will be 'AND'ed together to
-// return a set of spans.  Predicates contain an operation, a field, and a
-// value.
-//
-// For example, a query might be "return the first 100 spans between 5:00pm
-// and 5:01pm"  This query would have two predicates: time greater than or
-// equal to 5:00pm, and time less than or equal to 5:01pm.
-// In HTrace, times are always expressed in milliseconds since the Epoch.
-// So this would become:
-// { "lim" : 100, "pred" : [
-//   { "op" : "ge", "field" : "begin", "val" : 1234 },
-//   { "op" : "le", "field" : "begin", "val" : 5678 },
-// ] }
-//
-// Where '1234' and '5678' were replaced by times since the epoch in
-// milliseconds.
-//
-
-type Op string
-
-const (
-	CONTAINS               Op = "cn"
-	EQUALS                 Op = "eq"
-	LESS_THAN_OR_EQUALS    Op = "le"
-	GREATER_THAN_OR_EQUALS Op = "ge"
-	GREATER_THAN           Op = "gt"
-)
-
-func (op Op) IsDescending() bool {
-	return op == LESS_THAN_OR_EQUALS
-}
-
-func (op Op) IsValid() bool {
-	ops := ValidOps()
-	for i := range ops {
-		if ops[i] == op {
-			return true
-		}
-	}
-	return false
-}
-
-func ValidOps() []Op {
-	return []Op{CONTAINS, EQUALS, LESS_THAN_OR_EQUALS, GREATER_THAN_OR_EQUALS,
-		GREATER_THAN}
-}
-
-type Field string
-
-const (
-	SPAN_ID     Field = "spanid"
-	DESCRIPTION Field = "description"
-	BEGIN_TIME  Field = "begin"
-	END_TIME    Field = "end"
-	DURATION    Field = "duration"
-	TRACER_ID   Field = "tracerid"
-)
-
-func (field Field) IsValid() bool {
-	fields := ValidFields()
-	for i := range fields {
-		if fields[i] == field {
-			return true
-		}
-	}
-	return false
-}
-
-func ValidFields() []Field {
-	return []Field{SPAN_ID, DESCRIPTION, BEGIN_TIME, END_TIME,
-		DURATION, TRACER_ID}
-}
-
-type Predicate struct {
-	Op    Op     `json:"op"`
-	Field Field  `json:"field"`
-	Val   string `json:"val"`
-}
-
-func (pred *Predicate) String() string {
-	buf, err := json.Marshal(pred)
-	if err != nil {
-		panic(err)
-	}
-	return string(buf)
-}
-
-type Query struct {
-	Predicates []Predicate `json:"pred"`
-	Lim        int         `json:"lim"`
-	Prev       *Span       `json:"prev"`
-}
-
-func (query *Query) String() string {
-	buf, err := json.Marshal(query)
-	if err != nil {
-		panic(err)
-	}
-	return string(buf)
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/query_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/query_test.go b/htrace-htraced/go/src/org/apache/htrace/common/query_test.go
deleted file mode 100644
index 2697d9c..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/query_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"testing"
-)
-
-func TestValidOps(t *testing.T) {
-	for i := range ValidOps() {
-		op := ValidOps()[i]
-		if !op.IsValid() {
-			t.Fatalf("op %s was in ValidOps, but IsValid returned false.\n", op)
-		}
-	}
-	invalidOp := Op("completelybogus")
-	if invalidOp.IsValid() {
-		t.Fatalf("op %s was invalid, but IsValid returned true.\n", invalidOp)
-	}
-}
-
-func TestValidFields(t *testing.T) {
-	for i := range ValidFields() {
-		field := ValidFields()[i]
-		if !field.IsValid() {
-			t.Fatalf("field %s was in ValidFields, but IsValid returned false.\n", field)
-		}
-	}
-	invalidField := Field("completelybogus")
-	if invalidField.IsValid() {
-		t.Fatalf("field %s was invalid, but IsValid returned true.\n", invalidField)
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/rpc.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/rpc.go b/htrace-htraced/go/src/org/apache/htrace/common/rpc.go
deleted file mode 100644
index 5f02db6..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/rpc.go
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-// The 4-byte magic number which is sent first in the HRPC header
-const HRPC_MAGIC = 0x43525448
-
-// Method ID codes.  Do not reorder these.
-const (
-	METHOD_ID_NONE        = 0
-	METHOD_ID_WRITE_SPANS = iota
-)
-
-const METHOD_NAME_WRITE_SPANS = "HrpcHandler.WriteSpans"
-
-// Maximum length of the error message passed in an HRPC response
-const MAX_HRPC_ERROR_LENGTH = 4 * 1024 * 1024
-
-// Maximum length of HRPC message body
-const MAX_HRPC_BODY_LENGTH = 32 * 1024 * 1024
-
-// A request to write spans to htraced.
-// This request is followed by a sequence of spans.
-type WriteSpansReq struct {
-	DefaultTrid string `json:",omitempty"`
-	NumSpans    int
-}
-
-// Info returned by /server/info
-type ServerVersion struct {
-	// The server release version.
-	ReleaseVersion string
-
-	// The git hash that this software was built with.
-	GitVersion string
-}
-
-// A response to a WriteSpansReq
-type WriteSpansResp struct {
-}
-
-// The header which is sent over the wire for HRPC
-type HrpcRequestHeader struct {
-	Magic    uint32
-	MethodId uint32
-	Seq      uint64
-	Length   uint32
-}
-
-// The response which is sent over the wire for HRPC
-type HrpcResponseHeader struct {
-	Seq       uint64
-	MethodId  uint32
-	ErrLength uint32
-	Length    uint32
-}
-
-func HrpcMethodIdToMethodName(id uint32) string {
-	switch id {
-	case METHOD_ID_WRITE_SPANS:
-		return METHOD_NAME_WRITE_SPANS
-	default:
-		return ""
-	}
-}
-
-func HrpcMethodNameToId(name string) uint32 {
-	switch name {
-	case METHOD_NAME_WRITE_SPANS:
-		return METHOD_ID_WRITE_SPANS
-	default:
-		return METHOD_ID_NONE
-	}
-}
-
-type SpanMetrics struct {
-	// The total number of spans written to HTraced.
-	Written uint64
-
-	// The total number of spans dropped by the server.
-	ServerDropped uint64
-}
-
-// A map from network address strings to SpanMetrics structures.
-type SpanMetricsMap map[string]*SpanMetrics
-
-// Info returned by /server/stats
-type ServerStats struct {
-	// Statistics for each shard (directory)
-	Dirs []StorageDirectoryStats
-
-	// Per-host Span Metrics
-	HostSpanMetrics SpanMetricsMap
-
-	// The time (in UTC milliseconds since the epoch) when the
-	// datastore was last started.
-	LastStartMs int64
-
-	// The current time (in UTC milliseconds since the epoch) on the server.
-	CurMs int64
-
-	// The total number of spans which have been reaped.
-	ReapedSpans uint64
-
-	// The total number of spans which have been ingested since the server started, by WriteSpans
-	// requests.  This number counts spans that didn't get written to persistent storage as well as
-	// those that did.
-	IngestedSpans uint64
-
-	// The total number of spans which have been written to leveldb since the server started.
-	WrittenSpans uint64
-
-	// The total number of spans dropped by the server since the server started.
-	ServerDroppedSpans uint64
-
-	// The maximum latency of a writeSpans request, in milliseconds.
-	MaxWriteSpansLatencyMs uint32
-
-	// The average latency of a writeSpans request, in milliseconds.
-	AverageWriteSpansLatencyMs uint32
-}
-
-type StorageDirectoryStats struct {
-	Path string
-
-	// The approximate number of bytes on disk present in this shard.
-	ApproximateBytes uint64
-
-	// leveldb.stats information
-	LevelDbStats string
-}
-
-type ServerDebugInfoReq struct {
-}
-
-type ServerDebugInfo struct {
-	// Stack traces from all goroutines
-	StackTraces string
-
-	// Garbage collection statistics
-	GCStats string
-}
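
A sketch of how the fixed-size request header above is framed on the wire (little-endian, as in the client codec earlier in this diff; the sequence number and body are placeholders):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"org/apache/htrace/common"
)

func main() {
	body := []byte("placeholder msgpack body")
	hdr := common.HrpcRequestHeader{
		Magic:    common.HRPC_MAGIC,
		MethodId: common.METHOD_ID_WRITE_SPANS,
		Seq:      1,
		Length:   uint32(len(body)),
	}
	var frame bytes.Buffer
	// The header is written as packed little-endian fields, then the body follows.
	if err := binary.Write(&frame, binary.LittleEndian, &hdr); err != nil {
		panic(err)
	}
	frame.Write(body)
	fmt.Printf("header is %d bytes, frame is %d bytes\n",
		binary.Size(&hdr), frame.Len())
}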

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/semaphore.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/semaphore.go b/htrace-htraced/go/src/org/apache/htrace/common/semaphore.go
deleted file mode 100644
index 1acde76..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/semaphore.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"sync"
-)
-
-// A simple lock-and-condition-variable based semaphore implementation.
-type Semaphore struct {
-	lock  sync.Mutex
-	cond  *sync.Cond
-	count int64
-}
-
-func NewSemaphore(count int64) *Semaphore {
-	sem := &Semaphore{
-		count: int64(count),
-	}
-	sem.cond = &sync.Cond{
-		L: &sem.lock,
-	}
-	return sem
-}
-
-func (sem *Semaphore) Post() {
-	sem.lock.Lock()
-	sem.count++
-	if sem.count > 0 {
-		sem.cond.Broadcast()
-	}
-	sem.lock.Unlock()
-}
-
-func (sem *Semaphore) Posts(amt int64) {
-	sem.lock.Lock()
-	sem.count += amt
-	if sem.count > 0 {
-		sem.cond.Broadcast()
-	}
-	sem.lock.Unlock()
-}
-
-func (sem *Semaphore) Wait() {
-	sem.lock.Lock()
-	for {
-		if sem.count > 0 {
-			sem.count--
-			sem.lock.Unlock()
-			return
-		}
-		sem.cond.Wait()
-	}
-}
-
-func (sem *Semaphore) Waits(amt int64) {
-	var i int64
-	for i = 0; i < amt; i++ {
-		sem.Wait()
-	}
-}
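
A sketch of using the semaphore above to bound concurrency (the limit of 4 workers and the task count are arbitrary):

package main

import (
	"fmt"
	"sync"

	"org/apache/htrace/common"
)

func main() {
	// Allow at most 4 tasks to run at the same time.
	sem := common.NewSemaphore(4)
	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem.Wait()       // take a slot
			defer sem.Post() // release it when done
			fmt.Printf("task %d running\n", i)
		}(i)
	}
	wg.Wait()
}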

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/semaphore_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/semaphore_test.go b/htrace-htraced/go/src/org/apache/htrace/common/semaphore_test.go
deleted file mode 100644
index 089c51b..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/semaphore_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"sync/atomic"
-	"testing"
-	"time"
-)
-
-func TestSemaphoreWake(t *testing.T) {
-	var done uint32
-	sem := NewSemaphore(0)
-	go func() {
-		time.Sleep(10 * time.Nanosecond)
-		atomic.AddUint32(&done, 1)
-		sem.Post()
-	}()
-	sem.Wait()
-	doneVal := atomic.LoadUint32(&done)
-	if doneVal != 1 {
-		t.Fatalf("sem.Wait did not wait for sem.Post")
-	}
-}
-
-func TestSemaphoreCount(t *testing.T) {
-	sem := NewSemaphore(1)
-	sem.Post()
-	sem.Wait()
-	sem.Wait()
-
-	sem = NewSemaphore(-1)
-	sem.Post()
-	sem.Post()
-	sem.Wait()
-}
-
-func TestSemaphoreMultipleGoroutines(t *testing.T) {
-	var done uint32
-	sem := NewSemaphore(0)
-	sem2 := NewSemaphore(0)
-	go func() {
-		sem.Wait()
-		atomic.AddUint32(&done, 1)
-		sem2.Post()
-	}()
-	go func() {
-		time.Sleep(10 * time.Nanosecond)
-		atomic.AddUint32(&done, 1)
-		sem.Post()
-	}()
-	go func() {
-		time.Sleep(20 * time.Nanosecond)
-		atomic.AddUint32(&done, 1)
-		sem.Post()
-	}()
-	sem.Wait()
-	go func() {
-		time.Sleep(10 * time.Nanosecond)
-		atomic.AddUint32(&done, 1)
-		sem.Post()
-	}()
-	sem.Wait()
-	sem2.Wait()
-	doneVal := atomic.LoadUint32(&done)
-	if doneVal != 4 {
-		t.Fatalf("sem.Wait did not wait for sem.Posts")
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/span.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/span.go b/htrace-htraced/go/src/org/apache/htrace/common/span.go
deleted file mode 100644
index 1716c5a..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/span.go
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"hash/fnv"
-)
-
-//
-// Represents a trace span.
-//
-// Compatibility notes:
-// When converting to JSON, we store the 64-bit numbers as hexadecimal strings rather than as
-// integers.  This is because JavaScript lacks the ability to handle 64-bit integers.  Integers that
-// need more than 53 bits of precision will be rounded by JavaScript.  Since the JavaScript UI is a
-// primary consumer of this JSON data, we have to simply pass them as strings.
-//
-
-type TraceInfoMap map[string]string
-
-type TimelineAnnotation struct {
-	Time int64  `json:"t"`
-	Msg  string `json:"m"`
-}
-
-type SpanId []byte
-
-var INVALID_SPAN_ID SpanId = make([]byte, 16) // all zeroes
-
-func (id SpanId) String() string {
-	return fmt.Sprintf("%02x%02x%02x%02x"+
-		"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
-		id[0], id[1], id[2], id[3], id[4], id[5], id[6], id[7], id[8],
-		id[9], id[10], id[11], id[12], id[13], id[14], id[15])
-}
-
-func (id SpanId) Val() []byte {
-	return []byte(id)
-}
-
-func (id SpanId) FindProblem() string {
-	if id == nil {
-		return "The span ID is nil"
-	}
-	if len(id) != 16 {
-		return "The span ID is not exactly 16 bytes."
-	}
-	if bytes.Equal(id.Val(), INVALID_SPAN_ID.Val()) {
-		return "The span ID is all zeros."
-	}
-	return ""
-}
-
-func (id SpanId) ToArray() [16]byte {
-	var ret [16]byte
-	copy(ret[:], id.Val()[:])
-	return ret
-}
-
-// Return the next ID in lexicographical order.  For the maximum ID,
-// returns the minimum.
-func (id SpanId) Next() SpanId {
-	next := make([]byte, 16)
-	copy(next, id)
-	for i := len(next) - 1; i >= 0; i-- {
-		if next[i] == 0xff {
-			next[i] = 0
-		} else {
-			next[i] = next[i] + 1
-			break
-		}
-	}
-	return next
-}
-
-// Return the previous ID in lexicographical order.  For the minimum ID,
-// returns the maximum ID.
-func (id SpanId) Prev() SpanId {
-	prev := make([]byte, 16)
-	copy(prev, id)
-	for i := len(prev) - 1; i >= 0; i-- {
-		if prev[i] == 0x00 {
-			prev[i] = 0xff
-		} else {
-			prev[i] = prev[i] - 1
-			break
-		}
-	}
-	return prev
-}
-
-func (id SpanId) MarshalJSON() ([]byte, error) {
-	return []byte(`"` + id.String() + `"`), nil
-}
-
-func (id SpanId) Compare(other SpanId) int {
-	return bytes.Compare(id.Val(), other.Val())
-}
-
-func (id SpanId) Equal(other SpanId) bool {
-	return bytes.Equal(id.Val(), other.Val())
-}
-
-func (id SpanId) Hash32() uint32 {
-	h := fnv.New32a()
-	h.Write(id.Val())
-	return h.Sum32()
-}
-
-type SpanSlice []*Span
-
-func (s SpanSlice) Len() int {
-	return len(s)
-}
-
-func (s SpanSlice) Less(i, j int) bool {
-	return s[i].Id.Compare(s[j].Id) < 0
-}
-
-func (s SpanSlice) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-type SpanIdSlice []SpanId
-
-func (s SpanIdSlice) Len() int {
-	return len(s)
-}
-
-func (s SpanIdSlice) Less(i, j int) bool {
-	return s[i].Compare(s[j]) < 0
-}
-
-func (s SpanIdSlice) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-const DOUBLE_QUOTE = 0x22
-
-func (id *SpanId) UnmarshalJSON(b []byte) error {
-	if b[0] != DOUBLE_QUOTE {
-		return errors.New("Expected spanID to start with a string quote.")
-	}
-	if b[len(b)-1] != DOUBLE_QUOTE {
-		return errors.New("Expected spanID to end with a string quote.")
-	}
-	return id.FromString(string(b[1 : len(b)-1]))
-}
-
-func (id *SpanId) FromString(str string) error {
-	i := SpanId(make([]byte, 16))
-	n, err := fmt.Sscanf(str, "%02x%02x%02x%02x"+
-		"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
-		&i[0], &i[1], &i[2], &i[3], &i[4], &i[5], &i[6], &i[7], &i[8],
-		&i[9], &i[10], &i[11], &i[12], &i[13], &i[14], &i[15])
-	if err != nil {
-		return err
-	}
-	if n != 16 {
-		return errors.New("Failed to find 16 hex digits in the SpanId")
-	}
-	*id = i
-	return nil
-}
-
-type SpanData struct {
-	Begin               int64                `json:"b"`
-	End                 int64                `json:"e"`
-	Description         string               `json:"d"`
-	Parents             []SpanId             `json:"p"`
-	Info                TraceInfoMap         `json:"n,omitempty"`
-	TracerId            string               `json:"r"`
-	TimelineAnnotations []TimelineAnnotation `json:"t,omitempty"`
-}
-
-type Span struct {
-	Id SpanId `json:"a"`
-	SpanData
-}
-
-func (span *Span) ToJson() []byte {
-	jbytes, err := json.Marshal(*span)
-	if err != nil {
-		panic(err)
-	}
-	return jbytes
-}
-
-func (span *Span) String() string {
-	return string(span.ToJson())
-}
-
-// Compute the span duration.  We ignore overflow since we never deal with negative times.
-func (span *Span) Duration() int64 {
-	return span.End - span.Begin
-}
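
A sketch of round-tripping a span ID through its string form and building a span by hand (the hex ID, timestamps, and tracer ID are placeholders):

package main

import (
	"fmt"

	"org/apache/htrace/common"
)

func main() {
	// Span IDs are 16 bytes, printed as 32 hex digits.
	var id common.SpanId
	if err := id.FromString("33f25a1a750a471db5bafa59309d7d6f"); err != nil {
		panic(err)
	}
	fmt.Printf("id=%s next=%s prev=%s\n", id, id.Next(), id.Prev())

	span := common.Span{
		Id: id,
		SpanData: common.SpanData{
			Begin:       123, // placeholder times, in ms since the epoch
			End:         456,
			Description: "getFileDescriptors",
			Parents:     []common.SpanId{},
			TracerId:    "exampleTracer",
		},
	}
	// ToJson uses the short JSON keys ("a", "b", "e", ...) defined above.
	fmt.Println(string(span.ToJson()))
	fmt.Printf("duration=%d ms\n", span.Duration())
}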

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/span_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/span_test.go b/htrace-htraced/go/src/org/apache/htrace/common/span_test.go
deleted file mode 100644
index 7fb128d..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/span_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"bytes"
-	"encoding/hex"
-	"fmt"
-	"github.com/ugorji/go/codec"
-	"testing"
-)
-
-func TestSpanToJson(t *testing.T) {
-	t.Parallel()
-	span := Span{Id: TestId("33f25a1a750a471db5bafa59309d7d6f"),
-		SpanData: SpanData{
-			Begin:       123,
-			End:         456,
-			Description: "getFileDescriptors",
-			Parents:     []SpanId{},
-			TracerId:    "testTracerId",
-		}}
-	ExpectStrEqual(t,
-		`{"a":"33f25a1a750a471db5bafa59309d7d6f","b":123,"e":456,"d":"getFileDescriptors","p":[],"r":"testTracerId"}`,
-		string(span.ToJson()))
-}
-
-func TestAnnotatedSpanToJson(t *testing.T) {
-	t.Parallel()
-	span := Span{Id: TestId("11eace42e6404b40a7644214cb779a08"),
-		SpanData: SpanData{
-			Begin:       1234,
-			End:         4567,
-			Description: "getFileDescriptors2",
-			Parents:     []SpanId{},
-			TracerId:    "testAnnotatedTracerId",
-			TimelineAnnotations: []TimelineAnnotation{
-				TimelineAnnotation{
-					Time: 7777,
-					Msg:  "contactedServer",
-				},
-				TimelineAnnotation{
-					Time: 8888,
-					Msg:  "passedFd",
-				},
-			},
-		}}
-	ExpectStrEqual(t,
-		`{"a":"11eace42e6404b40a7644214cb779a08","b":1234,"e":4567,"d":"getFileDescriptors2","p":[],"r":"testAnnotatedTracerId","t":[{"t":7777,"m":"contactedServer"},{"t":8888,"m":"passedFd"}]}`,
-		string(span.ToJson()))
-}
-
-func TestSpanNext(t *testing.T) {
-	ExpectStrEqual(t, TestId("00000000000000000000000000000001").String(),
-		TestId("00000000000000000000000000000000").Next().String())
-	ExpectStrEqual(t, TestId("00000000000000000000000000f00000").String(),
-		TestId("00000000000000000000000000efffff").Next().String())
-	ExpectStrEqual(t, TestId("00000000000000000000000000000000").String(),
-		TestId("ffffffffffffffffffffffffffffffff").Next().String())
-}
-
-func TestSpanPrev(t *testing.T) {
-	ExpectStrEqual(t, TestId("00000000000000000000000000000000").String(),
-		TestId("00000000000000000000000000000001").Prev().String())
-	ExpectStrEqual(t, TestId("00000000000000000000000000efffff").String(),
-		TestId("00000000000000000000000000f00000").Prev().String())
-	ExpectStrEqual(t, TestId("ffffffffffffffffffffffffffffffff").String(),
-		TestId("00000000000000000000000000000000").Prev().String())
-}
-
-func TestSpanMsgPack(t *testing.T) {
-	span := Span{Id: TestId("33f25a1a750a471db5bafa59309d7d6f"),
-		SpanData: SpanData{
-			Begin:       1234,
-			End:         5678,
-			Description: "getFileDescriptors",
-			Parents:     []SpanId{},
-			TracerId:    "testTracerId",
-		}}
-	mh := new(codec.MsgpackHandle)
-	mh.WriteExt = true
-	w := bytes.NewBuffer(make([]byte, 0, 2048))
-	enc := codec.NewEncoder(w, mh)
-	err := enc.Encode(span)
-	if err != nil {
-		t.Fatal("Error encoding span as msgpack: " + err.Error())
-	}
-	buf := w.Bytes()
-	fmt.Printf("span: %s\n", hex.EncodeToString(buf))
-	mh = new(codec.MsgpackHandle)
-	mh.WriteExt = true
-	dec := codec.NewDecoder(bytes.NewReader(buf), mh)
-	var span2 Span
-	err = dec.Decode(&span2)
-	if err != nil {
-		t.Fatal("Failed to reverse msgpack encoding for " + span.String())
-	}
-	ExpectSpansEqual(t, &span, &span2)
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/test_util.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/test_util.go b/htrace-htraced/go/src/org/apache/htrace/common/test_util.go
deleted file mode 100644
index a761525..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/test_util.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"fmt"
-	"testing"
-	"strings"
-	"time"
-)
-
-type Int64Slice []int64
-
-func (p Int64Slice) Len() int           { return len(p) }
-func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p Int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-type SupplierFun func() bool
-
-//
-// Wait for a configurable amount of time for a precondition to become true.
-//
-// Example:
-//   WaitFor(time.Minute * 1, time.Millisecond * 1, func() bool {
-//      return ht.Store.GetStatistics().NumSpansWritten >= 3
-//  })
-//
-func WaitFor(dur time.Duration, poll time.Duration, fun SupplierFun) {
-	if poll == 0 {
-		poll = dur / 10
-	}
-	if poll <= 0 {
-		panic("Can't have a polling time less than zero.")
-	}
-	endTime := time.Now().Add(dur)
-	for {
-		if fun() {
-			return
-		}
-		if !time.Now().Before(endTime) {
-			break
-		}
-		time.Sleep(poll)
-	}
-	panic(fmt.Sprintf("Timed out after %s", dur))
-}
-
-// Trigger a test failure if two strings are not equal.
-func ExpectStrEqual(t *testing.T, expect string, actual string) {
-	if expect != actual {
-		t.Fatalf("Expected:\n%s\nGot:\n%s\n", expect, actual)
-	}
-}
-
-// Trigger a test failure if the JSON representations of two spans are not equal.
-func ExpectSpansEqual(t *testing.T, spanA *Span, spanB *Span) {
-	ExpectStrEqual(t, string(spanA.ToJson()), string(spanB.ToJson()))
-}
-
-func TestId(str string) SpanId {
-	var spanId SpanId
-	err := spanId.FromString(str)
-	if err != nil {
-		panic(err.Error())
-	}
-	return spanId
-}
-
-func AssertErrContains(t *testing.T, err error, str string) {
-	if !strings.Contains(err.Error(), str) {
-		t.Fatalf("expected the error to contain %s, but it was %s\n",
-			str, err.Error())
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/time.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/time.go b/htrace-htraced/go/src/org/apache/htrace/common/time.go
deleted file mode 100644
index 8b4b6b8..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/time.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"time"
-)
-
-func TimeToUnixMs(t time.Time) int64 {
-	return t.UnixNano() / 1000000
-}
-
-func UnixMsToTime(u int64) time.Time {
-	secs := u / 1000
-	nanos := u - (secs * 1000)
-	return time.Unix(secs, nanos)
-}
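
A note on the conversion above: time.Unix takes seconds and nanoseconds, so the raw
millisecond remainder that UnixMsToTime passes through (as transcribed in this hunk)
is interpreted as nanoseconds, losing the sub-second part of the round trip.  Below is
a minimal standalone sketch of a lossless round trip, not part of this commit, reusing
the same names for clarity:

package main

import (
	"fmt"
	"time"
)

// Milliseconds since the Unix epoch for a given time.Time.
func TimeToUnixMs(t time.Time) int64 {
	return t.UnixNano() / 1000000
}

// Inverse conversion.  time.Unix expects (seconds, nanoseconds), so the
// sub-second remainder is scaled from milliseconds to nanoseconds.
func UnixMsToTime(u int64) time.Time {
	secs := u / 1000
	nanos := (u - secs*1000) * 1000000
	return time.Unix(secs, nanos)
}

func main() {
	ms := TimeToUnixMs(time.Now())
	fmt.Println(TimeToUnixMs(UnixMsToTime(ms)) == ms) // prints true
}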



[2/7] incubator-htrace git commit: HTRACE-357. Rename htrace-htraced/go/src/org/apache/htrace to htrace-htraced/go/src/htrace (Colin Patrick McCabe via iwasakims)

Posted by iw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/datastore_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/datastore_test.go b/htrace-htraced/go/src/org/apache/htrace/htraced/datastore_test.go
deleted file mode 100644
index 281ee2d..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/datastore_test.go
+++ /dev/null
@@ -1,761 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"bytes"
-	"encoding/json"
-	"math/rand"
-	htrace "org/apache/htrace/client"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"org/apache/htrace/test"
-	"os"
-	"reflect"
-	"sort"
-	"testing"
-	"time"
-)
-
-// Test creating and tearing down a datastore.
-func TestCreateDatastore(t *testing.T) {
-	htraceBld := &MiniHTracedBuilder{Name: "TestCreateDatastore",
-		DataDirs: make([]string, 3)}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	defer ht.Close()
-}
-
-var SIMPLE_TEST_SPANS []common.Span = []common.Span{
-	common.Span{Id: common.TestId("00000000000000000000000000000001"),
-		SpanData: common.SpanData{
-			Begin:       123,
-			End:         456,
-			Description: "getFileDescriptors",
-			Parents:     []common.SpanId{},
-			TracerId:    "firstd",
-		}},
-	common.Span{Id: common.TestId("00000000000000000000000000000002"),
-		SpanData: common.SpanData{
-			Begin:       125,
-			End:         200,
-			Description: "openFd",
-			Parents:     []common.SpanId{common.TestId("00000000000000000000000000000001")},
-			TracerId:    "secondd",
-		}},
-	common.Span{Id: common.TestId("00000000000000000000000000000003"),
-		SpanData: common.SpanData{
-			Begin:       200,
-			End:         456,
-			Description: "passFd",
-			Parents:     []common.SpanId{common.TestId("00000000000000000000000000000001")},
-			TracerId:    "thirdd",
-		}},
-}
-
-func createSpans(spans []common.Span, store *dataStore) {
-	ing := store.NewSpanIngestor(store.lg, "127.0.0.1", "")
-	for idx := range spans {
-		ing.IngestSpan(&spans[idx])
-	}
-	ing.Close(time.Now())
-	store.WrittenSpans.Waits(int64(len(spans)))
-}
-
-// Test creating a datastore and adding some spans.
-func TestDatastoreWriteAndRead(t *testing.T) {
-	t.Parallel()
-	htraceBld := &MiniHTracedBuilder{Name: "TestDatastoreWriteAndRead",
-		Cnf: map[string]string{
-			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
-		},
-		WrittenSpans: common.NewSemaphore(0),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		panic(err)
-	}
-	defer ht.Close()
-	createSpans(SIMPLE_TEST_SPANS, ht.Store)
-
-	span := ht.Store.FindSpan(common.TestId("00000000000000000000000000000001"))
-	if span == nil {
-		t.Fatal()
-	}
-	if !span.Id.Equal(common.TestId("00000000000000000000000000000001")) {
-		t.Fatal()
-	}
-	common.ExpectSpansEqual(t, &SIMPLE_TEST_SPANS[0], span)
-	children := ht.Store.FindChildren(common.TestId("00000000000000000000000000000001"), 1)
-	if len(children) != 1 {
-		t.Fatalf("expected 1 child, but got %d\n", len(children))
-	}
-	children = ht.Store.FindChildren(common.TestId("00000000000000000000000000000001"), 2)
-	if len(children) != 2 {
-		t.Fatalf("expected 2 children, but got %d\n", len(children))
-	}
-	sort.Sort(common.SpanIdSlice(children))
-	if !children[0].Equal(common.TestId("00000000000000000000000000000002")) {
-		t.Fatal()
-	}
-	if !children[1].Equal(common.TestId("00000000000000000000000000000003")) {
-		t.Fatal()
-	}
-}
-
-func testQuery(t *testing.T, ht *MiniHTraced, query *common.Query,
-		expectedSpans []common.Span) {
-	testQueryExt(t, ht, query, expectedSpans, nil)
-}
-
-func testQueryExt(t *testing.T, ht *MiniHTraced, query *common.Query,
-	expectedSpans []common.Span, expectedNumScanned []int) {
-	spans, err, numScanned := ht.Store.HandleQuery(query)
-	if err != nil {
-		t.Fatalf("Query %s failed: %s\n", query.String(), err.Error())
-	}
-	expectedBuf := new(bytes.Buffer)
-	dec := json.NewEncoder(expectedBuf)
-	err = dec.Encode(expectedSpans)
-	if err != nil {
-		t.Fatalf("Failed to encode expectedSpans to JSON: %s\n", err.Error())
-	}
-	spansBuf := new(bytes.Buffer)
-	dec = json.NewEncoder(spansBuf)
-	err = dec.Encode(spans)
-	if err != nil {
-		t.Fatalf("Failed to encode result spans to JSON: %s\n", err.Error())
-	}
-	t.Logf("len(spans) = %d, len(expectedSpans) = %d\n", len(spans),
-		len(expectedSpans))
-	common.ExpectStrEqual(t, string(expectedBuf.Bytes()), string(spansBuf.Bytes()))
-	if expectedNumScanned != nil {
-		if !reflect.DeepEqual(expectedNumScanned, numScanned) {
-			t.Fatalf("Invalid values for numScanned: got %v, expected %v\n",
-					expectedNumScanned, numScanned)
-		}
-	}
-}
-
-// Test queries on the datastore.
-func TestSimpleQuery(t *testing.T) {
-	t.Parallel()
-	htraceBld := &MiniHTracedBuilder{Name: "TestSimpleQuery",
-		Cnf: map[string]string{
-			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
-		},
-		WrittenSpans: common.NewSemaphore(0),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		panic(err)
-	}
-	defer ht.Close()
-	createSpans(SIMPLE_TEST_SPANS, ht.Store)
-
-	assertNumWrittenEquals(t, ht.Store.msink, len(SIMPLE_TEST_SPANS))
-
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.GREATER_THAN_OR_EQUALS,
-				Field: common.BEGIN_TIME,
-				Val:   "125",
-			},
-		},
-		Lim: 5,
-	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[2]})
-}
-
-func TestQueries2(t *testing.T) {
-	t.Parallel()
-	htraceBld := &MiniHTracedBuilder{Name: "TestQueries2",
-		Cnf: map[string]string{
-			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
-		},
-		WrittenSpans: common.NewSemaphore(0),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		panic(err)
-	}
-	defer ht.Close()
-	createSpans(SIMPLE_TEST_SPANS, ht.Store)
-	assertNumWrittenEquals(t, ht.Store.msink, len(SIMPLE_TEST_SPANS))
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.LESS_THAN_OR_EQUALS,
-				Field: common.BEGIN_TIME,
-				Val:   "125",
-			},
-		},
-		Lim: 5,
-	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[0]})
-
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.LESS_THAN_OR_EQUALS,
-				Field: common.BEGIN_TIME,
-				Val:   "125",
-			},
-			common.Predicate{
-				Op:    common.EQUALS,
-				Field: common.DESCRIPTION,
-				Val:   "getFileDescriptors",
-			},
-		},
-		Lim: 2,
-	}, []common.Span{SIMPLE_TEST_SPANS[0]})
-
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.EQUALS,
-				Field: common.DESCRIPTION,
-				Val:   "getFileDescriptors",
-			},
-		},
-		Lim: 2,
-	}, []common.Span{SIMPLE_TEST_SPANS[0]})
-}
-
-func TestQueries3(t *testing.T) {
-	t.Parallel()
-	htraceBld := &MiniHTracedBuilder{Name: "TestQueries3",
-		Cnf: map[string]string{
-			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
-		},
-		WrittenSpans: common.NewSemaphore(0),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		panic(err)
-	}
-	defer ht.Close()
-	createSpans(SIMPLE_TEST_SPANS, ht.Store)
-	assertNumWrittenEquals(t, ht.Store.msink, len(SIMPLE_TEST_SPANS))
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.CONTAINS,
-				Field: common.DESCRIPTION,
-				Val:   "Fd",
-			},
-			common.Predicate{
-				Op:    common.GREATER_THAN_OR_EQUALS,
-				Field: common.BEGIN_TIME,
-				Val:   "100",
-			},
-		},
-		Lim: 5,
-	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[2]})
-
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.LESS_THAN_OR_EQUALS,
-				Field: common.SPAN_ID,
-				Val:   common.TestId("00000000000000000000000000000000").String(),
-			},
-		},
-		Lim: 200,
-	}, []common.Span{})
-
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.LESS_THAN_OR_EQUALS,
-				Field: common.SPAN_ID,
-				Val:   common.TestId("00000000000000000000000000000002").String(),
-			},
-		},
-		Lim: 200,
-	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[0]})
-}
-
-func TestQueries4(t *testing.T) {
-	t.Parallel()
-	htraceBld := &MiniHTracedBuilder{Name: "TestQueries4",
-		Cnf: map[string]string{
-			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
-		},
-		WrittenSpans: common.NewSemaphore(0),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		panic(err)
-	}
-	defer ht.Close()
-	createSpans(SIMPLE_TEST_SPANS, ht.Store)
-
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.GREATER_THAN,
-				Field: common.BEGIN_TIME,
-				Val:   "125",
-			},
-		},
-		Lim: 5,
-	}, []common.Span{SIMPLE_TEST_SPANS[2]})
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.GREATER_THAN_OR_EQUALS,
-				Field: common.DESCRIPTION,
-				Val:   "openFd",
-			},
-		},
-		Lim: 2,
-	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[2]})
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.GREATER_THAN,
-				Field: common.DESCRIPTION,
-				Val:   "openFd",
-			},
-		},
-		Lim: 2,
-	}, []common.Span{SIMPLE_TEST_SPANS[2]})
-}
-
-var TEST_QUERIES5_SPANS []common.Span = []common.Span{
-	common.Span{Id: common.TestId("10000000000000000000000000000001"),
-		SpanData: common.SpanData{
-			Begin:       123,
-			End:         456,
-			Description: "span1",
-			Parents:     []common.SpanId{},
-			TracerId:    "myTracer",
-		}},
-	common.Span{Id: common.TestId("10000000000000000000000000000002"),
-		SpanData: common.SpanData{
-			Begin:       123,
-			End:         200,
-			Description: "span2",
-			Parents:     []common.SpanId{common.TestId("10000000000000000000000000000001")},
-			TracerId:    "myTracer",
-		}},
-	common.Span{Id: common.TestId("10000000000000000000000000000003"),
-		SpanData: common.SpanData{
-			Begin:       124,
-			End:         457,
-			Description: "span3",
-			Parents:     []common.SpanId{common.TestId("10000000000000000000000000000001")},
-			TracerId:    "myTracer",
-		}},
-}
-
-func TestQueries5(t *testing.T) {
-	t.Parallel()
-	htraceBld := &MiniHTracedBuilder{Name: "TestQueries5",
-		WrittenSpans: common.NewSemaphore(0),
-		DataDirs: make([]string, 1),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		panic(err)
-	}
-	defer ht.Close()
-	createSpans(TEST_QUERIES5_SPANS, ht.Store)
-
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.GREATER_THAN,
-				Field: common.BEGIN_TIME,
-				Val:   "123",
-			},
-		},
-		Lim: 5,
-	}, []common.Span{TEST_QUERIES5_SPANS[2]})
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.GREATER_THAN,
-				Field: common.END_TIME,
-				Val:   "200",
-			},
-		},
-		Lim: 500,
-	}, []common.Span{TEST_QUERIES5_SPANS[0], TEST_QUERIES5_SPANS[2]})
-
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.LESS_THAN_OR_EQUALS,
-				Field: common.END_TIME,
-				Val:   "999",
-			},
-		},
-		Lim: 500,
-	}, []common.Span{TEST_QUERIES5_SPANS[2],
-		TEST_QUERIES5_SPANS[0],
-		TEST_QUERIES5_SPANS[1],
-	})
-}
-
-func BenchmarkDatastoreWrites(b *testing.B) {
-	htraceBld := &MiniHTracedBuilder{Name: "BenchmarkDatastoreWrites",
-		Cnf: map[string]string{
-			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
-			conf.HTRACE_LOG_LEVEL:                     "INFO",
-		},
-		WrittenSpans: common.NewSemaphore(0),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		b.Fatalf("Error creating MiniHTraced: %s\n", err.Error())
-	}
-	ht.Store.lg.Infof("BenchmarkDatastoreWrites: b.N = %d\n", b.N)
-	defer func() {
-		if r := recover(); r != nil {
-			ht.Store.lg.Infof("panic: %s\n", r.(error))
-		}
-		ht.Close()
-	}()
-	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
-	allSpans := make([]*common.Span, b.N)
-	for n := range allSpans {
-		allSpans[n] = test.NewRandomSpan(rnd, allSpans[0:n])
-	}
-
-	// Reset the timer to avoid including the time required to create new
-	// random spans in the benchmark total.
-	b.ResetTimer()
-
-	// Write many random spans.
-	ing := ht.Store.NewSpanIngestor(ht.Store.lg, "127.0.0.1", "")
-	for n := 0; n < b.N; n++ {
-		ing.IngestSpan(allSpans[n])
-	}
-	ing.Close(time.Now())
-	// Wait for all the spans to be written.
-	ht.Store.WrittenSpans.Waits(int64(b.N))
-	assertNumWrittenEquals(b, ht.Store.msink, b.N)
-}
-
-func verifySuccessfulLoad(t *testing.T, allSpans common.SpanSlice,
-		dataDirs []string) {
-	htraceBld := &MiniHTracedBuilder{
-		Name: "TestReloadDataStore#verifySuccessfulLoad",
-		DataDirs: dataDirs,
-		KeepDataDirsOnClose: true,
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	defer ht.Close()
-	var hcl *htrace.Client
-	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
-	if err != nil {
-		t.Fatalf("failed to create client: %s", err.Error())
-	}
-	defer hcl.Close()
-	for i := 0; i < len(allSpans); i++ {
-		span, err := hcl.FindSpan(allSpans[i].Id)
-		if err != nil {
-			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
-		}
-		common.ExpectSpansEqual(t, allSpans[i], span)
-	}
-	// Look up the spans we wrote.
-	var span *common.Span
-	for i := 0; i < len(allSpans); i++ {
-		span, err = hcl.FindSpan(allSpans[i].Id)
-		if err != nil {
-			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
-		}
-		common.ExpectSpansEqual(t, allSpans[i], span)
-	}
-}
-
-func verifyFailedLoad(t *testing.T, dataDirs []string, expectedErr string) {
-	htraceBld := &MiniHTracedBuilder{
-		Name: "TestReloadDataStore#verifyFailedLoad",
-		DataDirs: dataDirs,
-		KeepDataDirsOnClose: true,
-	}
-	_, err := htraceBld.Build()
-	if err == nil {
-		t.Fatalf("expected failure to load, but the load succeeded.")
-	}
-	common.AssertErrContains(t, err, expectedErr)
-}
-
-func TestReloadDataStore(t *testing.T) {
-	htraceBld := &MiniHTracedBuilder{Name: "TestReloadDataStore",
-		Cnf: map[string]string{
-			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
-		},
-		DataDirs:            make([]string, 2),
-		KeepDataDirsOnClose: true,
-		WrittenSpans:        common.NewSemaphore(0),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	dataDirs := make([]string, len(ht.DataDirs))
-	copy(dataDirs, ht.DataDirs)
-	defer func() {
-		if ht != nil {
-			ht.Close()
-		}
-		for i := range dataDirs {
-			os.RemoveAll(dataDirs[i])
-		}
-	}()
-	var hcl *htrace.Client
-	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
-	if err != nil {
-		t.Fatalf("failed to create client: %s", err.Error())
-	}
-	hcnf := ht.Cnf.Clone()
-
-	// Create some random trace spans.
-	NUM_TEST_SPANS := 5
-	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
-	err = hcl.WriteSpans(allSpans)
-	if err != nil {
-		t.Fatalf("WriteSpans failed: %s\n", err.Error())
-	}
-	ht.Store.WrittenSpans.Waits(int64(NUM_TEST_SPANS))
-
-	// Look up the spans we wrote.
-	var span *common.Span
-	for i := 0; i < NUM_TEST_SPANS; i++ {
-		span, err = hcl.FindSpan(allSpans[i].Id)
-		if err != nil {
-			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
-		}
-		common.ExpectSpansEqual(t, allSpans[i], span)
-	}
-	hcl.Close()
-	ht.Close()
-	ht = nil
-
-	// Verify that we can reload the datastore, even if we configure the data
-	// directories in a different order.
-	verifySuccessfulLoad(t, allSpans, []string{dataDirs[1], dataDirs[0]})
-
-	// If we try to reload the datastore with only one directory, it won't work
-	// (we need both).
-	verifyFailedLoad(t, []string{dataDirs[1]},
-		"The TotalShards field of all shards is 2, but we have 1 shards.")
-
-	// Test that we give an intelligent error message when 0 directories are
-	// configured.
-	verifyFailedLoad(t, []string{}, "No shard directories found.")
-
-	// Can't specify the same directory more than once... will get "lock
-	// already held by process"
-	verifyFailedLoad(t, []string{dataDirs[0], dataDirs[1], dataDirs[1]},
-		" already held by process.")
-
-	// Open the datastore and modify it to have the wrong DaemonId
-	dld := NewDataStoreLoader(hcnf)
-	defer func() {
-		if dld != nil {
-			dld.Close()
-			dld = nil
-		}
-	}()
-	dld.LoadShards()
-	sinfo, err := dld.shards[0].readShardInfo()
-	if err != nil {
-		t.Fatalf("error reading shard info for shard %s: %s\n",
-			dld.shards[0].path, err.Error())
-	}
-	newDaemonId := sinfo.DaemonId + 1
-	dld.lg.Infof("Read %s from shard %s.  Changing daemonId to 0x%016x\n.",
-		asJson(sinfo), dld.shards[0].path, newDaemonId)
-	sinfo.DaemonId = newDaemonId
-	err = dld.shards[0].writeShardInfo(sinfo)
-	if err != nil {
-		t.Fatalf("error writing shard info for shard %s: %s\n",
-			dld.shards[0].path, err.Error())
-	}
-	dld.Close()
-	dld = nil
-	verifyFailedLoad(t, dataDirs, "DaemonId mismatch.")
-
-	// Open the datastore and modify it to have the wrong TotalShards
-	dld = NewDataStoreLoader(hcnf)
-	dld.LoadShards()
-	sinfo, err = dld.shards[0].readShardInfo()
-	if err != nil {
-		t.Fatalf("error reading shard info for shard %s: %s\n",
-			dld.shards[0].path, err.Error())
-	}
-	newDaemonId = sinfo.DaemonId - 1
-	dld.lg.Infof("Read %s from shard %s.  Changing daemonId to 0x%016x, " +
-		"TotalShards to 3\n.",
-		asJson(sinfo), dld.shards[0].path, newDaemonId)
-	sinfo.DaemonId = newDaemonId
-	sinfo.TotalShards = 3
-	err = dld.shards[0].writeShardInfo(sinfo)
-	if err != nil {
-		t.Fatalf("error writing shard info for shard %s: %s\n",
-			dld.shards[0].path, err.Error())
-	}
-	dld.Close()
-	dld = nil
-	verifyFailedLoad(t, dataDirs, "TotalShards mismatch.")
-
-	// Open the datastore and modify it to have the wrong LayoutVersion
-	dld = NewDataStoreLoader(hcnf)
-	dld.LoadShards()
-	for shardIdx := range(dld.shards) {
-		sinfo, err = dld.shards[shardIdx].readShardInfo()
-		if err != nil {
-			t.Fatalf("error reading shard info for shard %s: %s\n",
-				dld.shards[shardIdx].path, err.Error())
-		}
-		dld.lg.Infof("Read %s from shard %s.  Changing TotalShards to 2, " +
-			"LayoutVersion to 2\n", asJson(sinfo), dld.shards[shardIdx].path)
-		sinfo.TotalShards = 2
-		sinfo.LayoutVersion = 2
-		err = dld.shards[shardIdx].writeShardInfo(sinfo)
-		if err != nil {
-			t.Fatalf("error writing shard info for shard %s: %s\n",
-				dld.shards[0].path, err.Error())
-		}
-	}
-	dld.Close()
-	dld = nil
-	verifyFailedLoad(t, dataDirs, "The layout version of all shards is 2, " +
-		"but we only support")
-
-	// It should work with data.store.clear set.
-	htraceBld = &MiniHTracedBuilder{
-		Name: "TestReloadDataStore#clear",
-		DataDirs: dataDirs,
-		KeepDataDirsOnClose: true,
-		Cnf: map[string]string{conf.HTRACE_DATA_STORE_CLEAR: "true"},
-	}
-	ht, err = htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-}
-
-func TestQueriesWithContinuationTokens1(t *testing.T) {
-	t.Parallel()
-	htraceBld := &MiniHTracedBuilder{Name: "TestQueriesWithContinuationTokens1",
-		Cnf: map[string]string{
-			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
-		},
-		WrittenSpans: common.NewSemaphore(0),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		panic(err)
-	}
-	defer ht.Close()
-	createSpans(SIMPLE_TEST_SPANS, ht.Store)
-	assertNumWrittenEquals(t, ht.Store.msink, len(SIMPLE_TEST_SPANS))
-	// Adding a prev value to this query excludes the first result that we
-	// would normally get.
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.GREATER_THAN,
-				Field: common.BEGIN_TIME,
-				Val:   "120",
-			},
-		},
-		Lim:  5,
-		Prev: &SIMPLE_TEST_SPANS[0],
-	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[2]})
-
-	// There is only one result from an EQUALS query on SPAN_ID.
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.EQUALS,
-				Field: common.SPAN_ID,
-				Val:   common.TestId("00000000000000000000000000000001").String(),
-			},
-		},
-		Lim:  100,
-		Prev: &SIMPLE_TEST_SPANS[0],
-	}, []common.Span{})
-
-	// When doing a LESS_THAN_OR_EQUALS search, we still don't get back the
-	// span we pass as a continuation token. (Primary index edition).
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.LESS_THAN_OR_EQUALS,
-				Field: common.SPAN_ID,
-				Val:   common.TestId("00000000000000000000000000000002").String(),
-			},
-		},
-		Lim:  100,
-		Prev: &SIMPLE_TEST_SPANS[1],
-	}, []common.Span{SIMPLE_TEST_SPANS[0]})
-
-	// When doing a GREATER_THAN search on DURATION, we still don't get back the
-	// span we pass as a continuation token. (Secondary index edition).
-	testQuery(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.GREATER_THAN,
-				Field: common.DURATION,
-				Val:   "0",
-			},
-		},
-		Lim:  100,
-		Prev: &SIMPLE_TEST_SPANS[1],
-	}, []common.Span{SIMPLE_TEST_SPANS[2], SIMPLE_TEST_SPANS[0]})
-}
-
-func TestQueryRowsScanned(t *testing.T) {
-	t.Parallel()
-	htraceBld := &MiniHTracedBuilder{Name: "TestQueryRowsScanned",
-		WrittenSpans: common.NewSemaphore(0),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		panic(err)
-	}
-	defer ht.Close()
-	createSpans(SIMPLE_TEST_SPANS, ht.Store)
-	assertNumWrittenEquals(t, ht.Store.msink, len(SIMPLE_TEST_SPANS))
-	testQueryExt(t, ht, &common.Query{
-		Predicates: []common.Predicate{
-			common.Predicate{
-				Op:    common.EQUALS,
-				Field: common.SPAN_ID,
-				Val:   common.TestId("00000000000000000000000000000001").String(),
-			},
-		},
-		Lim:  100,
-		Prev: nil,
-	}, []common.Span{SIMPLE_TEST_SPANS[0]},
-	[]int{2, 1})
-}
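
The continuation-token tests above rely on the convention that Query.Prev names the
last span already returned and is excluded from the next page.  A paging sketch under
that convention, assumed to sit in package main next to these tests and assuming
HandleQuery returns []common.Span as the comparisons in testQueryExt suggest; the
predicate, page size, and empty-page termination are illustrative choices:

// Page through every span with a non-zero duration, five at a time, using
// the same dataStore.HandleQuery call as testQueryExt above.
func fetchAllPages(store *dataStore) ([]common.Span, error) {
	var results []common.Span
	var prev *common.Span
	for {
		spans, err, _ := store.HandleQuery(&common.Query{
			Predicates: []common.Predicate{
				common.Predicate{
					Op:    common.GREATER_THAN,
					Field: common.DURATION,
					Val:   "0",
				},
			},
			Lim:  5,
			Prev: prev, // nil on the first page, last span seen afterwards
		})
		if err != nil {
			return nil, err
		}
		if len(spans) == 0 {
			return results, nil // an empty page means we are done
		}
		results = append(results, spans...)
		prev = &spans[len(spans)-1]
	}
}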

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/heartbeater.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/heartbeater.go b/htrace-htraced/go/src/org/apache/htrace/htraced/heartbeater.go
deleted file mode 100644
index 49a21ee..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/heartbeater.go
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"org/apache/htrace/common"
-	"sync"
-	"time"
-)
-
-type Heartbeater struct {
-	// The name of this heartbeater
-	name string
-
-	// How long to sleep between heartbeats, in milliseconds.
-	periodMs int64
-
-	// The logger to use.
-	lg *common.Logger
-
-	// The channels to send the heartbeat on.
-	targets []HeartbeatTarget
-
-	// Incoming requests to the heartbeater.  When this is closed, the
-	// heartbeater will exit.
-	req chan *HeartbeatTarget
-
-	wg sync.WaitGroup
-}
-
-type HeartbeatTarget struct {
-	// The name of the heartbeat target.
-	name string
-
-	// The channel for the heartbeat target.
-	targetChan chan interface{}
-}
-
-func (tgt *HeartbeatTarget) String() string {
-	return tgt.name
-}
-
-func NewHeartbeater(name string, periodMs int64, lg *common.Logger) *Heartbeater {
-	hb := &Heartbeater{
-		name:     name,
-		periodMs: periodMs,
-		lg:       lg,
-		targets:  make([]HeartbeatTarget, 0, 4),
-		req:      make(chan *HeartbeatTarget),
-	}
-	hb.wg.Add(1)
-	go hb.run()
-	return hb
-}
-
-func (hb *Heartbeater) AddHeartbeatTarget(tgt *HeartbeatTarget) {
-	hb.req <- tgt
-}
-
-func (hb *Heartbeater) Shutdown() {
-	close(hb.req)
-	hb.wg.Wait()
-}
-
-func (hb *Heartbeater) String() string {
-	return hb.name
-}
-
-func (hb *Heartbeater) run() {
-	defer func() {
-		hb.lg.Debugf("%s: exiting.\n", hb.String())
-		hb.wg.Done()
-	}()
-	period := time.Duration(hb.periodMs) * time.Millisecond
-	for {
-		periodEnd := time.Now().Add(period)
-		for {
-			timeToWait := periodEnd.Sub(time.Now())
-			if timeToWait <= 0 {
-				break
-			} else if timeToWait > period {
-				// Smooth over jitter or clock changes
-				timeToWait = period
-				periodEnd = time.Now().Add(period)
-			}
-			select {
-			case tgt, open := <-hb.req:
-				if !open {
-					return
-				}
-				hb.targets = append(hb.targets, *tgt)
-				hb.lg.Debugf("%s: added %s.\n", hb.String(), tgt.String())
-			case <-time.After(timeToWait):
-			}
-		}
-		for targetIdx := range hb.targets {
-			select {
-			case hb.targets[targetIdx].targetChan <- nil:
-			default:
-				// We failed to send a heartbeat because the other goroutine was busy and
-				// hasn't cleared the previous one from its channel.  This could indicate a
-				// stuck goroutine.
-				hb.lg.Infof("%s: could not send heartbeat to %s.\n",
-					hb.String(), hb.targets[targetIdx])
-			}
-		}
-	}
-}
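
Since run() above sends with a non-blocking select, a target that stops draining its
channel only misses beats (and produces the log line above) rather than blocking the
heartbeater.  A minimal consumer sketch against the API in this file; the buffer size,
period, and target name are illustrative:

// Assumed to sit in package main next to heartbeater.go; lg is an
// already-configured *common.Logger.
func runWithHeartbeats(lg *common.Logger) {
	hb := NewHeartbeater("exampleHeartbeater", 1000, lg) // beat once per second
	defer hb.Shutdown()

	// A small buffer lets a briefly busy consumer avoid missing beats.
	beats := make(chan interface{}, 1)
	hb.AddHeartbeatTarget(&HeartbeatTarget{
		name:       "exampleTarget",
		targetChan: beats,
	})
	for i := 0; i < 3; i++ {
		<-beats // periodic work goes here
	}
}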

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/heartbeater_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/heartbeater_test.go b/htrace-htraced/go/src/org/apache/htrace/htraced/heartbeater_test.go
deleted file mode 100644
index cbde7fc..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/heartbeater_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"testing"
-	"time"
-)
-
-func TestHeartbeaterStartupShutdown(t *testing.T) {
-	cnfBld := conf.Builder{
-		Values:   conf.TEST_VALUES(),
-		Defaults: conf.DEFAULTS,
-	}
-	cnf, err := cnfBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create conf: %s", err.Error())
-	}
-	lg := common.NewLogger("heartbeater", cnf)
-	hb := NewHeartbeater("ExampleHeartbeater", 1, lg)
-	if hb.String() != "ExampleHeartbeater" {
-		t.Fatalf("hb.String() returned %s instead of %s\n", hb.String(), "ExampleHeartbeater")
-	}
-	hb.Shutdown()
-}
-
-// The number of milliseconds between heartbeats
-const HEARTBEATER_PERIOD = 5
-
-// The number of heartbeats to send in the test.
-const NUM_TEST_HEARTBEATS = 3
-
-func TestHeartbeaterSendsHeartbeats(t *testing.T) {
-	cnfBld := conf.Builder{
-		Values:   conf.TEST_VALUES(),
-		Defaults: conf.DEFAULTS,
-	}
-	cnf, err := cnfBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create conf: %s", err.Error())
-	}
-	lg := common.NewLogger("heartbeater", cnf)
-	// The minimum amount of time which the heartbeater test should take
-	MINIMUM_TEST_DURATION := time.Millisecond * (NUM_TEST_HEARTBEATS * HEARTBEATER_PERIOD)
-	duration := MINIMUM_TEST_DURATION
-	for duration <= MINIMUM_TEST_DURATION {
-		start := time.Now()
-		testHeartbeaterSendsHeartbeatsImpl(t, lg)
-		end := time.Now()
-		duration = end.Sub(start)
-		lg.Debugf("Measured duration: %v; minimum expected duration: %v\n",
-			duration, MINIMUM_TEST_DURATION)
-	}
-}
-
-func testHeartbeaterSendsHeartbeatsImpl(t *testing.T, lg *common.Logger) {
-	hb := NewHeartbeater("ExampleHeartbeater", HEARTBEATER_PERIOD, lg)
-	if hb.String() != "ExampleHeartbeater" {
-		t.Fatalf("hb.String() returned %s instead of %s\n", hb.String(), "ExampleHeartbeater")
-	}
-	testChan := make(chan interface{}, NUM_TEST_HEARTBEATS)
-	gotAllHeartbeats := make(chan bool)
-	hb.AddHeartbeatTarget(&HeartbeatTarget{
-		name:       "ExampleHeartbeatTarget",
-		targetChan: testChan,
-	})
-	go func() {
-		for i := 0; i < NUM_TEST_HEARTBEATS; i++ {
-			<-testChan
-		}
-		gotAllHeartbeats <- true
-		for i := 0; i < NUM_TEST_HEARTBEATS; i++ {
-			_, open := <-testChan
-			if !open {
-				return
-			}
-		}
-	}()
-	<-gotAllHeartbeats
-	hb.Shutdown()
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/hrpc.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/hrpc.go b/htrace-htraced/go/src/org/apache/htrace/htraced/hrpc.go
deleted file mode 100644
index ecd13d4..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/hrpc.go
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/binary"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"github.com/ugorji/go/codec"
-	"io"
-	"net"
-	"net/rpc"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-const MAX_HRPC_HANDLERS = 32765
-
-// Handles HRPC calls
-type HrpcHandler struct {
-	lg    *common.Logger
-	store *dataStore
-}
-
-// The HRPC server
-type HrpcServer struct {
-	*rpc.Server
-	hand *HrpcHandler
-
-	// The listener we are using to accept new connections.
-	listener net.Listener
-
-	// A WaitGroup used to block until the HRPC server has exited.
-	exited sync.WaitGroup
-
-	// A channel containing server codecs to use.  This channel is fully
-	// buffered.  The number of entries it initially contains determines how
-	// many concurrent codecs we will have running at once.
-	cdcs chan *HrpcServerCodec
-
-	// Used to shut down
-	shutdown chan interface{}
-
-	// The I/O timeout to use when reading requests or sending responses.  This
-	// timeout does not apply to the time we spend processing the message.
-	ioTimeo time.Duration
-
-	// A count of all I/O errors that we have encountered since the server
-	// started.  This counts errors like improperly formatted message frames,
-	// but not errors like properly formatted but invalid messages.
-	// This count is updated from multiple goroutines via sync/atomic.
-	ioErrorCount uint64
-
-	// The test hooks to use, or nil during normal operation.
-	testHooks *hrpcTestHooks
-}
-
-type hrpcTestHooks struct {
-	// A callback we make right after calling Accept() but before reading from
-	// the new connection.
-	HandleAdmission func()
-}
-
-// A codec which encodes HRPC data via JSON.  This structure holds the context
-// for a particular client connection.
-type HrpcServerCodec struct {
-	lg *common.Logger
-
-	// The current connection.
-	conn net.Conn
-
-	// The HrpcServer which this connection is part of.
-	hsv *HrpcServer
-
-	// The message length we read from the header.
-	length uint32
-
-	// The number of messages this connection has handled.
-	numHandled int
-
-	// The buffer for reading requests.  These buffers are reused for multiple
-	// requests to avoid allocating memory.
-	buf []byte
-
-	// Configuration for msgpack decoding
-	msgpackHandle codec.MsgpackHandle
-}
-
-func asJson(val interface{}) string {
-	js, err := json.Marshal(val)
-	if err != nil {
-		return "encoding error: " + err.Error()
-	}
-	return string(js)
-}
-
-func newIoErrorWarn(cdc *HrpcServerCodec, val string) error {
-	return newIoError(cdc, val, common.WARN)
-}
-
-func newIoError(cdc *HrpcServerCodec, val string, level common.Level) error {
-	if cdc.lg.LevelEnabled(level) {
-		cdc.lg.Write(level, cdc.conn.RemoteAddr().String()+": "+val+"\n")
-	}
-	if level >= common.INFO {
-		atomic.AddUint64(&cdc.hsv.ioErrorCount, 1)
-	}
-	return errors.New(val)
-}
-
-func (cdc *HrpcServerCodec) ReadRequestHeader(req *rpc.Request) error {
-	hdr := common.HrpcRequestHeader{}
-	if cdc.lg.TraceEnabled() {
-		cdc.lg.Tracef("%s: Reading HRPC request header.\n", cdc.conn.RemoteAddr())
-	}
-	cdc.conn.SetDeadline(time.Now().Add(cdc.hsv.ioTimeo))
-	err := binary.Read(cdc.conn, binary.LittleEndian, &hdr)
-	if err != nil {
-		if err == io.EOF && cdc.numHandled > 0 {
-			return newIoError(cdc, fmt.Sprintf("Remote closed connection "+
-				"after writing %d message(s)", cdc.numHandled), common.DEBUG)
-		}
-		return newIoError(cdc,
-			fmt.Sprintf("Error reading request header: %s", err.Error()), common.WARN)
-	}
-	if cdc.lg.TraceEnabled() {
-		cdc.lg.Tracef("%s: Read HRPC request header %s\n",
-			cdc.conn.RemoteAddr(), asJson(&hdr))
-	}
-	if hdr.Magic != common.HRPC_MAGIC {
-		return newIoErrorWarn(cdc, fmt.Sprintf("Invalid request header: expected "+
-			"magic number of 0x%04x, but got 0x%04x", common.HRPC_MAGIC, hdr.Magic))
-	}
-	if hdr.Length > common.MAX_HRPC_BODY_LENGTH {
-		return newIoErrorWarn(cdc, fmt.Sprintf("Length prefix was too long.  "+
-			"Maximum length is %d, but we got %d.", common.MAX_HRPC_BODY_LENGTH,
-			hdr.Length))
-	}
-	req.ServiceMethod = common.HrpcMethodIdToMethodName(hdr.MethodId)
-	if req.ServiceMethod == "" {
-		return newIoErrorWarn(cdc, fmt.Sprintf("Unknown MethodID code 0x%04x",
-			hdr.MethodId))
-	}
-	req.Seq = hdr.Seq
-	cdc.length = hdr.Length
-	return nil
-}
-
-func (cdc *HrpcServerCodec) ReadRequestBody(body interface{}) error {
-	remoteAddr := cdc.conn.RemoteAddr().String()
-	if cdc.lg.TraceEnabled() {
-		cdc.lg.Tracef("%s: Reading HRPC %d-byte request body.\n",
-			remoteAddr, cdc.length)
-	}
-	if cap(cdc.buf) < int(cdc.length) {
-		var pow uint
-		for pow=0;(1<<pow) < int(cdc.length);pow++ {
-		}
-		cdc.buf = make([]byte, 0, 1<<pow)
-	}
-	_, err := io.ReadFull(cdc.conn, cdc.buf[:cdc.length])
-	if err != nil {
-		return newIoErrorWarn(cdc, fmt.Sprintf("Failed to read %d-byte "+
-			"request body: %s", cdc.length, err.Error()))
-	}
-	var zeroTime time.Time
-	cdc.conn.SetDeadline(zeroTime)
-
-	dec := codec.NewDecoderBytes(cdc.buf[:cdc.length], &cdc.msgpackHandle)
-	err = dec.Decode(body)
-	if cdc.lg.TraceEnabled() {
-		cdc.lg.Tracef("%s: read HRPC message: %s\n",
-			remoteAddr, asJson(&body))
-	}
-	req := body.(*common.WriteSpansReq)
-	if req == nil {
-		return nil
-	}
-	// We decode WriteSpans requests in a streaming fashion, to avoid overloading the garbage
-	// collector with a ton of trace spans all at once.
-	startTime := time.Now()
-	client, _, err := net.SplitHostPort(remoteAddr)
-	if err != nil {
-		return newIoErrorWarn(cdc, fmt.Sprintf("Failed to split host and port "+
-			"for %s: %s\n", remoteAddr, err.Error()))
-	}
-	hand := cdc.hsv.hand
-	ing := hand.store.NewSpanIngestor(hand.lg, client, req.DefaultTrid)
-	for spanIdx := 0; spanIdx < req.NumSpans; spanIdx++ {
-		var span *common.Span
-		err := dec.Decode(&span)
-		if err != nil {
-			return newIoErrorWarn(cdc, fmt.Sprintf("Failed to decode span %d " +
-				"out of %d: %s\n", spanIdx, req.NumSpans, err.Error()))
-		}
-		ing.IngestSpan(span)
-	}
-	ing.Close(startTime)
-	return nil
-}
-
-var EMPTY []byte = make([]byte, 0)
-
-func (cdc *HrpcServerCodec) WriteResponse(resp *rpc.Response, msg interface{}) error {
-	cdc.conn.SetDeadline(time.Now().Add(cdc.hsv.ioTimeo))
-	var err error
-	buf := EMPTY
-	if msg != nil {
-		w := bytes.NewBuffer(make([]byte, 0, 128))
-		enc := codec.NewEncoder(w, &cdc.msgpackHandle)
-		err := enc.Encode(msg)
-		if err != nil {
-			return newIoErrorWarn(cdc, fmt.Sprintf("Failed to marshal "+
-				"response message: %s", err.Error()))
-		}
-		buf = w.Bytes()
-	}
-	hdr := common.HrpcResponseHeader{}
-	hdr.MethodId = common.HrpcMethodNameToId(resp.ServiceMethod)
-	hdr.Seq = resp.Seq
-	hdr.ErrLength = uint32(len(resp.Error))
-	hdr.Length = uint32(len(buf))
-	writer := bufio.NewWriterSize(cdc.conn, 256)
-	err = binary.Write(writer, binary.LittleEndian, &hdr)
-	if err != nil {
-		return newIoErrorWarn(cdc, fmt.Sprintf("Failed to write response "+
-			"header: %s", err.Error()))
-	}
-	if hdr.ErrLength > 0 {
-		_, err = io.WriteString(writer, resp.Error)
-		if err != nil {
-			return newIoErrorWarn(cdc, fmt.Sprintf("Failed to write error "+
-				"string: %s", err.Error()))
-		}
-	}
-	if hdr.Length > 0 {
-		var length int
-		length, err = writer.Write(buf)
-		if err != nil {
-			return newIoErrorWarn(cdc, fmt.Sprintf("Failed to write response "+
-				"message: %s", err.Error()))
-		}
-		if uint32(length) != hdr.Length {
-			return newIoErrorWarn(cdc, fmt.Sprintf("Failed to write all of "+
-				"response message: %s", err.Error()))
-		}
-	}
-	err = writer.Flush()
-	if err != nil {
-		return newIoErrorWarn(cdc, fmt.Sprintf("Failed to write the response "+
-			"bytes: %s", err.Error()))
-	}
-	cdc.numHandled++
-	return nil
-}
-
-func (cdc *HrpcServerCodec) Close() error {
-	err := cdc.conn.Close()
-	cdc.conn = nil
-	cdc.length = 0
-	cdc.numHandled = 0
-	cdc.hsv.cdcs <- cdc
-	return err
-}
-
-func (hand *HrpcHandler) WriteSpans(req *common.WriteSpansReq,
-		resp *common.WriteSpansResp) (err error) {
-	// Nothing to do here; WriteSpans is handled in ReadRequestBody.
-	return nil
-}
-
-func CreateHrpcServer(cnf *conf.Config, store *dataStore,
-	testHooks *hrpcTestHooks) (*HrpcServer, error) {
-	lg := common.NewLogger("hrpc", cnf)
-	numHandlers := cnf.GetInt(conf.HTRACE_NUM_HRPC_HANDLERS)
-	if numHandlers < 1 {
-		lg.Warnf("%s must be positive: using 1 handler.\n", conf.HTRACE_NUM_HRPC_HANDLERS)
-		numHandlers = 1
-	}
-	if numHandlers > MAX_HRPC_HANDLERS {
-		lg.Warnf("%s cannot be more than %d: using %d handlers\n",
-			conf.HTRACE_NUM_HRPC_HANDLERS, MAX_HRPC_HANDLERS, MAX_HRPC_HANDLERS)
-		numHandlers = MAX_HRPC_HANDLERS
-	}
-	hsv := &HrpcServer{
-		Server: rpc.NewServer(),
-		hand: &HrpcHandler{
-			lg:    lg,
-			store: store,
-		},
-		cdcs:     make(chan *HrpcServerCodec, numHandlers),
-		shutdown: make(chan interface{}),
-		ioTimeo: time.Millisecond *
-			time.Duration(cnf.GetInt64(conf.HTRACE_HRPC_IO_TIMEOUT_MS)),
-		testHooks: testHooks,
-	}
-	for i := 0; i < numHandlers; i++ {
-		hsv.cdcs <- &HrpcServerCodec{
-			lg:  lg,
-			hsv: hsv,
-			msgpackHandle: codec.MsgpackHandle {
-				WriteExt: true,
-			},
-		}
-	}
-	var err error
-	hsv.listener, err = net.Listen("tcp", cnf.Get(conf.HTRACE_HRPC_ADDRESS))
-	if err != nil {
-		return nil, err
-	}
-	hsv.Server.Register(hsv.hand)
-	hsv.exited.Add(1)
-	go hsv.run()
-	lg.Infof("Started HRPC server on %s with %d handler routines. "+
-		"ioTimeo=%s.\n", hsv.listener.Addr().String(), numHandlers,
-		hsv.ioTimeo.String())
-	return hsv, nil
-}
-
-func (hsv *HrpcServer) run() {
-	lg := hsv.hand.lg
-	srvAddr := hsv.listener.Addr().String()
-	defer func() {
-		lg.Infof("HrpcServer on %s exiting\n", srvAddr)
-		hsv.exited.Done()
-	}()
-	for {
-		select {
-		case cdc := <-hsv.cdcs:
-			conn, err := hsv.listener.Accept()
-			if err != nil {
-				lg.Errorf("HrpcServer on %s got accept error: %s\n", srvAddr, err.Error())
-				hsv.cdcs <- cdc // never blocks; there is always sufficient buffer space
-				continue
-			}
-			if lg.TraceEnabled() {
-				lg.Tracef("%s: Accepted HRPC connection.\n", conn.RemoteAddr())
-			}
-			cdc.conn = conn
-			cdc.numHandled = 0
-			if hsv.testHooks != nil && hsv.testHooks.HandleAdmission != nil {
-				hsv.testHooks.HandleAdmission()
-			}
-			go hsv.ServeCodec(cdc)
-		case <-hsv.shutdown:
-			return
-		}
-	}
-}
-
-func (hsv *HrpcServer) Addr() net.Addr {
-	return hsv.listener.Addr()
-}
-
-func (hsv *HrpcServer) GetNumIoErrors() uint64 {
-	return atomic.LoadUint64(&hsv.ioErrorCount)
-}
-
-func (hsv *HrpcServer) Close() {
-	close(hsv.shutdown)
-	hsv.listener.Close()
-	hsv.exited.Wait()
-}
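
For reference, the wire format that ReadRequestHeader and ReadRequestBody above expect
is a little-endian fixed-size header followed by a msgpack-encoded body.  A client-side
framing sketch follows; the service-method string is assumed to be the one net/rpc
derives from Register(hsv.hand), and the header is written with the field names shown
in the hunk above:

// Writes one HRPC request frame to conn.  body must already be
// msgpack-encoded (for WriteSpans: a WriteSpansReq followed by
// NumSpans individual spans, as ReadRequestBody decodes them).
func writeHrpcFrame(conn net.Conn, seq uint64, body []byte) error {
	hdr := common.HrpcRequestHeader{
		Magic:    common.HRPC_MAGIC,
		MethodId: common.HrpcMethodNameToId("HrpcHandler.WriteSpans"),
		Seq:      seq,
		Length:   uint32(len(body)),
	}
	if err := binary.Write(conn, binary.LittleEndian, &hdr); err != nil {
		return err
	}
	_, err := conn.Write(body)
	return err
}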

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/htraced.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/htraced.go b/htrace-htraced/go/src/org/apache/htrace/htraced/htraced.go
deleted file mode 100644
index 35ee753..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/htraced.go
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"bufio"
-	"encoding/json"
-	"fmt"
-	"github.com/alecthomas/kingpin"
-	"github.com/jmhodges/levigo"
-	"net"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"os"
-	"runtime"
-	"time"
-)
-
-var RELEASE_VERSION string
-var GIT_VERSION string
-
-const USAGE = `htraced: the HTrace server daemon.
-
-htraced receives trace spans sent from HTrace clients.  It exposes a REST
-interface which others can query.  It also runs a web server with a graphical
-user interface.  htraced stores its span data in LevelDB files on the local
-disks.
-
-Usage:
---help: this help message
-
--Dk=v: set configuration key 'k' to value 'v'
-For example -Dweb.address=127.0.0.1:8080 sets the web address to localhost,
-port 8080.  -Dlog.level=DEBUG will set the default log level to DEBUG.
-
--Dk: set configuration key 'k' to 'true'
-
-Normally, configuration options should be set in the ` + conf.CONFIG_FILE_NAME + `
-configuration file.  We find this file by searching the paths in the 
-` + conf.HTRACED_CONF_DIR + `. The command-line options are just an alternate way
-of setting configuration when launching the daemon.
-`
-
-func main() {
-	// Load the htraced configuration.
-	// This also parses the -Dfoo=bar command line arguments and removes them
-	// from os.Argv.
-	cnf, cnfLog := conf.LoadApplicationConfig("htraced.")
-
-	// Parse the remaining command-line arguments.
-	app := kingpin.New(os.Args[0], USAGE)
-	version := app.Command("version", "Print server version and exit.")
-	cmd := kingpin.MustParse(app.Parse(os.Args[1:]))
-
-	// Handle the "version" command-line argument.
-	if cmd == version.FullCommand() {
-		fmt.Printf("Running htraced %s [%s].\n", RELEASE_VERSION, GIT_VERSION)
-		os.Exit(0)
-	}
-
-	// Open the HTTP port.
-	// We want to do this first, before initializing the datastore or setting up
-	// logging.  That way, if someone accidentally starts two daemons with the
-	// same config file, the second invocation will exit with a "port in use"
-	// error rather than potentially disrupting the first invocation.
-	rstListener, listenErr := net.Listen("tcp", cnf.Get(conf.HTRACE_WEB_ADDRESS))
-	if listenErr != nil {
-		fmt.Fprintf(os.Stderr, "Error opening HTTP port: %s\n",
-			listenErr.Error())
-		os.Exit(1)
-	}
-
-	// Print out the startup banner and information about the daemon
-	// configuration.
-	lg := common.NewLogger("main", cnf)
-	defer lg.Close()
-	lg.Infof("*** Starting htraced %s [%s]***\n", RELEASE_VERSION, GIT_VERSION)
-	scanner := bufio.NewScanner(cnfLog)
-	for scanner.Scan() {
-		lg.Infof(scanner.Text() + "\n")
-	}
-	common.InstallSignalHandlers(cnf)
-	if runtime.GOMAXPROCS(0) == 1 {
-		ncpu := runtime.NumCPU()
-		runtime.GOMAXPROCS(ncpu)
-		lg.Infof("setting GOMAXPROCS=%d\n", ncpu)
-	} else {
-		lg.Infof("GOMAXPROCS=%d\n", runtime.GOMAXPROCS(0))
-	}
-	lg.Infof("leveldb version=%d.%d\n",
-		levigo.GetLevelDBMajorVersion(), levigo.GetLevelDBMinorVersion())
-
-	// Initialize the datastore.
-	store, err := CreateDataStore(cnf, nil)
-	if err != nil {
-		lg.Errorf("Error creating datastore: %s\n", err.Error())
-		os.Exit(1)
-	}
-	var rsv *RestServer
-	rsv, err = CreateRestServer(cnf, store, rstListener)
-	if err != nil {
-		lg.Errorf("Error creating REST server: %s\n", err.Error())
-		os.Exit(1)
-	}
-	var hsv *HrpcServer
-	if cnf.Get(conf.HTRACE_HRPC_ADDRESS) != "" {
-		hsv, err = CreateHrpcServer(cnf, store, nil)
-		if err != nil {
-			lg.Errorf("Error creating HRPC server: %s\n", err.Error())
-			os.Exit(1)
-		}
-	} else {
-		lg.Infof("Not starting HRPC server because no value was given for %s.\n",
-			conf.HTRACE_HRPC_ADDRESS)
-	}
-	naddr := cnf.Get(conf.HTRACE_STARTUP_NOTIFICATION_ADDRESS)
-	if naddr != "" {
-		notif := StartupNotification{
-			HttpAddr:  rsv.Addr().String(),
-			ProcessId: os.Getpid(),
-		}
-		if hsv != nil {
-			notif.HrpcAddr = hsv.Addr().String()
-		}
-		err = sendStartupNotification(naddr, &notif)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to send startup notification: "+
-				"%s\n", err.Error())
-			os.Exit(1)
-		}
-	}
-	for {
-		time.Sleep(time.Duration(10) * time.Hour)
-	}
-}
-
-// A startup notification message that we optionally send on startup.
-// Used by unit tests.
-type StartupNotification struct {
-	HttpAddr  string
-	HrpcAddr  string
-	ProcessId int
-}
-
-func sendStartupNotification(naddr string, notif *StartupNotification) error {
-	conn, err := net.Dial("tcp", naddr)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		if conn != nil {
-			conn.Close()
-		}
-	}()
-	var buf []byte
-	buf, err = json.Marshal(notif)
-	if err != nil {
-		return err
-	}
-	_, err = conn.Write(buf)
-	conn.Close()
-	conn = nil
-	return nil
-}
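
sendStartupNotification above opens a plain TCP connection and writes a single JSON
object, so the receiving side only needs to accept one connection and decode the
stream.  A receiver sketch for the other end, assumed to sit in package main next to
htraced.go; the listener would be bound to the address configured via
conf.HTRACE_STARTUP_NOTIFICATION_ADDRESS:

// Accept one connection and decode the StartupNotification the daemon sends.
func awaitStartupNotification(listener net.Listener) (*StartupNotification, error) {
	conn, err := listener.Accept()
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	var notif StartupNotification
	if err := json.NewDecoder(conn).Decode(&notif); err != nil {
		return nil, err
	}
	return &notif, nil
}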

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/loader.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/loader.go b/htrace-htraced/go/src/org/apache/htrace/htraced/loader.go
deleted file mode 100644
index 5914004..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/loader.go
+++ /dev/null
@@ -1,511 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"github.com/jmhodges/levigo"
-	"github.com/ugorji/go/codec"
-	"io"
-	"math"
-	"math/rand"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"os"
-	"strings"
-	"syscall"
-	"time"
-)
-
-// Routines for loading the datastore.
-
-// The leveldb key which has information about the shard.
-const SHARD_INFO_KEY = 'w'
-
-// A constant signifying that we don't know what the layout version is.
-const UNKNOWN_LAYOUT_VERSION = 0
-
-// The current layout version.  We cannot read layout versions newer than this.
-// We may sometimes be able to read older versions, but only by doing an
-// upgrade.
-const CURRENT_LAYOUT_VERSION = 3
-
-type DataStoreLoader struct {
-	// The dataStore logger.
-	lg *common.Logger
-
-	// True if we should clear the stored data.
-	ClearStored bool
-
-	// The shards that we're loading
-	shards []*ShardLoader
-
-	// The options to use for opening datastores in LevelDB.
-	openOpts *levigo.Options
-
-	// The read options to use for LevelDB.
-	readOpts *levigo.ReadOptions
-
-	// The write options to use for LevelDB.
-	writeOpts *levigo.WriteOptions
-}
-
-// Information about a Shard.
-type ShardInfo struct {
-	// The layout version of the datastore.
-	// We should always keep this field so that old software can recognize new
-	// layout versions, even if it can't read them.
-	LayoutVersion uint64
-
-	// A random number identifying this daemon.
-	DaemonId uint64
-
-	// The total number of shards in this datastore.
-	TotalShards uint32
-
-	// The index of this shard within the datastore.
-	ShardIndex uint32
-}
-
-// Create a new datastore loader. 
-// Initializes the loader, but does not load any leveldb instances.
-func NewDataStoreLoader(cnf *conf.Config) *DataStoreLoader {
-	dld := &DataStoreLoader{
-		lg: common.NewLogger("datastore", cnf),
-		ClearStored: cnf.GetBool(conf.HTRACE_DATA_STORE_CLEAR),
-	}
-	dld.readOpts = levigo.NewReadOptions()
-	dld.readOpts.SetFillCache(true)
-	dld.readOpts.SetVerifyChecksums(false)
-	dld.writeOpts = levigo.NewWriteOptions()
-	dld.writeOpts.SetSync(false)
-	dirsStr := cnf.Get(conf.HTRACE_DATA_STORE_DIRECTORIES)
-	rdirs := strings.Split(dirsStr, conf.PATH_LIST_SEP)
-	// Filter out empty entries
-	dirs := make([]string, 0, len(rdirs))
-	for i := range(rdirs) {
-		if strings.TrimSpace(rdirs[i]) != "" {
-			dirs = append(dirs, rdirs[i])
-		}
-	}
-	dld.shards = make([]*ShardLoader, len(dirs))
-	for i := range(dirs) {
-		dld.shards[i] = &ShardLoader{
-			dld: dld,
-			path: dirs[i] + conf.PATH_SEP + "db",
-		}
-	}
-	dld.openOpts = levigo.NewOptions()
-	cacheSize := cnf.GetInt(conf.HTRACE_LEVELDB_CACHE_SIZE)
-	dld.openOpts.SetCache(levigo.NewLRUCache(cacheSize))
-	dld.openOpts.SetParanoidChecks(false)
-	writeBufferSize := cnf.GetInt(conf.HTRACE_LEVELDB_WRITE_BUFFER_SIZE)
-	if writeBufferSize > 0 {
-		dld.openOpts.SetWriteBufferSize(writeBufferSize)
-	}
-	maxFdPerShard := dld.calculateMaxOpenFilesPerShard()
-	if maxFdPerShard > 0 {
-		dld.openOpts.SetMaxOpenFiles(maxFdPerShard)
-	}
-	return dld
-}
-
-func (dld *DataStoreLoader) Close() {
-	if dld.lg != nil {
-		dld.lg.Close()
-		dld.lg = nil
-	}
-	if dld.openOpts != nil {
-		dld.openOpts.Close()
-		dld.openOpts = nil
-	}
-	if dld.readOpts != nil {
-		dld.readOpts.Close()
-		dld.readOpts = nil
-	}
-	if dld.writeOpts != nil {
-		dld.writeOpts.Close()
-		dld.writeOpts = nil
-	}
-	if dld.shards != nil {
-		for i := range(dld.shards) {
-			if dld.shards[i] != nil {
-				dld.shards[i].Close()
-			}
-		}
-		dld.shards = nil
-	}
-}
-
-func (dld *DataStoreLoader) DisownResources() {
-	dld.lg = nil
-	dld.openOpts = nil
-	dld.readOpts = nil
-	dld.writeOpts = nil
-	dld.shards = nil
-}
-
-// The maximum number of file descriptors we'll use on non-datastore things.
-const NON_DATASTORE_FD_MAX = 300
-
-// The minimum number of file descriptors per shard we will set.  Setting fewer
-// than this number could trigger a bug in some early versions of leveldb.
-const MIN_FDS_PER_SHARD = 80
-
-func (dld *DataStoreLoader) calculateMaxOpenFilesPerShard() int {
-	var rlim syscall.Rlimit
-	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim)
-	if err != nil {
-		dld.lg.Warnf("Unable to calculate maximum open files per shard: " +
-			"getrlimit failed: %s\n", err.Error())
-		return 0
-	}
-	// I think RLIMIT_NOFILE fits in 32 bits on all known operating systems,
-	// but there's no harm in being careful.  'int' in golang always holds at
-	// least 32 bits.
-	var maxFd int
-	if rlim.Cur > uint64(math.MaxInt32) {
-		maxFd = math.MaxInt32
-	} else {
-		maxFd = int(rlim.Cur)
-	}
-	if len(dld.shards) == 0 {
-		dld.lg.Warnf("Unable to calculate maximum open files per shard, " +
-			"since there are 0 shards configured.\n")
-		return 0
-	}
-	fdsPerShard := (maxFd - NON_DATASTORE_FD_MAX) / len(dld.shards)
-	if fdsPerShard < MIN_FDS_PER_SHARD {
-		dld.lg.Warnf("Expected to be able to use at least %d " +
-			"fds per shard, but we have %d shards and %d total fds to allocate, " +
-			"giving us only %d FDs per shard.", MIN_FDS_PER_SHARD,
-			len(dld.shards), maxFd - NON_DATASTORE_FD_MAX, fdsPerShard)
-		return 0
-	}
-	dld.lg.Infof("maxFd = %d.  Setting maxFdPerShard = %d\n",
-		maxFd, fdsPerShard)
-	return fdsPerShard
-}
-
-// Load information about all shards.
-func (dld *DataStoreLoader) LoadShards() {
-	for i := range(dld.shards) {
-		shd := dld.shards[i]
-		shd.load()
-	}
-}
-
-// Verify that the shard infos are consistent.
-// Reorders the shardInfo structures based on their ShardIndex.
-func (dld *DataStoreLoader) VerifyShardInfos() error {
-	if len(dld.shards) < 1 {
-		return errors.New("No shard directories found.")
-	}
-	// Make sure no shards had errors.
-	for i := range(dld.shards) {
-		shd := dld.shards[i]
-		if shd.infoErr != nil {
-			return shd.infoErr
-		}
-	}
-	// Make sure that if any shards are empty, all shards are empty.
-	emptyShards := ""
-	prefix := ""
-	for i := range(dld.shards) {
-		if dld.shards[i].info == nil {
-			emptyShards += prefix + dld.shards[i].path
-			prefix = ", "
-		}
-	}
-	if emptyShards != "" {
-		for i := range(dld.shards) {
-			if dld.shards[i].info != nil {
-				return errors.New(fmt.Sprintf("Shards %s were empty, but " +
-					"the other shards had data.", emptyShards))
-			}
-		}
-		// All shards are empty.
-		return nil
-	}
-	// Make sure that all shards have the same layout version, daemonId, and number of total
-	// shards.
-	layoutVersion := dld.shards[0].info.LayoutVersion
-	daemonId := dld.shards[0].info.DaemonId
-	totalShards := dld.shards[0].info.TotalShards
-	for i := 1; i < len(dld.shards); i++ {
-		shd := dld.shards[i]
-		if layoutVersion != shd.info.LayoutVersion {
-			return errors.New(fmt.Sprintf("Layout version mismatch.  Shard " +
-				"%s has layout version 0x%016x, but shard %s has layout " +
-				"version 0x%016x.",
-				dld.shards[0].path, layoutVersion, shd.path, shd.info.LayoutVersion))
-		}
-		if daemonId != shd.info.DaemonId {
-			return errors.New(fmt.Sprintf("DaemonId mismatch. Shard %s has " +
-			"daemonId 0x%016x, but shard %s has daemonId 0x%016x.",
-				dld.shards[0].path, daemonId, shd.path, shd.info.DaemonId))
-		}
-		if totalShards != shd.info.TotalShards {
-			return errors.New(fmt.Sprintf("TotalShards mismatch.  Shard %s has " +
-				"TotalShards = %d, but shard %s has TotalShards = %d.",
-				dld.shards[0].path, totalShards, shd.path, shd.info.TotalShards))
-		}
-		if shd.info.ShardIndex >= totalShards {
-			return errors.New(fmt.Sprintf("Invalid ShardIndex.  Shard %s has " +
-				"ShardIndex = %d, but TotalShards = %d.",
-				shd.path, shd.info.ShardIndex, shd.info.TotalShards))
-		}
-	}
-	if layoutVersion != CURRENT_LAYOUT_VERSION {
-		return errors.New(fmt.Sprintf("The layout version of all shards " +
-			"is %d, but we only support version %d.",
-			layoutVersion, CURRENT_LAYOUT_VERSION))
-	}
-	if totalShards != uint32(len(dld.shards)) {
-		return errors.New(fmt.Sprintf("The TotalShards field of all shards " +
-			"is %d, but we have %d shards.", totalShards, len(dld.shards)))
-	}
-	// Reorder shards in order of their ShardIndex.
-	reorderedShards := make([]*ShardLoader, len(dld.shards))
-	for i := 0; i < len(dld.shards); i++ {
-		shd := dld.shards[i]
-		shardIdx := shd.info.ShardIndex
-		if reorderedShards[shardIdx] != nil {
-			return errors.New(fmt.Sprintf("Both shard %s and " +
-				"shard %s have ShardIndex %d.", shd.path,
-				reorderedShards[shardIdx].path, shardIdx))
-		}
-		reorderedShards[shardIdx] = shd
-	}
-	dld.shards = reorderedShards
-	return nil
-}
-
-func (dld *DataStoreLoader) Load() error {
-	var err error
-	// If data.store.clear was set, clear existing data.
-	if dld.ClearStored {
-		err = dld.clearStored()
-		if err != nil {
-			return err
-		}
-	}
-	// Make sure the shard directories exist in all cases, with a mkdir -p.
-	for i := range dld.shards {
-		err := os.MkdirAll(dld.shards[i].path, 0777)
-		if err != nil {
-			return errors.New(fmt.Sprintf("Failed to MkdirAll(%s): %s",
-				dld.shards[i].path, err.Error()))
-		}
-	}
-	// Get information about each shard, and verify them.
-	dld.LoadShards()
-	err = dld.VerifyShardInfos()
-	if err != nil {
-		return err
-	}
-	if dld.shards[0].ldb != nil {
-		dld.lg.Infof("Loaded %d leveldb instances with " +
-			"DaemonId of 0x%016x\n", len(dld.shards),
-			dld.shards[0].info.DaemonId)
-	} else {
-		// Create leveldb instances if needed.
-		rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
-		daemonId := uint64(rnd.Int63())
-		dld.lg.Infof("Initializing %d leveldb instances with a new " +
-			"DaemonId of 0x%016x\n", len(dld.shards), daemonId)
-		dld.openOpts.SetCreateIfMissing(true)
-		for i := range(dld.shards) {
-			shd := dld.shards[i]
-			shd.ldb, err = levigo.Open(shd.path, shd.dld.openOpts)
-			if err != nil {
-				return errors.New(fmt.Sprintf("levigo.Open(%s) failed to " +
-					"create the shard: %s", shd.path, err.Error()))
-			}
-			info := &ShardInfo {
-				LayoutVersion: CURRENT_LAYOUT_VERSION,
-				DaemonId: daemonId,
-				TotalShards: uint32(len(dld.shards)),
-				ShardIndex: uint32(i),
-			}
-			err = shd.writeShardInfo(info)
-			if err != nil {
-				return errors.New(fmt.Sprintf("levigo.Open(%s) failed to " +
-					"write shard info: %s", shd.path, err.Error()))
-			}
-			dld.lg.Infof("Shard %s initialized with ShardInfo %s \n",
-				shd.path, asJson(info))
-		}
-	}
-	return nil
-}
-
-func (dld *DataStoreLoader) clearStored() error {
-	for i := range dld.shards {
-		path := dld.shards[i].path
-		fi, err := os.Stat(path)
-		if err != nil && !os.IsNotExist(err) {
-			dld.lg.Errorf("Failed to stat %s: %s\n", path, err.Error())
-			return err
-		}
-		if fi != nil {
-			err = os.RemoveAll(path)
-			if err != nil {
-				dld.lg.Errorf("Failed to clear existing datastore directory %s: %s\n",
-					path, err.Error())
-				return err
-			}
-			dld.lg.Infof("Cleared existing datastore directory %s\n", path)
-		}
-	}
-	return nil
-}
-
-type ShardLoader struct {
-	// The parent DataStoreLoader
-	dld *DataStoreLoader
-
-	// Path to the shard
-	path string
-
-	// Leveldb instance of the shard
-	ldb *levigo.DB
-
-	// Information about the shard
-	info *ShardInfo
-
-	// If non-null, the error we encountered trying to load the shard info.
-	infoErr error
-}
-
-func (shd *ShardLoader) Close() {
-	if shd.ldb != nil {
-		shd.ldb.Close()
-		shd.ldb = nil
-	}
-}
-
-// Load information about a particular shard.
-func (shd *ShardLoader) load() {
-	shd.info = nil
-	fi, err := os.Stat(shd.path)
-	if err != nil {
-		if os.IsNotExist(err) {
-			shd.infoErr = nil
-			return
-		}
-		shd.infoErr = errors.New(fmt.Sprintf(
-			"stat() error on leveldb directory " +
-				"%s: %s", shd.path, err.Error()))
-		return
-	}
-	if !fi.Mode().IsDir() {
-		shd.infoErr = errors.New(fmt.Sprintf(
-			"stat() error on leveldb directory " +
-				"%s: inode is not directory.", shd.path))
-		return
-	}
-	var dbDir *os.File
-	dbDir, err = os.Open(shd.path)
-	if err != nil {
-		shd.infoErr = errors.New(fmt.Sprintf(
-			"open() error on leveldb directory " +
-				"%s: %s.", shd.path, err.Error()))
-		return
-	}
-	defer func() {
-		if dbDir != nil {
-			dbDir.Close()
-		}
-	}()
-	_, err = dbDir.Readdirnames(1)
-	if err != nil {
-		if err == io.EOF {
-			// The db directory is empty.
-			shd.infoErr = nil
-			return
-		}
-		shd.infoErr = errors.New(fmt.Sprintf(
-			"Readdirnames() error on leveldb directory " +
-				"%s: %s.", shd.path, err.Error()))
-		return
-	}
-	dbDir.Close()
-	dbDir = nil
-	shd.ldb, err = levigo.Open(shd.path, shd.dld.openOpts)
-	if err != nil {
-		shd.ldb = nil
-		shd.infoErr = errors.New(fmt.Sprintf(
-			"levigo.Open() error on leveldb directory " +
-				"%s: %s.", shd.path, err.Error()))
-		return
-	}
-	shd.info, err = shd.readShardInfo()
-	if err != nil {
-		shd.infoErr = err
-		return
-	}
-	shd.infoErr = nil
-}
-
-func (shd *ShardLoader) readShardInfo() (*ShardInfo, error) {
-	buf, err := shd.ldb.Get(shd.dld.readOpts, []byte{SHARD_INFO_KEY})
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("readShardInfo(%s): failed to " +
-			"read shard info key: %s", shd.path, err.Error()))
-	}
-	if len(buf) == 0 {
-		return nil, errors.New(fmt.Sprintf("readShardInfo(%s): got zero-" +
-			"length value for shard info key.", shd.path))
-	}
-	mh := new(codec.MsgpackHandle)
-	mh.WriteExt = true
-	r := bytes.NewBuffer(buf)
-	decoder := codec.NewDecoder(r, mh)
-	shardInfo := &ShardInfo {
-		LayoutVersion: UNKNOWN_LAYOUT_VERSION,
-	}
-	err = decoder.Decode(shardInfo)
-	if err != nil {
-		return nil, errors.New(fmt.Sprintf("readShardInfo(%s): msgpack " +
-			"decoding failed for shard info key: %s", shd.path, err.Error()))
-	}
-	return shardInfo, nil
-}
-
-func (shd *ShardLoader) writeShardInfo(info *ShardInfo) error {
-	mh := new(codec.MsgpackHandle)
-	mh.WriteExt = true
-	w := new(bytes.Buffer)
-	enc := codec.NewEncoder(w, mh)
-	err := enc.Encode(info)
-	if err != nil {
-		return errors.New(fmt.Sprintf("msgpack encoding error: %s",
-			err.Error()))
-	}
-	err = shd.ldb.Put(shd.dld.writeOpts, []byte{SHARD_INFO_KEY}, w.Bytes())
-	if err != nil {
-		return errors.New(fmt.Sprintf("leveldb write error: %s",
-			err.Error()))
-	}
-	return nil
-}

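calculateMaxOpenFilesPerShard above reserves NON_DATASTORE_FD_MAX (300) descriptors for everything that is not leveldb, splits the remainder evenly across the shards, and returns 0 (leaving leveldb's default in place) when the quotient falls below MIN_FDS_PER_SHARD (80). A minimal, self-contained sketch of that arithmetic, with hypothetical ulimit values standing in for a real getrlimit call:

    package main

    import "fmt"

    const nonDatastoreFdMax = 300 // mirrors NON_DATASTORE_FD_MAX above
    const minFdsPerShard = 80     // mirrors MIN_FDS_PER_SHARD above

    // fdsPerShard mirrors the division in calculateMaxOpenFilesPerShard:
    // reserve a fixed budget for non-datastore use, split the rest evenly
    // across the shards, and return 0 when the result would be too small.
    func fdsPerShard(maxFd, numShards int) int {
        if numShards == 0 {
            return 0
        }
        per := (maxFd - nonDatastoreFdMax) / numShards
        if per < minFdsPerShard {
            return 0
        }
        return per
    }

    func main() {
        fmt.Println(fdsPerShard(1024, 2)) // (1024 - 300) / 2 = 362 FDs per shard
        fmt.Println(fdsPerShard(256, 2))  // quotient is below 80, so 0 (keep leveldb's default)
    }

So a box with `ulimit -n 1024` and two data directories ends up allowing 362 open files per leveldb shard.
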
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/metrics.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/metrics.go b/htrace-htraced/go/src/org/apache/htrace/htraced/metrics.go
deleted file mode 100644
index 9176de0..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/metrics.go
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"math"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"sync"
-	"time"
-)
-
-//
-// The Metrics Sink for HTraced.
-//
-// The Metrics sink keeps track of metrics for the htraced daemon.
-// It is important to have good metrics so that we can properly manage htraced.  In particular, we
-// need to know the rate at which we are receiving spans, and the main places spans come from.  If
-// spans were dropped because of a high sampling rate, we need to know which part of the system
-// dropped them so that we can adjust the sampling rate there.
-//
-
-const LATENCY_CIRC_BUF_SIZE = 4096
-
-type MetricsSink struct {
-	// The metrics sink logger.
-	lg *common.Logger
-
-	// The maximum number of entries we should allow in the HostSpanMetrics map.
-	maxMtx int
-
-	// The total number of spans ingested by the server (counting dropped spans)
-	IngestedSpans uint64
-
-	// The total number of spans written to leveldb since the server started.
-	WrittenSpans uint64
-
-	// The total number of spans dropped by the server.
-	ServerDropped uint64
-
-	// Per-host Span Metrics
-	HostSpanMetrics common.SpanMetricsMap
-
-	// The last few writeSpan latencies
-	wsLatencyCircBuf *CircBufU32
-
-	// Lock protecting all metrics
-	lock sync.Mutex
-}
-
-func NewMetricsSink(cnf *conf.Config) *MetricsSink {
-	return &MetricsSink{
-		lg:               common.NewLogger("metrics", cnf),
-		maxMtx:           cnf.GetInt(conf.HTRACE_METRICS_MAX_ADDR_ENTRIES),
-		HostSpanMetrics:  make(common.SpanMetricsMap),
-		wsLatencyCircBuf: NewCircBufU32(LATENCY_CIRC_BUF_SIZE),
-	}
-}
-
-// Update the total number of spans which were ingested, as well as other
-// metrics that get updated during span ingest.
-func (msink *MetricsSink) UpdateIngested(addr string, totalIngested int,
-	serverDropped int, wsLatency time.Duration) {
-	msink.lock.Lock()
-	defer msink.lock.Unlock()
-	msink.IngestedSpans += uint64(totalIngested)
-	msink.ServerDropped += uint64(serverDropped)
-	msink.updateSpanMetrics(addr, 0, serverDropped)
-	wsLatencyMs := wsLatency.Nanoseconds() / 1000000
-	var wsLatency32 uint32
-	if wsLatencyMs > math.MaxUint32 {
-		wsLatency32 = math.MaxUint32
-	} else {
-		wsLatency32 = uint32(wsLatencyMs)
-	}
-	msink.wsLatencyCircBuf.Append(wsLatency32)
-}
-
-// Update the per-host span metrics.  Must be called with the lock held.
-func (msink *MetricsSink) updateSpanMetrics(addr string, numWritten int,
-	serverDropped int) {
-	mtx, found := msink.HostSpanMetrics[addr]
-	if !found {
-		// Ensure that the per-host span metrics map doesn't grow too large.
-		if len(msink.HostSpanMetrics) >= msink.maxMtx {
-			// Delete a random entry
-			for k := range msink.HostSpanMetrics {
-				msink.lg.Warnf("Evicting metrics entry for addr %s "+
-					"because there are more than %d addrs.\n", k, msink.maxMtx)
-				delete(msink.HostSpanMetrics, k)
-				break
-			}
-		}
-		mtx = &common.SpanMetrics{}
-		msink.HostSpanMetrics[addr] = mtx
-	}
-	mtx.Written += uint64(numWritten)
-	mtx.ServerDropped += uint64(serverDropped)
-}
-
-// Update the total number of spans which were persisted to disk.
-func (msink *MetricsSink) UpdatePersisted(addr string, totalWritten int,
-	serverDropped int) {
-	msink.lock.Lock()
-	defer msink.lock.Unlock()
-	msink.WrittenSpans += uint64(totalWritten)
-	msink.ServerDropped += uint64(serverDropped)
-	msink.updateSpanMetrics(addr, totalWritten, serverDropped)
-}
-
-// Read the server stats.
-func (msink *MetricsSink) PopulateServerStats(stats *common.ServerStats) {
-	msink.lock.Lock()
-	defer msink.lock.Unlock()
-	stats.IngestedSpans = msink.IngestedSpans
-	stats.WrittenSpans = msink.WrittenSpans
-	stats.ServerDroppedSpans = msink.ServerDropped
-	stats.MaxWriteSpansLatencyMs = msink.wsLatencyCircBuf.Max()
-	stats.AverageWriteSpansLatencyMs = msink.wsLatencyCircBuf.Average()
-	stats.HostSpanMetrics = make(common.SpanMetricsMap)
-	for k, v := range msink.HostSpanMetrics {
-		stats.HostSpanMetrics[k] = &common.SpanMetrics{
-			Written:       v.Written,
-			ServerDropped: v.ServerDropped,
-		}
-	}
-}
-
-// A circular buffer of uint32s which supports appending and taking the
-// average, and some other things.
-type CircBufU32 struct {
-	// The next slot to fill
-	slot int
-
-	// The number of slots which are in use.  This number only ever
-	// increases until the buffer is full.
-	slotsUsed int
-
-	// The buffer
-	buf []uint32
-}
-
-func NewCircBufU32(size int) *CircBufU32 {
-	return &CircBufU32{
-		slotsUsed: -1,
-		buf:       make([]uint32, size),
-	}
-}
-
-func (cbuf *CircBufU32) Max() uint32 {
-	var max uint32
-	for bufIdx := 0; bufIdx < cbuf.slotsUsed; bufIdx++ {
-		if cbuf.buf[bufIdx] > max {
-			max = cbuf.buf[bufIdx]
-		}
-	}
-	return max
-}
-
-func (cbuf *CircBufU32) Average() uint32 {
-	var total uint64
-	for bufIdx := 0; bufIdx < cbuf.slotsUsed; bufIdx++ {
-		total += uint64(cbuf.buf[bufIdx])
-	}
-	return uint32(total / uint64(cbuf.slotsUsed))
-}
-
-func (cbuf *CircBufU32) Append(val uint32) {
-	cbuf.buf[cbuf.slot] = val
-	cbuf.slot++
-	if cbuf.slotsUsed < cbuf.slot {
-		cbuf.slotsUsed = cbuf.slot
-	}
-	if cbuf.slot >= len(cbuf.buf) {
-		cbuf.slot = 0
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/metrics_test.go
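The header comment of metrics.go above lays out what the sink must track. A hedged usage sketch of the API defined in that file; this fragment assumes it runs inside this package with a *conf.Config named cnf already built (for example via conf.Builder, as the tests do):

    msink := NewMetricsSink(cnf)

    // 10 spans arrived from 127.0.0.1, 2 of them were dropped by the server,
    // and the writeSpans call took 5 ms.
    msink.UpdateIngested("127.0.0.1", 10, 2, 5*time.Millisecond)

    // 8 of those spans were later persisted to leveldb.
    msink.UpdatePersisted("127.0.0.1", 8, 0)

    // Snapshot the counters the way the REST layer does.
    var stats common.ServerStats
    msink.PopulateServerStats(&stats)
    fmt.Printf("ingested=%d written=%d dropped=%d maxWriteLatencyMs=%d\n",
        stats.IngestedSpans, stats.WrittenSpans, stats.ServerDroppedSpans,
        stats.MaxWriteSpansLatencyMs)
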
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/metrics_test.go b/htrace-htraced/go/src/org/apache/htrace/htraced/metrics_test.go
deleted file mode 100644
index 6daf640..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/metrics_test.go
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	htrace "org/apache/htrace/client"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"reflect"
-	"testing"
-	"time"
-)
-
-func compareTotals(a, b common.SpanMetricsMap) bool {
-	for k, v := range a {
-		if !reflect.DeepEqual(v, b[k]) {
-			return false
-		}
-	}
-	for k, v := range b {
-		if !reflect.DeepEqual(v, a[k]) {
-			return false
-		}
-	}
-	return true
-}
-
-type Fatalfer interface {
-	Fatalf(format string, args ...interface{})
-}
-
-func assertNumWrittenEquals(t Fatalfer, msink *MetricsSink,
-	expectedNumWritten int) {
-	var sstats common.ServerStats
-	msink.PopulateServerStats(&sstats)
-	if sstats.WrittenSpans != uint64(expectedNumWritten) {
-		t.Fatalf("sstats.WrittenSpans = %d, but expected %d\n",
-			sstats.WrittenSpans, expectedNumWritten)
-	}
-	if sstats.HostSpanMetrics["127.0.0.1"] == nil {
-		t.Fatalf("no entry for sstats.HostSpanMetrics[127.0.0.1] found.")
-	}
-	if sstats.HostSpanMetrics["127.0.0.1"].Written !=
-		uint64(expectedNumWritten) {
-		t.Fatalf("sstats.HostSpanMetrics[127.0.0.1].Written = %d, but "+
-			"expected %d\n", sstats.HostSpanMetrics["127.0.0.1"].Written,
-			expectedNumWritten)
-	}
-}
-
-func TestMetricsSinkPerHostEviction(t *testing.T) {
-	cnfBld := conf.Builder{
-		Values:   conf.TEST_VALUES(),
-		Defaults: conf.DEFAULTS,
-	}
-	cnfBld.Values[conf.HTRACE_METRICS_MAX_ADDR_ENTRIES] = "2"
-	cnf, err := cnfBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create conf: %s", err.Error())
-	}
-	msink := NewMetricsSink(cnf)
-	msink.UpdatePersisted("192.168.0.100", 20, 10)
-	msink.UpdatePersisted("192.168.0.101", 20, 10)
-	msink.UpdatePersisted("192.168.0.102", 20, 10)
-	msink.lock.Lock()
-	defer msink.lock.Unlock()
-	if len(msink.HostSpanMetrics) != 2 {
-		for k, v := range msink.HostSpanMetrics {
-			fmt.Printf("WATERMELON: [%s] = [%s]\n", k, v)
-		}
-		t.Fatalf("Expected len(msink.HostSpanMetrics) to be 2, but got %d\n",
-			len(msink.HostSpanMetrics))
-	}
-}
-
-func TestIngestedSpansMetricsRest(t *testing.T) {
-	testIngestedSpansMetricsImpl(t, false)
-}
-
-func TestIngestedSpansMetricsPacked(t *testing.T) {
-	testIngestedSpansMetricsImpl(t, true)
-}
-
-func testIngestedSpansMetricsImpl(t *testing.T, usePacked bool) {
-	htraceBld := &MiniHTracedBuilder{Name: "TestIngestedSpansMetrics",
-		DataDirs: make([]string, 2),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	defer ht.Close()
-	var hcl *htrace.Client
-	hcl, err = htrace.NewClient(ht.ClientConf(), &htrace.TestHooks{
-		HrpcDisabled: !usePacked,
-	})
-	if err != nil {
-		t.Fatalf("failed to create client: %s", err.Error())
-	}
-
-	NUM_TEST_SPANS := 12
-	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
-	err = hcl.WriteSpans(allSpans)
-	if err != nil {
-		t.Fatalf("WriteSpans failed: %s\n", err.Error())
-	}
-	for {
-		var stats *common.ServerStats
-		stats, err = hcl.GetServerStats()
-		if err != nil {
-			t.Fatalf("GetServerStats failed: %s\n", err.Error())
-		}
-		if stats.IngestedSpans == uint64(NUM_TEST_SPANS) {
-			break
-		}
-		time.Sleep(1 * time.Millisecond)
-	}
-}
-
-func TestCircBuf32(t *testing.T) {
-	cbuf := NewCircBufU32(3)
-	// We arbitrarily define that empty circular buffers have an average of 0.
-	if cbuf.Average() != 0 {
-		t.Fatalf("expected empty CircBufU32 to have an average of 0.\n")
-	}
-	if cbuf.Max() != 0 {
-		t.Fatalf("expected empty CircBufU32 to have a max of 0.\n")
-	}
-	cbuf.Append(2)
-	if cbuf.Average() != 2 {
-		t.Fatalf("expected one-element CircBufU32 to have an average of 2.\n")
-	}
-	cbuf.Append(10)
-	if cbuf.Average() != 6 {
-		t.Fatalf("expected two-element CircBufU32 to have an average of 6.\n")
-	}
-	cbuf.Append(12)
-	if cbuf.Average() != 8 {
-		t.Fatalf("expected three-element CircBufU32 to have an average of 8.\n")
-	}
-	cbuf.Append(14)
-	// The 14 overwrites the original 2 element.
-	if cbuf.Average() != 12 {
-		t.Fatalf("expected three-element CircBufU32 to have an average of 12.\n")
-	}
-	cbuf.Append(1)
-	// The 1 overwrites the original 10 element.
-	if cbuf.Average() != 9 {
-		t.Fatalf("expected three-element CircBufU32 to have an average of 12.\n")
-	}
-	if cbuf.Max() != 14 {
-		t.Fatalf("expected three-element CircBufU32 to have a max of 14.\n")
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/mini_htraced.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/mini_htraced.go b/htrace-htraced/go/src/org/apache/htrace/htraced/mini_htraced.go
deleted file mode 100644
index cf7ef67..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/mini_htraced.go
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	"io/ioutil"
-	"net"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"os"
-	"strings"
-)
-
-//
-// MiniHTraceD is used in unit tests to set up a daemon with certain settings.
-// It takes care of things like creating and cleaning up temporary directories.
-//
-
-// The default number of managed data directories to use.
-const DEFAULT_NUM_DATA_DIRS = 2
-
-// Builds a MiniHTraced object.
-type MiniHTracedBuilder struct {
-	// The name of the MiniHTraced to build.  This shows up in the test directory name and some
-	// other places.
-	Name string
-
-	// The configuration values to use for the MiniHTraced.
-	// If this is nil, we use the default configuration for everything.
-	Cnf map[string]string
-
-	// The DataDirs to use.  Empty entries will turn into random names.
-	DataDirs []string
-
-	// If true, we will keep the data dirs around after MiniHTraced#Close
-	KeepDataDirsOnClose bool
-
-	// If non-null, the WrittenSpans semaphore to use when creating the DataStore.
-	WrittenSpans *common.Semaphore
-
-	// The test hooks to use for the HRPC server
-	HrpcTestHooks *hrpcTestHooks
-}
-
-type MiniHTraced struct {
-	Name                string
-	Cnf                 *conf.Config
-	DataDirs            []string
-	Store               *dataStore
-	Rsv                 *RestServer
-	Hsv                 *HrpcServer
-	Lg                  *common.Logger
-	KeepDataDirsOnClose bool
-}
-
-func (bld *MiniHTracedBuilder) Build() (*MiniHTraced, error) {
-	var err error
-	var store *dataStore
-	var rsv *RestServer
-	var hsv *HrpcServer
-	if bld.Name == "" {
-		bld.Name = "HTraceTest"
-	}
-	if bld.Cnf == nil {
-		bld.Cnf = make(map[string]string)
-	}
-	if bld.DataDirs == nil {
-		bld.DataDirs = make([]string, 2)
-	}
-	for idx := range bld.DataDirs {
-		if bld.DataDirs[idx] == "" {
-			bld.DataDirs[idx], err = ioutil.TempDir(os.TempDir(),
-				fmt.Sprintf("%s%d", bld.Name, idx+1))
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	// Copy the default test configuration values.
-	for k, v := range conf.TEST_VALUES() {
-		_, hasVal := bld.Cnf[k]
-		if !hasVal {
-			bld.Cnf[k] = v
-		}
-	}
-	bld.Cnf[conf.HTRACE_DATA_STORE_DIRECTORIES] =
-		strings.Join(bld.DataDirs, conf.PATH_LIST_SEP)
-	cnfBld := conf.Builder{Values: bld.Cnf, Defaults: conf.DEFAULTS}
-	cnf, err := cnfBld.Build()
-	if err != nil {
-		return nil, err
-	}
-	lg := common.NewLogger("mini.htraced", cnf)
-	defer func() {
-		if err != nil {
-			if store != nil {
-				store.Close()
-			}
-			for idx := range bld.DataDirs {
-				if !bld.KeepDataDirsOnClose {
-					if bld.DataDirs[idx] != "" {
-						os.RemoveAll(bld.DataDirs[idx])
-					}
-				}
-			}
-			if rsv != nil {
-				rsv.Close()
-			}
-			lg.Infof("Failed to create MiniHTraced %s: %s\n", bld.Name, err.Error())
-			lg.Close()
-		}
-	}()
-	store, err = CreateDataStore(cnf, bld.WrittenSpans)
-	if err != nil {
-		return nil, err
-	}
-	rstListener, listenErr := net.Listen("tcp", cnf.Get(conf.HTRACE_WEB_ADDRESS))
-	if listenErr != nil {
-		return nil, listenErr
-	}
-	defer func() {
-		if rstListener != nil {
-			rstListener.Close()
-		}
-	}()
-	rsv, err = CreateRestServer(cnf, store, rstListener)
-	if err != nil {
-		return nil, err
-	}
-	rstListener = nil
-	hsv, err = CreateHrpcServer(cnf, store, bld.HrpcTestHooks)
-	if err != nil {
-		return nil, err
-	}
-
-	lg.Infof("Created MiniHTraced %s\n", bld.Name)
-	return &MiniHTraced{
-		Name:                bld.Name,
-		Cnf:                 cnf,
-		DataDirs:            bld.DataDirs,
-		Store:               store,
-		Rsv:                 rsv,
-		Hsv:                 hsv,
-		Lg:                  lg,
-		KeepDataDirsOnClose: bld.KeepDataDirsOnClose,
-	}, nil
-}
-
-// Return a Config object that clients can use to connect to this MiniHTraceD.
-func (ht *MiniHTraced) ClientConf() *conf.Config {
-	return ht.Cnf.Clone(conf.HTRACE_WEB_ADDRESS, ht.Rsv.Addr().String(),
-		conf.HTRACE_HRPC_ADDRESS, ht.Hsv.Addr().String())
-}
-
-// Return a Config object that clients can use to connect to this MiniHTraceD
-// by HTTP only (no HRPC).
-func (ht *MiniHTraced) RestOnlyClientConf() *conf.Config {
-	return ht.Cnf.Clone(conf.HTRACE_WEB_ADDRESS, ht.Rsv.Addr().String(),
-		conf.HTRACE_HRPC_ADDRESS, "")
-}
-
-func (ht *MiniHTraced) Close() {
-	ht.Lg.Infof("Closing MiniHTraced %s\n", ht.Name)
-	ht.Rsv.Close()
-	ht.Hsv.Close()
-	ht.Store.Close()
-	if !ht.KeepDataDirsOnClose {
-		for idx := range ht.DataDirs {
-			ht.Lg.Infof("Removing %s...\n", ht.DataDirs[idx])
-			os.RemoveAll(ht.DataDirs[idx])
-		}
-	}
-	ht.Lg.Infof("Finished closing MiniHTraced %s\n", ht.Name)
-	ht.Lg.Close()
-}

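mini_htraced.go above is the fixture that the surrounding _test.go files build on. A condensed sketch of the usual build/use/teardown cycle, assuming it runs inside a test in this package; names other than the fixture's own are illustrative:

    htraceBld := &MiniHTracedBuilder{
        Name:     "ExampleTest",
        DataDirs: make([]string, 2), // empty entries become random temp dirs
    }
    ht, err := htraceBld.Build()
    if err != nil {
        t.Fatalf("failed to create mini htraced cluster: %s\n", err.Error())
    }
    defer ht.Close() // shuts the servers down and removes the data dirs

    // ClientConf() points a client at both the REST and HRPC ports;
    // RestOnlyClientConf() leaves the HRPC address empty instead.
    hcl, err := htrace.NewClient(ht.ClientConf(), &htrace.TestHooks{})
    if err != nil {
        t.Fatalf("failed to create client: %s\n", err.Error())
    }
    _ = hcl // use the client to write spans, query the server, etc.
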
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/reaper_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/reaper_test.go b/htrace-htraced/go/src/org/apache/htrace/htraced/reaper_test.go
deleted file mode 100644
index 2d6a76f..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/reaper_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	"math/rand"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"org/apache/htrace/test"
-	"testing"
-	"time"
-)
-
-func TestReapingOldSpans(t *testing.T) {
-	const NUM_TEST_SPANS = 20
-	testSpans := make([]*common.Span, NUM_TEST_SPANS)
-	rnd := rand.New(rand.NewSource(2))
-	now := common.TimeToUnixMs(time.Now().UTC())
-	for i := range testSpans {
-		testSpans[i] = test.NewRandomSpan(rnd, testSpans[0:i])
-		testSpans[i].Begin = now - int64(NUM_TEST_SPANS-1-i)
-		testSpans[i].Description = fmt.Sprintf("Span%02d", i)
-	}
-	htraceBld := &MiniHTracedBuilder{Name: "TestReapingOldSpans",
-		Cnf: map[string]string{
-			conf.HTRACE_SPAN_EXPIRY_MS:                fmt.Sprintf("%d", 60*60*1000),
-			conf.HTRACE_REAPER_HEARTBEAT_PERIOD_MS:    "1",
-			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "1",
-		},
-		WrittenSpans: common.NewSemaphore(0),
-		DataDirs:     make([]string, 2),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create mini htraced cluster: %s\n", err.Error())
-	}
-	ing := ht.Store.NewSpanIngestor(ht.Store.lg, "127.0.0.1", "")
-	for spanIdx := range testSpans {
-		ing.IngestSpan(testSpans[spanIdx])
-	}
-	ing.Close(time.Now())
-	// Wait for the spans to be created
-	ht.Store.WrittenSpans.Waits(NUM_TEST_SPANS)
-	// Set a reaper date that will remove all the spans except the final one.
-	ht.Store.rpr.SetReaperDate(now)
-
-	common.WaitFor(5*time.Minute, time.Millisecond, func() bool {
-		for i := 0; i < NUM_TEST_SPANS-1; i++ {
-			span := ht.Store.FindSpan(testSpans[i].Id)
-			if span != nil {
-				ht.Store.lg.Debugf("Waiting for %s to be removed...\n",
-					testSpans[i].Description)
-				return false
-			}
-		}
-		span := ht.Store.FindSpan(testSpans[NUM_TEST_SPANS-1].Id)
-		if span == nil {
-			ht.Store.lg.Debugf("Did not expect %s to be removed\n",
-				testSpans[NUM_TEST_SPANS-1].Description)
-			return false
-		}
-		return true
-	})
-	defer ht.Close()
-}


[5/7] incubator-htrace git commit: HTRACE-357. Rename htrace-htraced/go/src/org/apache/htrace to htrace-htraced/go/src/htrace (Colin Patrick McCabe via iwasakims)

Posted by iw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/heartbeater_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/heartbeater_test.go b/htrace-htraced/go/src/htrace/htraced/heartbeater_test.go
new file mode 100644
index 0000000..9157965
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/heartbeater_test.go
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"htrace/common"
+	"htrace/conf"
+	"testing"
+	"time"
+)
+
+func TestHeartbeaterStartupShutdown(t *testing.T) {
+	cnfBld := conf.Builder{
+		Values:   conf.TEST_VALUES(),
+		Defaults: conf.DEFAULTS,
+	}
+	cnf, err := cnfBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create conf: %s", err.Error())
+	}
+	lg := common.NewLogger("heartbeater", cnf)
+	hb := NewHeartbeater("ExampleHeartbeater", 1, lg)
+	if hb.String() != "ExampleHeartbeater" {
+		t.Fatalf("hb.String() returned %s instead of %s\n", hb.String(), "ExampleHeartbeater")
+	}
+	hb.Shutdown()
+}
+
+// The number of milliseconds between heartbeats
+const HEARTBEATER_PERIOD = 5
+
+// The number of heartbeats to send in the test.
+const NUM_TEST_HEARTBEATS = 3
+
+func TestHeartbeaterSendsHeartbeats(t *testing.T) {
+	cnfBld := conf.Builder{
+		Values:   conf.TEST_VALUES(),
+		Defaults: conf.DEFAULTS,
+	}
+	cnf, err := cnfBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create conf: %s", err.Error())
+	}
+	lg := common.NewLogger("heartbeater", cnf)
+	// The minimum amount of time which the heartbeater test should take
+	MINIMUM_TEST_DURATION := time.Millisecond * (NUM_TEST_HEARTBEATS * HEARTBEATER_PERIOD)
+	duration := MINIMUM_TEST_DURATION
+	for duration <= MINIMUM_TEST_DURATION {
+		start := time.Now()
+		testHeartbeaterSendsHeartbeatsImpl(t, lg)
+		end := time.Now()
+		duration = end.Sub(start)
+		lg.Debugf("Measured duration: %v; minimum expected duration: %v\n",
+			duration, MINIMUM_TEST_DURATION)
+	}
+}
+
+func testHeartbeaterSendsHeartbeatsImpl(t *testing.T, lg *common.Logger) {
+	hb := NewHeartbeater("ExampleHeartbeater", HEARTBEATER_PERIOD, lg)
+	if hb.String() != "ExampleHeartbeater" {
+		t.Fatalf("hb.String() returned %s instead of %s\n", hb.String(), "ExampleHeartbeater")
+	}
+	testChan := make(chan interface{}, NUM_TEST_HEARTBEATS)
+	gotAllHeartbeats := make(chan bool)
+	hb.AddHeartbeatTarget(&HeartbeatTarget{
+		name:       "ExampleHeartbeatTarget",
+		targetChan: testChan,
+	})
+	go func() {
+		for i := 0; i < NUM_TEST_HEARTBEATS; i++ {
+			<-testChan
+		}
+		gotAllHeartbeats <- true
+		for i := 0; i < NUM_TEST_HEARTBEATS; i++ {
+			_, open := <-testChan
+			if !open {
+				return
+			}
+		}
+	}()
+	<-gotAllHeartbeats
+	hb.Shutdown()
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/hrpc.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/hrpc.go b/htrace-htraced/go/src/htrace/htraced/hrpc.go
new file mode 100644
index 0000000..8b5a728
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/hrpc.go
@@ -0,0 +1,386 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/ugorji/go/codec"
+	"htrace/common"
+	"htrace/conf"
+	"io"
+	"net"
+	"net/rpc"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+const MAX_HRPC_HANDLERS = 32765
+
+// Handles HRPC calls
+type HrpcHandler struct {
+	lg    *common.Logger
+	store *dataStore
+}
+
+// The HRPC server
+type HrpcServer struct {
+	*rpc.Server
+	hand *HrpcHandler
+
+	// The listener we are using to accept new connections.
+	listener net.Listener
+
+	// A WaitGroup used to block until the HRPC server has exited.
+	exited sync.WaitGroup
+
+	// A channel containing server codecs to use.  This channel is fully
+	// buffered.  The number of entries it initially contains determines how
+	// many concurrent codecs we will have running at once.
+	cdcs chan *HrpcServerCodec
+
+	// Used to shut down
+	shutdown chan interface{}
+
+	// The I/O timeout to use when reading requests or sending responses.  This
+	// timeout does not apply to the time we spend processing the message.
+	ioTimeo time.Duration
+
+	// A count of all I/O errors that we have encountered since the server
+	// started.  This counts errors like improperly formatted message frames,
+	// but not errors like properly formatted but invalid messages.
+	// This count is updated from multiple goroutines via sync/atomic.
+	ioErrorCount uint64
+
+	// The test hooks to use, or nil during normal operation.
+	testHooks *hrpcTestHooks
+}
+
+type hrpcTestHooks struct {
+	// A callback we make right after calling Accept() but before reading from
+	// the new connection.
+	HandleAdmission func()
+}
+
+// A codec which encodes HRPC data via JSON.  This structure holds the context
+// for a particular client connection.
+type HrpcServerCodec struct {
+	lg *common.Logger
+
+	// The current connection.
+	conn net.Conn
+
+	// The HrpcServer which this connection is part of.
+	hsv *HrpcServer
+
+	// The message length we read from the header.
+	length uint32
+
+	// The number of messages this connection has handled.
+	numHandled int
+
+	// The buffer for reading requests.  These buffers are reused for multiple
+	// requests to avoid allocating memory.
+	buf []byte
+
+	// Configuration for msgpack decoding
+	msgpackHandle codec.MsgpackHandle
+}
+
+func asJson(val interface{}) string {
+	js, err := json.Marshal(val)
+	if err != nil {
+		return "encoding error: " + err.Error()
+	}
+	return string(js)
+}
+
+func newIoErrorWarn(cdc *HrpcServerCodec, val string) error {
+	return newIoError(cdc, val, common.WARN)
+}
+
+func newIoError(cdc *HrpcServerCodec, val string, level common.Level) error {
+	if cdc.lg.LevelEnabled(level) {
+		cdc.lg.Write(level, cdc.conn.RemoteAddr().String()+": "+val+"\n")
+	}
+	if level >= common.INFO {
+		atomic.AddUint64(&cdc.hsv.ioErrorCount, 1)
+	}
+	return errors.New(val)
+}
+
+func (cdc *HrpcServerCodec) ReadRequestHeader(req *rpc.Request) error {
+	hdr := common.HrpcRequestHeader{}
+	if cdc.lg.TraceEnabled() {
+		cdc.lg.Tracef("%s: Reading HRPC request header.\n", cdc.conn.RemoteAddr())
+	}
+	cdc.conn.SetDeadline(time.Now().Add(cdc.hsv.ioTimeo))
+	err := binary.Read(cdc.conn, binary.LittleEndian, &hdr)
+	if err != nil {
+		if err == io.EOF && cdc.numHandled > 0 {
+			return newIoError(cdc, fmt.Sprintf("Remote closed connection "+
+				"after writing %d message(s)", cdc.numHandled), common.DEBUG)
+		}
+		return newIoError(cdc,
+			fmt.Sprintf("Error reading request header: %s", err.Error()), common.WARN)
+	}
+	if cdc.lg.TraceEnabled() {
+		cdc.lg.Tracef("%s: Read HRPC request header %s\n",
+			cdc.conn.RemoteAddr(), asJson(&hdr))
+	}
+	if hdr.Magic != common.HRPC_MAGIC {
+		return newIoErrorWarn(cdc, fmt.Sprintf("Invalid request header: expected "+
+			"magic number of 0x%04x, but got 0x%04x", common.HRPC_MAGIC, hdr.Magic))
+	}
+	if hdr.Length > common.MAX_HRPC_BODY_LENGTH {
+		return newIoErrorWarn(cdc, fmt.Sprintf("Length prefix was too long.  "+
+			"Maximum length is %d, but we got %d.", common.MAX_HRPC_BODY_LENGTH,
+			hdr.Length))
+	}
+	req.ServiceMethod = common.HrpcMethodIdToMethodName(hdr.MethodId)
+	if req.ServiceMethod == "" {
+		return newIoErrorWarn(cdc, fmt.Sprintf("Unknown MethodID code 0x%04x",
+			hdr.MethodId))
+	}
+	req.Seq = hdr.Seq
+	cdc.length = hdr.Length
+	return nil
+}
+
+func (cdc *HrpcServerCodec) ReadRequestBody(body interface{}) error {
+	remoteAddr := cdc.conn.RemoteAddr().String()
+	if cdc.lg.TraceEnabled() {
+		cdc.lg.Tracef("%s: Reading HRPC %d-byte request body.\n",
+			remoteAddr, cdc.length)
+	}
+	if cap(cdc.buf) < int(cdc.length) {
+		var pow uint
+		for pow = 0; (1 << pow) < int(cdc.length); pow++ {
+		}
+		cdc.buf = make([]byte, 0, 1<<pow)
+	}
+	_, err := io.ReadFull(cdc.conn, cdc.buf[:cdc.length])
+	if err != nil {
+		return newIoErrorWarn(cdc, fmt.Sprintf("Failed to read %d-byte "+
+			"request body: %s", cdc.length, err.Error()))
+	}
+	var zeroTime time.Time
+	cdc.conn.SetDeadline(zeroTime)
+
+	dec := codec.NewDecoderBytes(cdc.buf[:cdc.length], &cdc.msgpackHandle)
+	err = dec.Decode(body)
+	if cdc.lg.TraceEnabled() {
+		cdc.lg.Tracef("%s: read HRPC message: %s\n",
+			remoteAddr, asJson(&body))
+	}
+	req := body.(*common.WriteSpansReq)
+	if req == nil {
+		return nil
+	}
+	// We decode WriteSpans requests in a streaming fashion, to avoid overloading the garbage
+	// collector with a ton of trace spans all at once.
+	startTime := time.Now()
+	client, _, err := net.SplitHostPort(remoteAddr)
+	if err != nil {
+		return newIoErrorWarn(cdc, fmt.Sprintf("Failed to split host and port "+
+			"for %s: %s\n", remoteAddr, err.Error()))
+	}
+	hand := cdc.hsv.hand
+	ing := hand.store.NewSpanIngestor(hand.lg, client, req.DefaultTrid)
+	for spanIdx := 0; spanIdx < req.NumSpans; spanIdx++ {
+		var span *common.Span
+		err := dec.Decode(&span)
+		if err != nil {
+			return newIoErrorWarn(cdc, fmt.Sprintf("Failed to decode span %d "+
+				"out of %d: %s\n", spanIdx, req.NumSpans, err.Error()))
+		}
+		ing.IngestSpan(span)
+	}
+	ing.Close(startTime)
+	return nil
+}
+
+var EMPTY []byte = make([]byte, 0)
+
+func (cdc *HrpcServerCodec) WriteResponse(resp *rpc.Response, msg interface{}) error {
+	cdc.conn.SetDeadline(time.Now().Add(cdc.hsv.ioTimeo))
+	var err error
+	buf := EMPTY
+	if msg != nil {
+		w := bytes.NewBuffer(make([]byte, 0, 128))
+		enc := codec.NewEncoder(w, &cdc.msgpackHandle)
+		err := enc.Encode(msg)
+		if err != nil {
+			return newIoErrorWarn(cdc, fmt.Sprintf("Failed to marshal "+
+				"response message: %s", err.Error()))
+		}
+		buf = w.Bytes()
+	}
+	hdr := common.HrpcResponseHeader{}
+	hdr.MethodId = common.HrpcMethodNameToId(resp.ServiceMethod)
+	hdr.Seq = resp.Seq
+	hdr.ErrLength = uint32(len(resp.Error))
+	hdr.Length = uint32(len(buf))
+	writer := bufio.NewWriterSize(cdc.conn, 256)
+	err = binary.Write(writer, binary.LittleEndian, &hdr)
+	if err != nil {
+		return newIoErrorWarn(cdc, fmt.Sprintf("Failed to write response "+
+			"header: %s", err.Error()))
+	}
+	if hdr.ErrLength > 0 {
+		_, err = io.WriteString(writer, resp.Error)
+		if err != nil {
+			return newIoErrorWarn(cdc, fmt.Sprintf("Failed to write error "+
+				"string: %s", err.Error()))
+		}
+	}
+	if hdr.Length > 0 {
+		var length int
+		length, err = writer.Write(buf)
+		if err != nil {
+			return newIoErrorWarn(cdc, fmt.Sprintf("Failed to write response "+
+				"message: %s", err.Error()))
+		}
+		if uint32(length) != hdr.Length {
+			return newIoErrorWarn(cdc, fmt.Sprintf("Failed to write all of "+
+				"response message: %s", err.Error()))
+		}
+	}
+	err = writer.Flush()
+	if err != nil {
+		return newIoErrorWarn(cdc, fmt.Sprintf("Failed to write the response "+
+			"bytes: %s", err.Error()))
+	}
+	cdc.numHandled++
+	return nil
+}
+
+func (cdc *HrpcServerCodec) Close() error {
+	err := cdc.conn.Close()
+	cdc.conn = nil
+	cdc.length = 0
+	cdc.numHandled = 0
+	cdc.hsv.cdcs <- cdc
+	return err
+}
+
+func (hand *HrpcHandler) WriteSpans(req *common.WriteSpansReq,
+	resp *common.WriteSpansResp) (err error) {
+	// Nothing to do here; WriteSpans is handled in ReadRequestBody.
+	return nil
+}
+
+func CreateHrpcServer(cnf *conf.Config, store *dataStore,
+	testHooks *hrpcTestHooks) (*HrpcServer, error) {
+	lg := common.NewLogger("hrpc", cnf)
+	numHandlers := cnf.GetInt(conf.HTRACE_NUM_HRPC_HANDLERS)
+	if numHandlers < 1 {
+		lg.Warnf("%s must be positive: using 1 handler.\n", conf.HTRACE_NUM_HRPC_HANDLERS)
+		numHandlers = 1
+	}
+	if numHandlers > MAX_HRPC_HANDLERS {
+		lg.Warnf("%s cannot be more than %d: using %d handlers\n",
+			conf.HTRACE_NUM_HRPC_HANDLERS, MAX_HRPC_HANDLERS, MAX_HRPC_HANDLERS)
+		numHandlers = MAX_HRPC_HANDLERS
+	}
+	hsv := &HrpcServer{
+		Server: rpc.NewServer(),
+		hand: &HrpcHandler{
+			lg:    lg,
+			store: store,
+		},
+		cdcs:     make(chan *HrpcServerCodec, numHandlers),
+		shutdown: make(chan interface{}),
+		ioTimeo: time.Millisecond *
+			time.Duration(cnf.GetInt64(conf.HTRACE_HRPC_IO_TIMEOUT_MS)),
+		testHooks: testHooks,
+	}
+	for i := 0; i < numHandlers; i++ {
+		hsv.cdcs <- &HrpcServerCodec{
+			lg:  lg,
+			hsv: hsv,
+			msgpackHandle: codec.MsgpackHandle{
+				WriteExt: true,
+			},
+		}
+	}
+	var err error
+	hsv.listener, err = net.Listen("tcp", cnf.Get(conf.HTRACE_HRPC_ADDRESS))
+	if err != nil {
+		return nil, err
+	}
+	hsv.Server.Register(hsv.hand)
+	hsv.exited.Add(1)
+	go hsv.run()
+	lg.Infof("Started HRPC server on %s with %d handler routines. "+
+		"ioTimeo=%s.\n", hsv.listener.Addr().String(), numHandlers,
+		hsv.ioTimeo.String())
+	return hsv, nil
+}
+
+func (hsv *HrpcServer) run() {
+	lg := hsv.hand.lg
+	srvAddr := hsv.listener.Addr().String()
+	defer func() {
+		lg.Infof("HrpcServer on %s exiting\n", srvAddr)
+		hsv.exited.Done()
+	}()
+	for {
+		select {
+		case cdc := <-hsv.cdcs:
+			conn, err := hsv.listener.Accept()
+			if err != nil {
+				lg.Errorf("HrpcServer on %s got accept error: %s\n", srvAddr, err.Error())
+				hsv.cdcs <- cdc // never blocks; there is always sufficient buffer space
+				continue
+			}
+			if lg.TraceEnabled() {
+				lg.Tracef("%s: Accepted HRPC connection.\n", conn.RemoteAddr())
+			}
+			cdc.conn = conn
+			cdc.numHandled = 0
+			if hsv.testHooks != nil && hsv.testHooks.HandleAdmission != nil {
+				hsv.testHooks.HandleAdmission()
+			}
+			go hsv.ServeCodec(cdc)
+		case <-hsv.shutdown:
+			return
+		}
+	}
+}
+
+func (hsv *HrpcServer) Addr() net.Addr {
+	return hsv.listener.Addr()
+}
+
+func (hsv *HrpcServer) GetNumIoErrors() uint64 {
+	return atomic.LoadUint64(&hsv.ioErrorCount)
+}
+
+func (hsv *HrpcServer) Close() {
+	close(hsv.shutdown)
+	hsv.listener.Close()
+	hsv.exited.Wait()
+}

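To summarize the wire format implied by the codec above: each request is a little-endian common.HrpcRequestHeader (magic, method id, sequence number, body length) followed by a msgpack body encoded with WriteExt enabled; for WriteSpans the body is a WriteSpansReq followed by NumSpans individually encoded spans, which the server ingests in a streaming fashion. The real client lives elsewhere in the tree; the function below is only a hedged sketch of a conforming frame writer for the simple, non-streaming case, built from the same types the server code uses:

    // Hypothetical helper, not part of this patch: writes one request frame
    // in the format that ReadRequestHeader/ReadRequestBody above expect.
    func writeHrpcFrame(conn net.Conn, method string, seq uint64,
        body interface{}) error {
        // Encode the body with the same msgpack settings the server uses.
        mh := codec.MsgpackHandle{WriteExt: true}
        w := new(bytes.Buffer)
        if err := codec.NewEncoder(w, &mh).Encode(body); err != nil {
            return err
        }
        hdr := common.HrpcRequestHeader{
            Magic:    common.HRPC_MAGIC,
            MethodId: common.HrpcMethodNameToId(method),
            Seq:      seq,
            Length:   uint32(w.Len()),
        }
        // The server reads the header with binary.Read(..., binary.LittleEndian, ...),
        // so the client must write it the same way.
        if err := binary.Write(conn, binary.LittleEndian, &hdr); err != nil {
            return err
        }
        _, err := conn.Write(w.Bytes())
        return err
    }
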
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/htraced.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/htraced.go b/htrace-htraced/go/src/htrace/htraced/htraced.go
new file mode 100644
index 0000000..0d41e0d
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/htraced.go
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"github.com/alecthomas/kingpin"
+	"github.com/jmhodges/levigo"
+	"htrace/common"
+	"htrace/conf"
+	"net"
+	"os"
+	"runtime"
+	"time"
+)
+
+var RELEASE_VERSION string
+var GIT_VERSION string
+
+const USAGE = `htraced: the HTrace server daemon.
+
+htraced receives trace spans sent from HTrace clients.  It exposes a REST
+interface which others can query.  It also runs a web server with a graphical
+user interface.  htraced stores its span data in levelDB files on the local
+disks.
+
+Usage:
+--help: this help message
+
+-Dk=v: set configuration key 'k' to value 'v'
+For example -Dweb.address=127.0.0.1:8080 sets the web address to localhost,
+port 8080.  -Dlog.level=DEBUG will set the default log level to DEBUG.
+
+-Dk: set configuration key 'k' to 'true'
+
+Normally, configuration options should be set in the ` + conf.CONFIG_FILE_NAME + `
+configuration file.  We find this file by searching the paths in the
+` + conf.HTRACED_CONF_DIR + `. The command-line options are just an alternate way
+of setting configuration when launching the daemon.
+`
+
+func main() {
+	// Load the htraced configuration.
+	// This also parses the -Dfoo=bar command line arguments and removes them
+	// from os.Argv.
+	cnf, cnfLog := conf.LoadApplicationConfig("htraced.")
+
+	// Parse the remaining command-line arguments.
+	app := kingpin.New(os.Args[0], USAGE)
+	version := app.Command("version", "Print server version and exit.")
+	cmd := kingpin.MustParse(app.Parse(os.Args[1:]))
+
+	// Handle the "version" command-line argument.
+	if cmd == version.FullCommand() {
+		fmt.Printf("Running htraced %s [%s].\n", RELEASE_VERSION, GIT_VERSION)
+		os.Exit(0)
+	}
+
+	// Open the HTTP port.
+	// We want to do this first, before initializing the datastore or setting up
+	// logging.  That way, if someone accidentally starts two daemons with the
+	// same config file, the second invocation will exit with a "port in use"
+	// error rather than potentially disrupting the first invocation.
+	rstListener, listenErr := net.Listen("tcp", cnf.Get(conf.HTRACE_WEB_ADDRESS))
+	if listenErr != nil {
+		fmt.Fprintf(os.Stderr, "Error opening HTTP port: %s\n",
+			listenErr.Error())
+		os.Exit(1)
+	}
+
+	// Print out the startup banner and information about the daemon
+	// configuration.
+	lg := common.NewLogger("main", cnf)
+	defer lg.Close()
+	lg.Infof("*** Starting htraced %s [%s]***\n", RELEASE_VERSION, GIT_VERSION)
+	scanner := bufio.NewScanner(cnfLog)
+	for scanner.Scan() {
+		lg.Infof(scanner.Text() + "\n")
+	}
+	common.InstallSignalHandlers(cnf)
+	if runtime.GOMAXPROCS(0) == 1 {
+		ncpu := runtime.NumCPU()
+		runtime.GOMAXPROCS(ncpu)
+		lg.Infof("setting GOMAXPROCS=%d\n", ncpu)
+	} else {
+		lg.Infof("GOMAXPROCS=%d\n", runtime.GOMAXPROCS(0))
+	}
+	lg.Infof("leveldb version=%d.%d\n",
+		levigo.GetLevelDBMajorVersion(), levigo.GetLevelDBMinorVersion())
+
+	// Initialize the datastore.
+	store, err := CreateDataStore(cnf, nil)
+	if err != nil {
+		lg.Errorf("Error creating datastore: %s\n", err.Error())
+		os.Exit(1)
+	}
+	var rsv *RestServer
+	rsv, err = CreateRestServer(cnf, store, rstListener)
+	if err != nil {
+		lg.Errorf("Error creating REST server: %s\n", err.Error())
+		os.Exit(1)
+	}
+	var hsv *HrpcServer
+	if cnf.Get(conf.HTRACE_HRPC_ADDRESS) != "" {
+		hsv, err = CreateHrpcServer(cnf, store, nil)
+		if err != nil {
+			lg.Errorf("Error creating HRPC server: %s\n", err.Error())
+			os.Exit(1)
+		}
+	} else {
+		lg.Infof("Not starting HRPC server because no value was given for %s.\n",
+			conf.HTRACE_HRPC_ADDRESS)
+	}
+	naddr := cnf.Get(conf.HTRACE_STARTUP_NOTIFICATION_ADDRESS)
+	if naddr != "" {
+		notif := StartupNotification{
+			HttpAddr:  rsv.Addr().String(),
+			ProcessId: os.Getpid(),
+		}
+		if hsv != nil {
+			notif.HrpcAddr = hsv.Addr().String()
+		}
+		err = sendStartupNotification(naddr, &notif)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to send startup notification: "+
+				"%s\n", err.Error())
+			os.Exit(1)
+		}
+	}
+	for {
+		time.Sleep(time.Duration(10) * time.Hour)
+	}
+}
+
+// A startup notification message that we optionally send on startup.
+// Used by unit tests.
+type StartupNotification struct {
+	HttpAddr  string
+	HrpcAddr  string
+	ProcessId int
+}
+
+func sendStartupNotification(naddr string, notif *StartupNotification) error {
+	conn, err := net.Dial("tcp", naddr)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if conn != nil {
+			conn.Close()
+		}
+	}()
+	var buf []byte
+	buf, err = json.Marshal(notif)
+	if err != nil {
+		return err
+	}
+	_, err = conn.Write(buf)
+	conn.Close()
+	conn = nil
+	return err
+}

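The startup-notification code at the end of htraced.go above is what unit tests hook into: when a notification address is configured, the daemon dials it and writes one JSON-encoded StartupNotification before entering its sleep loop. A hedged sketch of the receiving side; the helper name is hypothetical, and a test would listen on an ephemeral port and place lis.Addr().String() into the daemon's configuration under conf.HTRACE_STARTUP_NOTIFICATION_ADDRESS:

    // Hypothetical receiver for the notification written by
    // sendStartupNotification above; lis is a listener the test opened.
    func awaitStartupNotification(lis net.Listener) (*StartupNotification, error) {
        conn, err := lis.Accept()
        if err != nil {
            return nil, err
        }
        defer conn.Close()
        notif := StartupNotification{}
        if err := json.NewDecoder(conn).Decode(&notif); err != nil {
            return nil, err
        }
        // notif.HttpAddr and notif.HrpcAddr hold the addresses the daemon
        // actually bound, which matters when ephemeral ports were requested.
        return &notif, nil
    }
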
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/loader.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/loader.go b/htrace-htraced/go/src/htrace/htraced/loader.go
new file mode 100644
index 0000000..95c5c3e
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/loader.go
@@ -0,0 +1,511 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"github.com/jmhodges/levigo"
+	"github.com/ugorji/go/codec"
+	"htrace/common"
+	"htrace/conf"
+	"io"
+	"math"
+	"math/rand"
+	"os"
+	"strings"
+	"syscall"
+	"time"
+)
+
+// Routines for loading the datastore.
+
+// The leveldb key which has information about the shard.
+const SHARD_INFO_KEY = 'w'
+
+// A constant signifying that we don't know what the layout version is.
+const UNKNOWN_LAYOUT_VERSION = 0
+
+// The current layout version.  We cannot read layout versions newer than this.
+// We may sometimes be able to read older versions, but only by doing an
+// upgrade.
+const CURRENT_LAYOUT_VERSION = 3
+
+type DataStoreLoader struct {
+	// The dataStore logger.
+	lg *common.Logger
+
+	// True if we should clear the stored data.
+	ClearStored bool
+
+	// The shards that we're loading
+	shards []*ShardLoader
+
+	// The options to use for opening datastores in LevelDB.
+	openOpts *levigo.Options
+
+	// The read options to use for LevelDB.
+	readOpts *levigo.ReadOptions
+
+	// The write options to use for LevelDB.
+	writeOpts *levigo.WriteOptions
+}
+
+// Information about a Shard.
+type ShardInfo struct {
+	// The layout version of the datastore.
+	// We should always keep this field so that old software can recognize new
+	// layout versions, even if it can't read them.
+	LayoutVersion uint64
+
+	// A random number identifying this daemon.
+	DaemonId uint64
+
+	// The total number of shards in this datastore.
+	TotalShards uint32
+
+	// The index of this shard within the datastore.
+	ShardIndex uint32
+}
+
+// Create a new datastore loader.
+// Initializes the loader, but does not load any leveldb instances.
+func NewDataStoreLoader(cnf *conf.Config) *DataStoreLoader {
+	dld := &DataStoreLoader{
+		lg:          common.NewLogger("datastore", cnf),
+		ClearStored: cnf.GetBool(conf.HTRACE_DATA_STORE_CLEAR),
+	}
+	dld.readOpts = levigo.NewReadOptions()
+	dld.readOpts.SetFillCache(true)
+	dld.readOpts.SetVerifyChecksums(false)
+	dld.writeOpts = levigo.NewWriteOptions()
+	dld.writeOpts.SetSync(false)
+	dirsStr := cnf.Get(conf.HTRACE_DATA_STORE_DIRECTORIES)
+	rdirs := strings.Split(dirsStr, conf.PATH_LIST_SEP)
+	// Filter out empty entries
+	dirs := make([]string, 0, len(rdirs))
+	for i := range rdirs {
+		if strings.TrimSpace(rdirs[i]) != "" {
+			dirs = append(dirs, rdirs[i])
+		}
+	}
+	dld.shards = make([]*ShardLoader, len(dirs))
+	for i := range dirs {
+		dld.shards[i] = &ShardLoader{
+			dld:  dld,
+			path: dirs[i] + conf.PATH_SEP + "db",
+		}
+	}
+	dld.openOpts = levigo.NewOptions()
+	cacheSize := cnf.GetInt(conf.HTRACE_LEVELDB_CACHE_SIZE)
+	dld.openOpts.SetCache(levigo.NewLRUCache(cacheSize))
+	dld.openOpts.SetParanoidChecks(false)
+	writeBufferSize := cnf.GetInt(conf.HTRACE_LEVELDB_WRITE_BUFFER_SIZE)
+	if writeBufferSize > 0 {
+		dld.openOpts.SetWriteBufferSize(writeBufferSize)
+	}
+	maxFdPerShard := dld.calculateMaxOpenFilesPerShard()
+	if maxFdPerShard > 0 {
+		dld.openOpts.SetMaxOpenFiles(maxFdPerShard)
+	}
+	return dld
+}
+
+func (dld *DataStoreLoader) Close() {
+	if dld.lg != nil {
+		dld.lg.Close()
+		dld.lg = nil
+	}
+	if dld.openOpts != nil {
+		dld.openOpts.Close()
+		dld.openOpts = nil
+	}
+	if dld.readOpts != nil {
+		dld.readOpts.Close()
+		dld.readOpts = nil
+	}
+	if dld.writeOpts != nil {
+		dld.writeOpts.Close()
+		dld.writeOpts = nil
+	}
+	if dld.shards != nil {
+		for i := range dld.shards {
+			if dld.shards[i] != nil {
+				dld.shards[i].Close()
+			}
+		}
+		dld.shards = nil
+	}
+}
+
+func (dld *DataStoreLoader) DisownResources() {
+	dld.lg = nil
+	dld.openOpts = nil
+	dld.readOpts = nil
+	dld.writeOpts = nil
+	dld.shards = nil
+}
+
+// The maximum number of file descriptors we'll use on non-datastore things.
+const NON_DATASTORE_FD_MAX = 300
+
+// The minimum number of file descriptors per shard we will set.  Setting fewer
+// than this number could trigger a bug in some early versions of leveldb.
+const MIN_FDS_PER_SHARD = 80
+
+func (dld *DataStoreLoader) calculateMaxOpenFilesPerShard() int {
+	var rlim syscall.Rlimit
+	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim)
+	if err != nil {
+		dld.lg.Warnf("Unable to calculate maximum open files per shard: "+
+			"getrlimit failed: %s\n", err.Error())
+		return 0
+	}
+	// I think RLIMIT_NOFILE fits in 32 bits on all known operating systems,
+	// but there's no harm in being careful.  'int' in golang always holds at
+	// least 32 bits.
+	var maxFd int
+	if rlim.Cur > uint64(math.MaxInt32) {
+		maxFd = math.MaxInt32
+	} else {
+		maxFd = int(rlim.Cur)
+	}
+	if len(dld.shards) == 0 {
+		dld.lg.Warnf("Unable to calculate maximum open files per shard, " +
+			"since there are 0 shards configured.\n")
+		return 0
+	}
+	fdsPerShard := (maxFd - NON_DATASTORE_FD_MAX) / len(dld.shards)
+	if fdsPerShard < MIN_FDS_PER_SHARD {
+		dld.lg.Warnf("Expected to be able to use at least %d "+
+			"fds per shard, but we have %d shards and %d total fds to allocate, "+
+			"giving us only %d FDs per shard.", MIN_FDS_PER_SHARD,
+			len(dld.shards), maxFd-NON_DATASTORE_FD_MAX, fdsPerShard)
+		return 0
+	}
+	dld.lg.Infof("maxFd = %d.  Setting maxFdPerShard = %d\n",
+		maxFd, fdsPerShard)
+	return fdsPerShard
+}
+
+// Load information about all shards.
+func (dld *DataStoreLoader) LoadShards() {
+	for i := range dld.shards {
+		shd := dld.shards[i]
+		shd.load()
+	}
+}
+
+// Verify that the shard infos are consistent.
+// Reorders the shardInfo structures based on their ShardIndex.
+func (dld *DataStoreLoader) VerifyShardInfos() error {
+	if len(dld.shards) < 1 {
+		return errors.New("No shard directories found.")
+	}
+	// Make sure no shards had errors.
+	for i := range dld.shards {
+		shd := dld.shards[i]
+		if shd.infoErr != nil {
+			return shd.infoErr
+		}
+	}
+	// Make sure that if any shards are empty, all shards are empty.
+	emptyShards := ""
+	prefix := ""
+	for i := range dld.shards {
+		if dld.shards[i].info == nil {
+			emptyShards = emptyShards + prefix + dld.shards[i].path
+			prefix = ", "
+		}
+	}
+	if emptyShards != "" {
+		for i := range dld.shards {
+			if dld.shards[i].info != nil {
+				return errors.New(fmt.Sprintf("Shards %s were empty, but "+
+					"the other shards had data.", emptyShards))
+			}
+		}
+		// All shards are empty.
+		return nil
+	}
+	// Make sure that all shards have the same layout version, daemonId, and number of total
+	// shards.
+	layoutVersion := dld.shards[0].info.LayoutVersion
+	daemonId := dld.shards[0].info.DaemonId
+	totalShards := dld.shards[0].info.TotalShards
+	for i := 1; i < len(dld.shards); i++ {
+		shd := dld.shards[i]
+		if layoutVersion != shd.info.LayoutVersion {
+			return errors.New(fmt.Sprintf("Layout version mismatch.  Shard "+
+				"%s has layout version 0x%016x, but shard %s has layout "+
+				"version 0x%016x.",
+				dld.shards[0].path, layoutVersion, shd.path, shd.info.LayoutVersion))
+		}
+		if daemonId != shd.info.DaemonId {
+			return errors.New(fmt.Sprintf("DaemonId mismatch. Shard %s has "+
+				"daemonId 0x%016x, but shard %s has daemonId 0x%016x.",
+				dld.shards[0].path, daemonId, shd.path, shd.info.DaemonId))
+		}
+		if totalShards != shd.info.TotalShards {
+			return errors.New(fmt.Sprintf("TotalShards mismatch.  Shard %s has "+
+				"TotalShards = %d, but shard %s has TotalShards = %d.",
+				dld.shards[0].path, totalShards, shd.path, shd.info.TotalShards))
+		}
+		if shd.info.ShardIndex >= totalShards {
+			return errors.New(fmt.Sprintf("Invalid ShardIndex.  Shard %s has "+
+				"ShardIndex = %d, but TotalShards = %d.",
+				shd.path, shd.info.ShardIndex, shd.info.TotalShards))
+		}
+	}
+	if layoutVersion != CURRENT_LAYOUT_VERSION {
+		return errors.New(fmt.Sprintf("The layout version of all shards "+
+			"is %d, but we only support version %d.",
+			layoutVersion, CURRENT_LAYOUT_VERSION))
+	}
+	if totalShards != uint32(len(dld.shards)) {
+		return errors.New(fmt.Sprintf("The TotalShards field of all shards "+
+			"is %d, but we have %d shards.", totalShards, len(dld.shards)))
+	}
+	// Reorder shards in order of their ShardIndex.
+	reorderedShards := make([]*ShardLoader, len(dld.shards))
+	for i := 0; i < len(dld.shards); i++ {
+		shd := dld.shards[i]
+		shardIdx := shd.info.ShardIndex
+		if reorderedShards[shardIdx] != nil {
+			return errors.New(fmt.Sprintf("Both shard %s and "+
+				"shard %s have ShardIndex %d.", shd.path,
+				reorderedShards[shardIdx].path, shardIdx))
+		}
+		reorderedShards[shardIdx] = shd
+	}
+	dld.shards = reorderedShards
+	return nil
+}
+
+func (dld *DataStoreLoader) Load() error {
+	var err error
+	// If data.store.clear was set, clear existing data.
+	if dld.ClearStored {
+		err = dld.clearStored()
+		if err != nil {
+			return err
+		}
+	}
+	// Make sure the shard directories exist in all cases, with a mkdir -p
+	for i := range dld.shards {
+		err := os.MkdirAll(dld.shards[i].path, 0777)
+		if err != nil {
+			return errors.New(fmt.Sprintf("Failed to MkdirAll(%s): %s",
+				dld.shards[i].path, err.Error()))
+		}
+	}
+	// Get information about each shard, and verify them.
+	dld.LoadShards()
+	err = dld.VerifyShardInfos()
+	if err != nil {
+		return err
+	}
+	if dld.shards[0].ldb != nil {
+		dld.lg.Infof("Loaded %d leveldb instances with "+
+			"DaemonId of 0x%016x\n", len(dld.shards),
+			dld.shards[0].info.DaemonId)
+	} else {
+		// Create leveldb instances if needed.
+		rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+		daemonId := uint64(rnd.Int63())
+		dld.lg.Infof("Initializing %d leveldb instances with a new "+
+			"DaemonId of 0x%016x\n", len(dld.shards), daemonId)
+		dld.openOpts.SetCreateIfMissing(true)
+		for i := range dld.shards {
+			shd := dld.shards[i]
+			shd.ldb, err = levigo.Open(shd.path, shd.dld.openOpts)
+			if err != nil {
+				return errors.New(fmt.Sprintf("levigo.Open(%s) failed to "+
+					"create the shard: %s", shd.path, err.Error()))
+			}
+			info := &ShardInfo{
+				LayoutVersion: CURRENT_LAYOUT_VERSION,
+				DaemonId:      daemonId,
+				TotalShards:   uint32(len(dld.shards)),
+				ShardIndex:    uint32(i),
+			}
+			err = shd.writeShardInfo(info)
+			if err != nil {
+				return errors.New(fmt.Sprintf("levigo.Open(%s) failed to "+
+					"write shard info: %s", shd.path, err.Error()))
+			}
+			dld.lg.Infof("Shard %s initialized with ShardInfo %s \n",
+				shd.path, asJson(info))
+		}
+	}
+	return nil
+}
+
+func (dld *DataStoreLoader) clearStored() error {
+	for i := range dld.shards {
+		path := dld.shards[i].path
+		fi, err := os.Stat(path)
+		if err != nil && !os.IsNotExist(err) {
+			dld.lg.Errorf("Failed to stat %s: %s\n", path, err.Error())
+			return err
+		}
+		if fi != nil {
+			err = os.RemoveAll(path)
+			if err != nil {
+				dld.lg.Errorf("Failed to clear existing datastore directory %s: %s\n",
+					path, err.Error())
+				return err
+			}
+			dld.lg.Infof("Cleared existing datastore directory %s\n", path)
+		}
+	}
+	return nil
+}
+
+type ShardLoader struct {
+	// The parent DataStoreLoader
+	dld *DataStoreLoader
+
+	// Path to the shard
+	path string
+
+	// Leveldb instance of the shard
+	ldb *levigo.DB
+
+	// Information about the shard
+	info *ShardInfo
+
+	// If non-nil, the error we encountered trying to load the shard info.
+	infoErr error
+}
+
+func (shd *ShardLoader) Close() {
+	if shd.ldb != nil {
+		shd.ldb.Close()
+		shd.ldb = nil
+	}
+}
+
+// Load information about a particular shard.
+func (shd *ShardLoader) load() {
+	shd.info = nil
+	fi, err := os.Stat(shd.path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			shd.infoErr = nil
+			return
+		}
+		shd.infoErr = errors.New(fmt.Sprintf(
+			"stat() error on leveldb directory "+
+				"%s: %s", shd.path, err.Error()))
+		return
+	}
+	if !fi.Mode().IsDir() {
+		shd.infoErr = errors.New(fmt.Sprintf(
+			"stat() error on leveldb directory "+
+				"%s: inode is not a directory.", shd.path))
+		return
+	}
+	var dbDir *os.File
+	dbDir, err = os.Open(shd.path)
+	if err != nil {
+		shd.infoErr = errors.New(fmt.Sprintf(
+			"open() error on leveldb directory "+
+				"%s: %s.", shd.path, err.Error()))
+		return
+	}
+	defer func() {
+		if dbDir != nil {
+			dbDir.Close()
+		}
+	}()
+	_, err = dbDir.Readdirnames(1)
+	if err != nil {
+		if err == io.EOF {
+			// The db directory is empty.
+			shd.infoErr = nil
+			return
+		}
+		shd.infoErr = errors.New(fmt.Sprintf(
+			"Readdirnames() error on leveldb directory "+
+				"%s: %s.", shd.path, err.Error()))
+		return
+	}
+	dbDir.Close()
+	dbDir = nil
+	shd.ldb, err = levigo.Open(shd.path, shd.dld.openOpts)
+	if err != nil {
+		shd.ldb = nil
+		shd.infoErr = errors.New(fmt.Sprintf(
+			"levigo.Open() error on leveldb directory "+
+				"%s: %s.", shd.path, err.Error()))
+		return
+	}
+	shd.info, err = shd.readShardInfo()
+	if err != nil {
+		shd.infoErr = err
+		return
+	}
+	shd.infoErr = nil
+}
+
+func (shd *ShardLoader) readShardInfo() (*ShardInfo, error) {
+	buf, err := shd.ldb.Get(shd.dld.readOpts, []byte{SHARD_INFO_KEY})
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("readShardInfo(%s): failed to "+
+			"read shard info key: %s", shd.path, err.Error()))
+	}
+	if len(buf) == 0 {
+		return nil, errors.New(fmt.Sprintf("readShardInfo(%s): got zero-"+
+			"length value for shard info key.", shd.path))
+	}
+	mh := new(codec.MsgpackHandle)
+	mh.WriteExt = true
+	r := bytes.NewBuffer(buf)
+	decoder := codec.NewDecoder(r, mh)
+	shardInfo := &ShardInfo{
+		LayoutVersion: UNKNOWN_LAYOUT_VERSION,
+	}
+	err = decoder.Decode(shardInfo)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("readShardInfo(%s): msgpack "+
+			"decoding failed for shard info key: %s", shd.path, err.Error()))
+	}
+	return shardInfo, nil
+}
+
+func (shd *ShardLoader) writeShardInfo(info *ShardInfo) error {
+	mh := new(codec.MsgpackHandle)
+	mh.WriteExt = true
+	w := new(bytes.Buffer)
+	enc := codec.NewEncoder(w, mh)
+	err := enc.Encode(info)
+	if err != nil {
+		return errors.New(fmt.Sprintf("msgpack encoding error: %s",
+			err.Error()))
+	}
+	err = shd.ldb.Put(shd.dld.writeOpts, []byte{SHARD_INFO_KEY}, w.Bytes())
+	if err != nil {
+		return errors.New(fmt.Sprintf("leveldb write error: %s",
+			err.Error()))
+	}
+	return nil
+}

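The shard info record handled by readShardInfo() and writeShardInfo() above is simply a msgpack-encoded struct stored under the one-byte key 'w' (SHARD_INFO_KEY).  The encode/decode round trip can be reproduced outside leveldb with the same github.com/ugorji/go/codec calls; the sketch below is for illustration only and re-declares a ShardInfo struct with the same fields:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ugorji/go/codec"
    )

    // Mirrors the ShardInfo struct defined in loader.go.
    type ShardInfo struct {
        LayoutVersion uint64
        DaemonId      uint64
        TotalShards   uint32
        ShardIndex    uint32
    }

    func main() {
        mh := new(codec.MsgpackHandle)
        mh.WriteExt = true

        // Encode, as writeShardInfo does before calling ldb.Put.
        w := new(bytes.Buffer)
        if err := codec.NewEncoder(w, mh).Encode(&ShardInfo{
            LayoutVersion: 3, // CURRENT_LAYOUT_VERSION
            DaemonId:      0x1234,
            TotalShards:   2,
            ShardIndex:    0,
        }); err != nil {
            panic(err)
        }

        // Decode, as readShardInfo does after ldb.Get.
        info := &ShardInfo{}
        if err := codec.NewDecoder(bytes.NewBuffer(w.Bytes()), mh).Decode(info); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", *info)
    }
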
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/metrics.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/metrics.go b/htrace-htraced/go/src/htrace/htraced/metrics.go
new file mode 100644
index 0000000..d2feca8
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/metrics.go
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"htrace/common"
+	"htrace/conf"
+	"math"
+	"sync"
+	"time"
+)
+
+//
+// The Metrics Sink for HTraced.
+//
+// The Metrics sink keeps track of metrics for the htraced daemon.
+// It is important to have good metrics so that we can properly manage htraced.  In particular, we
+// need to know the rate at which we are receiving spans and the main places they came from.  If
+// spans were dropped because of a high sampling rate, we need to know which part of the system
+// dropped them so that we can adjust the sampling rate there.
+//
+
+const LATENCY_CIRC_BUF_SIZE = 4096
+
+type MetricsSink struct {
+	// The metrics sink logger.
+	lg *common.Logger
+
+	// The maximum number of entries we should allow in the HostSpanMetrics map.
+	maxMtx int
+
+	// The total number of spans ingested by the server (counting dropped spans)
+	IngestedSpans uint64
+
+	// The total number of spans written to leveldb since the server started.
+	WrittenSpans uint64
+
+	// The total number of spans dropped by the server.
+	ServerDropped uint64
+
+	// Per-host Span Metrics
+	HostSpanMetrics common.SpanMetricsMap
+
+	// The last few writeSpan latencies
+	wsLatencyCircBuf *CircBufU32
+
+	// Lock protecting all metrics
+	lock sync.Mutex
+}
+
+func NewMetricsSink(cnf *conf.Config) *MetricsSink {
+	return &MetricsSink{
+		lg:               common.NewLogger("metrics", cnf),
+		maxMtx:           cnf.GetInt(conf.HTRACE_METRICS_MAX_ADDR_ENTRIES),
+		HostSpanMetrics:  make(common.SpanMetricsMap),
+		wsLatencyCircBuf: NewCircBufU32(LATENCY_CIRC_BUF_SIZE),
+	}
+}
+
+// Update the total number of spans which were ingested, as well as other
+// metrics that get updated during span ingest.
+func (msink *MetricsSink) UpdateIngested(addr string, totalIngested int,
+	serverDropped int, wsLatency time.Duration) {
+	msink.lock.Lock()
+	defer msink.lock.Unlock()
+	msink.IngestedSpans += uint64(totalIngested)
+	msink.ServerDropped += uint64(serverDropped)
+	msink.updateSpanMetrics(addr, 0, serverDropped)
+	wsLatencyMs := wsLatency.Nanoseconds() / 1000000
+	var wsLatency32 uint32
+	if wsLatencyMs > math.MaxUint32 {
+		wsLatency32 = math.MaxUint32
+	} else {
+		wsLatency32 = uint32(wsLatencyMs)
+	}
+	msink.wsLatencyCircBuf.Append(wsLatency32)
+}
+
+// Update the per-host span metrics.  Must be called with the lock held.
+func (msink *MetricsSink) updateSpanMetrics(addr string, numWritten int,
+	serverDropped int) {
+	mtx, found := msink.HostSpanMetrics[addr]
+	if !found {
+		// Ensure that the per-host span metrics map doesn't grow too large.
+		if len(msink.HostSpanMetrics) >= msink.maxMtx {
+			// Delete a random entry
+			for k := range msink.HostSpanMetrics {
+				msink.lg.Warnf("Evicting metrics entry for addr %s "+
+					"because there are more than %d addrs.\n", k, msink.maxMtx)
+				delete(msink.HostSpanMetrics, k)
+				break
+			}
+		}
+		mtx = &common.SpanMetrics{}
+		msink.HostSpanMetrics[addr] = mtx
+	}
+	mtx.Written += uint64(numWritten)
+	mtx.ServerDropped += uint64(serverDropped)
+}
+
+// Update the total number of spans which were persisted to disk.
+func (msink *MetricsSink) UpdatePersisted(addr string, totalWritten int,
+	serverDropped int) {
+	msink.lock.Lock()
+	defer msink.lock.Unlock()
+	msink.WrittenSpans += uint64(totalWritten)
+	msink.ServerDropped += uint64(serverDropped)
+	msink.updateSpanMetrics(addr, totalWritten, serverDropped)
+}
+
+// Read the server stats.
+func (msink *MetricsSink) PopulateServerStats(stats *common.ServerStats) {
+	msink.lock.Lock()
+	defer msink.lock.Unlock()
+	stats.IngestedSpans = msink.IngestedSpans
+	stats.WrittenSpans = msink.WrittenSpans
+	stats.ServerDroppedSpans = msink.ServerDropped
+	stats.MaxWriteSpansLatencyMs = msink.wsLatencyCircBuf.Max()
+	stats.AverageWriteSpansLatencyMs = msink.wsLatencyCircBuf.Average()
+	stats.HostSpanMetrics = make(common.SpanMetricsMap)
+	for k, v := range msink.HostSpanMetrics {
+		stats.HostSpanMetrics[k] = &common.SpanMetrics{
+			Written:       v.Written,
+			ServerDropped: v.ServerDropped,
+		}
+	}
+}
+
+// A circular buffer of uint32s which supports appending new values and
+// computing the average and maximum of the stored values.
+type CircBufU32 struct {
+	// The next slot to fill
+	slot int
+
+	// The number of slots which are in use.  This number only ever
+	// increases until the buffer is full.
+	slotsUsed int
+
+	// The buffer
+	buf []uint32
+}
+
+func NewCircBufU32(size int) *CircBufU32 {
+	return &CircBufU32{
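+		// slotsUsed starts at -1 so that Average() on an empty buffer yields 0
+		// (0 divided by the huge uint64 conversion of -1) rather than dividing
+		// by zero.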
+		slotsUsed: -1,
+		buf:       make([]uint32, size),
+	}
+}
+
+func (cbuf *CircBufU32) Max() uint32 {
+	var max uint32
+	for bufIdx := 0; bufIdx < cbuf.slotsUsed; bufIdx++ {
+		if cbuf.buf[bufIdx] > max {
+			max = cbuf.buf[bufIdx]
+		}
+	}
+	return max
+}
+
+func (cbuf *CircBufU32) Average() uint32 {
+	var total uint64
+	for bufIdx := 0; bufIdx < cbuf.slotsUsed; bufIdx++ {
+		total += uint64(cbuf.buf[bufIdx])
+	}
+	return uint32(total / uint64(cbuf.slotsUsed))
+}
+
+func (cbuf *CircBufU32) Append(val uint32) {
+	cbuf.buf[cbuf.slot] = val
+	cbuf.slot++
+	if cbuf.slotsUsed < cbuf.slot {
+		cbuf.slotsUsed = cbuf.slot
+	}
+	if cbuf.slot >= len(cbuf.buf) {
+		cbuf.slot = 0
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/metrics_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/metrics_test.go b/htrace-htraced/go/src/htrace/htraced/metrics_test.go
new file mode 100644
index 0000000..4f27ffd
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/metrics_test.go
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"fmt"
+	htrace "htrace/client"
+	"htrace/common"
+	"htrace/conf"
+	"reflect"
+	"testing"
+	"time"
+)
+
+func compareTotals(a, b common.SpanMetricsMap) bool {
+	for k, v := range a {
+		if !reflect.DeepEqual(v, b[k]) {
+			return false
+		}
+	}
+	for k, v := range b {
+		if !reflect.DeepEqual(v, a[k]) {
+			return false
+		}
+	}
+	return true
+}
+
+type Fatalfer interface {
+	Fatalf(format string, args ...interface{})
+}
+
+func assertNumWrittenEquals(t Fatalfer, msink *MetricsSink,
+	expectedNumWritten int) {
+	var sstats common.ServerStats
+	msink.PopulateServerStats(&sstats)
+	if sstats.WrittenSpans != uint64(expectedNumWritten) {
+		t.Fatalf("sstats.WrittenSpans = %d, but expected %d\n",
+			sstats.WrittenSpans, expectedNumWritten)
+	}
+	if sstats.HostSpanMetrics["127.0.0.1"] == nil {
+		t.Fatalf("no entry for sstats.HostSpanMetrics[127.0.0.1] found.")
+	}
+	if sstats.HostSpanMetrics["127.0.0.1"].Written !=
+		uint64(expectedNumWritten) {
+		t.Fatalf("sstats.HostSpanMetrics[127.0.0.1].Written = %d, but "+
+			"expected %d\n", sstats.HostSpanMetrics["127.0.0.1"].Written,
+			expectedNumWritten)
+	}
+}
+
+func TestMetricsSinkPerHostEviction(t *testing.T) {
+	cnfBld := conf.Builder{
+		Values:   conf.TEST_VALUES(),
+		Defaults: conf.DEFAULTS,
+	}
+	cnfBld.Values[conf.HTRACE_METRICS_MAX_ADDR_ENTRIES] = "2"
+	cnf, err := cnfBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create conf: %s", err.Error())
+	}
+	msink := NewMetricsSink(cnf)
+	msink.UpdatePersisted("192.168.0.100", 20, 10)
+	msink.UpdatePersisted("192.168.0.101", 20, 10)
+	msink.UpdatePersisted("192.168.0.102", 20, 10)
+	msink.lock.Lock()
+	defer msink.lock.Unlock()
+	if len(msink.HostSpanMetrics) != 2 {
+		for k, v := range msink.HostSpanMetrics {
+			fmt.Printf("WATERMELON: [%s] = [%s]\n", k, v)
+		}
+		t.Fatalf("Expected len(msink.HostSpanMetrics) to be 2, but got %d\n",
+			len(msink.HostSpanMetrics))
+	}
+}
+
+func TestIngestedSpansMetricsRest(t *testing.T) {
+	testIngestedSpansMetricsImpl(t, false)
+}
+
+func TestIngestedSpansMetricsPacked(t *testing.T) {
+	testIngestedSpansMetricsImpl(t, true)
+}
+
+func testIngestedSpansMetricsImpl(t *testing.T, usePacked bool) {
+	htraceBld := &MiniHTracedBuilder{Name: "TestIngestedSpansMetrics",
+		DataDirs: make([]string, 2),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	defer ht.Close()
+	var hcl *htrace.Client
+	hcl, err = htrace.NewClient(ht.ClientConf(), &htrace.TestHooks{
+		HrpcDisabled: !usePacked,
+	})
+	if err != nil {
+		t.Fatalf("failed to create client: %s", err.Error())
+	}
+
+	NUM_TEST_SPANS := 12
+	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
+	err = hcl.WriteSpans(allSpans)
+	if err != nil {
+		t.Fatalf("WriteSpans failed: %s\n", err.Error())
+	}
+	for {
+		var stats *common.ServerStats
+		stats, err = hcl.GetServerStats()
+		if err != nil {
+			t.Fatalf("GetServerStats failed: %s\n", err.Error())
+		}
+		if stats.IngestedSpans == uint64(NUM_TEST_SPANS) {
+			break
+		}
+		time.Sleep(1 * time.Millisecond)
+	}
+}
+
+func TestCircBuf32(t *testing.T) {
+	cbuf := NewCircBufU32(3)
+	// We arbitrarily define that empty circular buffers have an average of 0.
+	if cbuf.Average() != 0 {
+		t.Fatalf("expected empty CircBufU32 to have an average of 0.\n")
+	}
+	if cbuf.Max() != 0 {
+		t.Fatalf("expected empty CircBufU32 to have a max of 0.\n")
+	}
+	cbuf.Append(2)
+	if cbuf.Average() != 2 {
+		t.Fatalf("expected one-element CircBufU32 to have an average of 2.\n")
+	}
+	cbuf.Append(10)
+	if cbuf.Average() != 6 {
+		t.Fatalf("expected two-element CircBufU32 to have an average of 6.\n")
+	}
+	cbuf.Append(12)
+	if cbuf.Average() != 8 {
+		t.Fatalf("expected three-element CircBufU32 to have an average of 8.\n")
+	}
+	cbuf.Append(14)
+	// The 14 overwrites the original 2 element.
+	if cbuf.Average() != 12 {
+		t.Fatalf("expected three-element CircBufU32 to have an average of 12.\n")
+	}
+	cbuf.Append(1)
+	// The 1 overwrites the original 10 element.
+	if cbuf.Average() != 9 {
+		t.Fatalf("expected three-element CircBufU32 to have an average of 9.\n")
+	}
+	if cbuf.Max() != 14 {
+		t.Fatalf("expected three-element CircBufU32 to have a max of 14.\n")
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/mini_htraced.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/mini_htraced.go b/htrace-htraced/go/src/htrace/htraced/mini_htraced.go
new file mode 100644
index 0000000..af8d379
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/mini_htraced.go
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"fmt"
+	"htrace/common"
+	"htrace/conf"
+	"io/ioutil"
+	"net"
+	"os"
+	"strings"
+)
+
+//
+// MiniHTraceD is used in unit tests to set up a daemon with certain settings.
+// It takes care of things like creating and cleaning up temporary directories.
+//
+
+// The default number of managed data directories to use.
+const DEFAULT_NUM_DATA_DIRS = 2
+
+// Builds a MiniHTraced object.
+type MiniHTracedBuilder struct {
+	// The name of the MiniHTraced to build.  This shows up in the test directory name and some
+	// other places.
+	Name string
+
+	// The configuration values to use for the MiniHTraced.
+	// If this is nil, we use the default configuration for everything.
+	Cnf map[string]string
+
+	// The DataDirs to use.  Empty entries will turn into random names.
+	DataDirs []string
+
+	// If true, we will keep the data dirs around after MiniHTraced#Close
+	KeepDataDirsOnClose bool
+
+	// If non-nil, the WrittenSpans semaphore to use when creating the DataStore.
+	WrittenSpans *common.Semaphore
+
+	// The test hooks to use for the HRPC server
+	HrpcTestHooks *hrpcTestHooks
+}
+
+type MiniHTraced struct {
+	Name                string
+	Cnf                 *conf.Config
+	DataDirs            []string
+	Store               *dataStore
+	Rsv                 *RestServer
+	Hsv                 *HrpcServer
+	Lg                  *common.Logger
+	KeepDataDirsOnClose bool
+}
+
+func (bld *MiniHTracedBuilder) Build() (*MiniHTraced, error) {
+	var err error
+	var store *dataStore
+	var rsv *RestServer
+	var hsv *HrpcServer
+	if bld.Name == "" {
+		bld.Name = "HTraceTest"
+	}
+	if bld.Cnf == nil {
+		bld.Cnf = make(map[string]string)
+	}
+	if bld.DataDirs == nil {
+		bld.DataDirs = make([]string, 2)
+	}
+	for idx := range bld.DataDirs {
+		if bld.DataDirs[idx] == "" {
+			bld.DataDirs[idx], err = ioutil.TempDir(os.TempDir(),
+				fmt.Sprintf("%s%d", bld.Name, idx+1))
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	// Copy the default test configuration values.
+	for k, v := range conf.TEST_VALUES() {
+		_, hasVal := bld.Cnf[k]
+		if !hasVal {
+			bld.Cnf[k] = v
+		}
+	}
+	bld.Cnf[conf.HTRACE_DATA_STORE_DIRECTORIES] =
+		strings.Join(bld.DataDirs, conf.PATH_LIST_SEP)
+	cnfBld := conf.Builder{Values: bld.Cnf, Defaults: conf.DEFAULTS}
+	cnf, err := cnfBld.Build()
+	if err != nil {
+		return nil, err
+	}
+	lg := common.NewLogger("mini.htraced", cnf)
+	defer func() {
+		if err != nil {
+			if store != nil {
+				store.Close()
+			}
+			for idx := range bld.DataDirs {
+				if !bld.KeepDataDirsOnClose {
+					if bld.DataDirs[idx] != "" {
+						os.RemoveAll(bld.DataDirs[idx])
+					}
+				}
+			}
+			if rsv != nil {
+				rsv.Close()
+			}
+			lg.Infof("Failed to create MiniHTraced %s: %s\n", bld.Name, err.Error())
+			lg.Close()
+		}
+	}()
+	store, err = CreateDataStore(cnf, bld.WrittenSpans)
+	if err != nil {
+		return nil, err
+	}
+	rstListener, listenErr := net.Listen("tcp", cnf.Get(conf.HTRACE_WEB_ADDRESS))
+	if listenErr != nil {
+		return nil, listenErr
+	}
+	defer func() {
+		if rstListener != nil {
+			rstListener.Close()
+		}
+	}()
+	rsv, err = CreateRestServer(cnf, store, rstListener)
+	if err != nil {
+		return nil, err
+	}
+	rstListener = nil
+	hsv, err = CreateHrpcServer(cnf, store, bld.HrpcTestHooks)
+	if err != nil {
+		return nil, err
+	}
+
+	lg.Infof("Created MiniHTraced %s\n", bld.Name)
+	return &MiniHTraced{
+		Name:                bld.Name,
+		Cnf:                 cnf,
+		DataDirs:            bld.DataDirs,
+		Store:               store,
+		Rsv:                 rsv,
+		Hsv:                 hsv,
+		Lg:                  lg,
+		KeepDataDirsOnClose: bld.KeepDataDirsOnClose,
+	}, nil
+}
+
+// Return a Config object that clients can use to connect to this MiniHTraceD.
+func (ht *MiniHTraced) ClientConf() *conf.Config {
+	return ht.Cnf.Clone(conf.HTRACE_WEB_ADDRESS, ht.Rsv.Addr().String(),
+		conf.HTRACE_HRPC_ADDRESS, ht.Hsv.Addr().String())
+}
+
+// Return a Config object that clients can use to connect to this MiniHTraceD
+// by HTTP only (no HRPC).
+func (ht *MiniHTraced) RestOnlyClientConf() *conf.Config {
+	return ht.Cnf.Clone(conf.HTRACE_WEB_ADDRESS, ht.Rsv.Addr().String(),
+		conf.HTRACE_HRPC_ADDRESS, "")
+}
+
+func (ht *MiniHTraced) Close() {
+	ht.Lg.Infof("Closing MiniHTraced %s\n", ht.Name)
+	ht.Rsv.Close()
+	ht.Hsv.Close()
+	ht.Store.Close()
+	if !ht.KeepDataDirsOnClose {
+		for idx := range ht.DataDirs {
+			ht.Lg.Infof("Removing %s...\n", ht.DataDirs[idx])
+			os.RemoveAll(ht.DataDirs[idx])
+		}
+	}
+	ht.Lg.Infof("Finished closing MiniHTraced %s\n", ht.Name)
+	ht.Lg.Close()
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/reaper_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/reaper_test.go b/htrace-htraced/go/src/htrace/htraced/reaper_test.go
new file mode 100644
index 0000000..af11e38
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/reaper_test.go
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"fmt"
+	"htrace/common"
+	"htrace/conf"
+	"htrace/test"
+	"math/rand"
+	"testing"
+	"time"
+)
+
+func TestReapingOldSpans(t *testing.T) {
+	const NUM_TEST_SPANS = 20
+	testSpans := make([]*common.Span, NUM_TEST_SPANS)
+	rnd := rand.New(rand.NewSource(2))
+	now := common.TimeToUnixMs(time.Now().UTC())
+	for i := range testSpans {
+		testSpans[i] = test.NewRandomSpan(rnd, testSpans[0:i])
+		testSpans[i].Begin = now - int64(NUM_TEST_SPANS-1-i)
+		testSpans[i].Description = fmt.Sprintf("Span%02d", i)
+	}
+	htraceBld := &MiniHTracedBuilder{Name: "TestReapingOldSpans",
+		Cnf: map[string]string{
+			conf.HTRACE_SPAN_EXPIRY_MS:                fmt.Sprintf("%d", 60*60*1000),
+			conf.HTRACE_REAPER_HEARTBEAT_PERIOD_MS:    "1",
+			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "1",
+		},
+		WrittenSpans: common.NewSemaphore(0),
+		DataDirs:     make([]string, 2),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create mini htraced cluster: %s\n", err.Error())
+	}
+	ing := ht.Store.NewSpanIngestor(ht.Store.lg, "127.0.0.1", "")
+	for spanIdx := range testSpans {
+		ing.IngestSpan(testSpans[spanIdx])
+	}
+	ing.Close(time.Now())
+	// Wait for the spans to be created.
+	ht.Store.WrittenSpans.Waits(NUM_TEST_SPANS)
+	// Set a reaper date that will remove all the spans except the final one.
+	ht.Store.rpr.SetReaperDate(now)
+
+	common.WaitFor(5*time.Minute, time.Millisecond, func() bool {
+		for i := 0; i < NUM_TEST_SPANS-1; i++ {
+			span := ht.Store.FindSpan(testSpans[i].Id)
+			if span != nil {
+				ht.Store.lg.Debugf("Waiting for %s to be removed...\n",
+					testSpans[i].Description)
+				return false
+			}
+		}
+		span := ht.Store.FindSpan(testSpans[NUM_TEST_SPANS-1].Id)
+		if span == nil {
+			ht.Store.lg.Debugf("Did not expect %s to be removed\n",
+				testSpans[NUM_TEST_SPANS-1].Description)
+			return false
+		}
+		return true
+	})
+	defer ht.Close()
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/rest.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/rest.go b/htrace-htraced/go/src/htrace/htraced/rest.go
new file mode 100644
index 0000000..1ba4791
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/rest.go
@@ -0,0 +1,376 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"github.com/gorilla/mux"
+	"htrace/common"
+	"htrace/conf"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Set the response headers.
+func setResponseHeaders(hdr http.Header) {
+	hdr.Set("Content-Type", "application/json")
+}
+
+// Write a JSON error response.
+func writeError(lg *common.Logger, w http.ResponseWriter, errCode int,
+	errStr string) {
+	str := strings.Replace(errStr, `"`, `'`, -1)
+	lg.Info(str + "\n")
+	w.WriteHeader(errCode)
+	w.Write([]byte(`{ "error" : "` + str + `"}`))
+}
+
+type serverVersionHandler struct {
+	lg *common.Logger
+}
+
+func (hand *serverVersionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	setResponseHeaders(w.Header())
+	version := common.ServerVersion{ReleaseVersion: RELEASE_VERSION,
+		GitVersion: GIT_VERSION}
+	buf, err := json.Marshal(&version)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusInternalServerError,
+			fmt.Sprintf("error marshalling ServerVersion: %s\n", err.Error()))
+		return
+	}
+	if hand.lg.DebugEnabled() {
+		hand.lg.Debugf("Returned ServerVersion %s\n", string(buf))
+	}
+	w.Write(buf)
+}
+
+type serverDebugInfoHandler struct {
+	lg *common.Logger
+}
+
+func (hand *serverDebugInfoHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	setResponseHeaders(w.Header())
+	buf := make([]byte, 1<<20)
+	common.GetStackTraces(&buf)
+	resp := common.ServerDebugInfo{
+		StackTraces: string(buf),
+		GCStats:     common.GetGCStats(),
+	}
+	buf, err := json.Marshal(&resp)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusInternalServerError,
+			fmt.Sprintf("error marshalling ServerDebugInfo: %s\n", err.Error()))
+		return
+	}
+	w.Write(buf)
+	hand.lg.Info("Returned ServerDebugInfo\n")
+}
+
+type serverStatsHandler struct {
+	dataStoreHandler
+}
+
+func (hand *serverStatsHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	setResponseHeaders(w.Header())
+	hand.lg.Debugf("serverStatsHandler\n")
+	stats := hand.store.ServerStats()
+	buf, err := json.Marshal(&stats)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusInternalServerError,
+			fmt.Sprintf("error marshalling ServerStats: %s\n", err.Error()))
+		return
+	}
+	hand.lg.Debugf("Returned ServerStats %s\n", string(buf))
+	w.Write(buf)
+}
+
+type serverConfHandler struct {
+	cnf *conf.Config
+	lg  *common.Logger
+}
+
+func (hand *serverConfHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	setResponseHeaders(w.Header())
+	hand.lg.Debugf("serverConfHandler\n")
+	cnfMap := hand.cnf.Export()
+	buf, err := json.Marshal(&cnfMap)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusInternalServerError,
+			fmt.Sprintf("error marshalling serverConf: %s\n", err.Error()))
+		return
+	}
+	hand.lg.Debugf("Returned server configuration %s\n", string(buf))
+	w.Write(buf)
+}
+
+type dataStoreHandler struct {
+	lg    *common.Logger
+	store *dataStore
+}
+
+func (hand *dataStoreHandler) parseSid(w http.ResponseWriter,
+	str string) (common.SpanId, bool) {
+	var id common.SpanId
+	err := id.FromString(str)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusBadRequest,
+			fmt.Sprintf("Failed to parse span ID %s: %s", str, err.Error()))
+		w.Write([]byte("Error parsing : " + err.Error()))
+		return common.INVALID_SPAN_ID, false
+	}
+	return id, true
+}
+
+func (hand *dataStoreHandler) getReqField32(fieldName string, w http.ResponseWriter,
+	req *http.Request) (int32, bool) {
+	str := req.FormValue(fieldName)
+	if str == "" {
+		writeError(hand.lg, w, http.StatusBadRequest, fmt.Sprintf("No %s specified.", fieldName))
+		return -1, false
+	}
+	val, err := strconv.ParseUint(str, 16, 32)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusBadRequest,
+			fmt.Sprintf("Error parsing %s: %s.", fieldName, err.Error()))
+		return -1, false
+	}
+	return int32(val), true
+}
+
+type findSidHandler struct {
+	dataStoreHandler
+}
+
+func (hand *findSidHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	setResponseHeaders(w.Header())
+	req.ParseForm()
+	vars := mux.Vars(req)
+	stringSid := vars["id"]
+	sid, ok := hand.parseSid(w, stringSid)
+	if !ok {
+		return
+	}
+	hand.lg.Debugf("findSidHandler(sid=%s)\n", sid.String())
+	span := hand.store.FindSpan(sid)
+	if span == nil {
+		writeError(hand.lg, w, http.StatusNoContent,
+			fmt.Sprintf("No such span as %s\n", sid.String()))
+		return
+	}
+	w.Write(span.ToJson())
+}
+
+type findChildrenHandler struct {
+	dataStoreHandler
+}
+
+func (hand *findChildrenHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	setResponseHeaders(w.Header())
+	req.ParseForm()
+	vars := mux.Vars(req)
+	stringSid := vars["id"]
+	sid, ok := hand.parseSid(w, stringSid)
+	if !ok {
+		return
+	}
+	var lim int32
+	lim, ok = hand.getReqField32("lim", w, req)
+	if !ok {
+		return
+	}
+	hand.lg.Debugf("findChildrenHandler(sid=%s, lim=%d)\n", sid.String(), lim)
+	children := hand.store.FindChildren(sid, lim)
+	jbytes, err := json.Marshal(children)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusInternalServerError,
+			fmt.Sprintf("Error marshalling children: %s", err.Error()))
+		return
+	}
+	w.Write(jbytes)
+}
+
+type writeSpansHandler struct {
+	dataStoreHandler
+}
+
+func (hand *writeSpansHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	startTime := time.Now()
+	setResponseHeaders(w.Header())
+	client, _, serr := net.SplitHostPort(req.RemoteAddr)
+	if serr != nil {
+		writeError(hand.lg, w, http.StatusBadRequest,
+			fmt.Sprintf("Failed to split host and port for %s: %s\n",
+				req.RemoteAddr, serr.Error()))
+		return
+	}
+	dec := json.NewDecoder(req.Body)
+	var msg common.WriteSpansReq
+	err := dec.Decode(&msg)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusBadRequest,
+			fmt.Sprintf("Error parsing WriteSpansReq: %s", err.Error()))
+		return
+	}
+	if hand.lg.TraceEnabled() {
+		hand.lg.Tracef("%s: read WriteSpans REST message: %s\n",
+			req.RemoteAddr, asJson(&msg))
+	}
+	ing := hand.store.NewSpanIngestor(hand.lg, client, msg.DefaultTrid)
+	for spanIdx := 0; spanIdx < msg.NumSpans; spanIdx++ {
+		var span *common.Span
+		err := dec.Decode(&span)
+		if err != nil {
+			writeError(hand.lg, w, http.StatusBadRequest,
+				fmt.Sprintf("Failed to decode span %d out of %d: %s",
+					spanIdx, msg.NumSpans, err.Error()))
+			return
+		}
+		ing.IngestSpan(span)
+	}
+	ing.Close(startTime)
+	return
+}
+
+type queryHandler struct {
+	lg *common.Logger
+	dataStoreHandler
+}
+
+func (hand *queryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	setResponseHeaders(w.Header())
+	queryString := req.FormValue("query")
+	if queryString == "" {
+		writeError(hand.lg, w, http.StatusBadRequest, "No query provided.\n")
+		return
+	}
+	var query common.Query
+	reader := bytes.NewBufferString(queryString)
+	dec := json.NewDecoder(reader)
+	err := dec.Decode(&query)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusBadRequest,
+			fmt.Sprintf("Error parsing query '%s': %s", queryString, err.Error()))
+		return
+	}
+	var results []*common.Span
+	results, err, _ = hand.store.HandleQuery(&query)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusInternalServerError,
+			fmt.Sprintf("Internal error processing query %s: %s",
+				query.String(), err.Error()))
+		return
+	}
+	var jbytes []byte
+	jbytes, err = json.Marshal(results)
+	if err != nil {
+		writeError(hand.lg, w, http.StatusInternalServerError,
+			fmt.Sprintf("Error marshalling results: %s", err.Error()))
+		return
+	}
+	w.Write(jbytes)
+}
+
+type logErrorHandler struct {
+	lg *common.Logger
+}
+
+func (hand *logErrorHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	hand.lg.Errorf("Got unknown request %s\n", req.RequestURI)
+	writeError(hand.lg, w, http.StatusBadRequest, "Unknown request.")
+}
+
+type RestServer struct {
+	http.Server
+	listener net.Listener
+	lg       *common.Logger
+}
+
+func CreateRestServer(cnf *conf.Config, store *dataStore,
+	listener net.Listener) (*RestServer, error) {
+	var err error
+	rsv := &RestServer{}
+	rsv.lg = common.NewLogger("rest", cnf)
+
+	r := mux.NewRouter().StrictSlash(false)
+
+	r.Handle("/server/info", &serverVersionHandler{lg: rsv.lg}).Methods("GET")
+	r.Handle("/server/version", &serverVersionHandler{lg: rsv.lg}).Methods("GET")
+	r.Handle("/server/debugInfo", &serverDebugInfoHandler{lg: rsv.lg}).Methods("GET")
+
+	serverStatsH := &serverStatsHandler{dataStoreHandler: dataStoreHandler{
+		store: store, lg: rsv.lg}}
+	r.Handle("/server/stats", serverStatsH).Methods("GET")
+
+	serverConfH := &serverConfHandler{cnf: cnf, lg: rsv.lg}
+	r.Handle("/server/conf", serverConfH).Methods("GET")
+
+	writeSpansH := &writeSpansHandler{dataStoreHandler: dataStoreHandler{
+		store: store, lg: rsv.lg}}
+	r.Handle("/writeSpans", writeSpansH).Methods("POST")
+
+	queryH := &queryHandler{lg: rsv.lg, dataStoreHandler: dataStoreHandler{store: store}}
+	r.Handle("/query", queryH).Methods("GET")
+
+	span := r.PathPrefix("/span").Subrouter()
+	findSidH := &findSidHandler{dataStoreHandler: dataStoreHandler{store: store, lg: rsv.lg}}
+	span.Handle("/{id}", findSidH).Methods("GET")
+
+	findChildrenH := &findChildrenHandler{dataStoreHandler: dataStoreHandler{store: store,
+		lg: rsv.lg}}
+	span.Handle("/{id}/children", findChildrenH).Methods("GET")
+
+	// Default handler.  This serves requests for static content.
+	webdir := os.Getenv("HTRACED_WEB_DIR")
+	if webdir == "" {
+		webdir, err = filepath.Abs(filepath.Join(filepath.Dir(os.Args[0]), "..", "web"))
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	rsv.lg.Infof(`Serving static files from "%s"`+"\n", webdir)
+	r.PathPrefix("/").Handler(http.FileServer(http.Dir(webdir))).Methods("GET")
+
+	// Log an error message for unknown non-GET requests.
+	r.PathPrefix("/").Handler(&logErrorHandler{lg: rsv.lg})
+
+	rsv.listener = listener
+	rsv.Handler = r
+	rsv.ErrorLog = rsv.lg.Wrap("[REST] ", common.INFO)
+	go rsv.Serve(rsv.listener)
+	rsv.lg.Infof("Started REST server on %s\n", rsv.listener.Addr().String())
+	return rsv, nil
+}
+
+func (rsv *RestServer) Addr() net.Addr {
+	return rsv.listener.Addr()
+}
+
+func (rsv *RestServer) Close() {
+	rsv.listener.Close()
+}

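Since every handler above sets the Content-Type to application/json, the simplest smoke test of the routes registered in CreateRestServer is a plain HTTP GET.  The sketch below hits /server/version (one of the routes above) and prints the decoded JSON.  The address is a placeholder, so substitute whatever address conf.HTRACE_WEB_ADDRESS resolves to for your daemon; the response is decoded into a generic map to avoid assuming the exact JSON field names of common.ServerVersion:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    func main() {
        // Placeholder address: use the web address your htraced is bound to.
        resp, err := http.Get("http://127.0.0.1:9096/server/version")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        // Decode into a generic map so no field names are assumed.
        var ver map[string]interface{}
        if err := json.NewDecoder(resp.Body).Decode(&ver); err != nil {
            panic(err)
        }
        fmt.Println(ver)
    }
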
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htracedTool/cmd.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htracedTool/cmd.go b/htrace-htraced/go/src/htrace/htracedTool/cmd.go
new file mode 100644
index 0000000..65b67e5
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htracedTool/cmd.go
@@ -0,0 +1,442 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/alecthomas/kingpin"
+	htrace "htrace/client"
+	"htrace/common"
+	"htrace/conf"
+	"io"
+	"os"
+	"sort"
+	"strings"
+	"text/tabwriter"
+	"time"
+)
+
+var RELEASE_VERSION string
+var GIT_VERSION string
+
+const EXIT_SUCCESS = 0
+const EXIT_FAILURE = 1
+
+var verbose bool
+
+const USAGE = `The Apache HTrace command-line tool.  This tool retrieves and modifies settings and
+other data on a running htraced daemon.
+
+If we find an ` + conf.CONFIG_FILE_NAME + ` configuration file in the list of directories
+specified in ` + conf.HTRACED_CONF_DIR + `, we will use that configuration; otherwise,
+the defaults will be used.
+`
+
+func main() {
+	// Load htraced configuration
+	cnf, cnfLog := conf.LoadApplicationConfig("htrace.tool.")
+	lg := common.NewLogger("conf", cnf)
+	defer lg.Close()
+	scanner := bufio.NewScanner(cnfLog)
+	for scanner.Scan() {
+		lg.Debugf("%s\n", scanner.Text())
+	}
+
+	// Parse argv
+	app := kingpin.New(os.Args[0], USAGE)
+	app.Flag("Dmy.key", "Set configuration key 'my.key' to 'my.value'.  Replace 'my.key' "+
+		"with any key you want to set.").Default("my.value").String()
+	addr := app.Flag("addr", "Server address.").String()
+	verboseFlag := app.Flag("verbose", "Verbose.").Default("false").Bool()
+	version := app.Command("version", "Print the version of this program.")
+	serverVersion := app.Command("serverVersion", "Print the version of the htraced server.")
+	serverStats := app.Command("serverStats", "Print statistics retrieved from the htraced server.")
+	serverStatsJson := serverStats.Flag("json", "Display statistics as raw JSON.").Default("false").Bool()
+	serverDebugInfo := app.Command("serverDebugInfo", "Print the debug info of the htraced server.")
+	serverConf := app.Command("serverConf", "Print the server configuration retrieved from the htraced server.")
+	findSpan := app.Command("findSpan", "Print information about a trace span with a given ID.")
+	findSpanId := findSpan.Arg("id", "Span ID to find. Example: be305e54-4534-2110-a0b2-e06b9effe112").Required().String()
+	findChildren := app.Command("findChildren", "Print out the span IDs that are children of a given span ID.")
+	parentSpanId := findChildren.Arg("id", "Span ID to print children for. Example: be305e54-4534-2110-a0b2-e06b9effe112").
+		Required().String()
+	childLim := findChildren.Flag("lim", "Maximum number of child IDs to print.").Default("20").Int()
+	loadFile := app.Command("loadFile", "Write whitespace-separated JSON spans from a file to the server.")
+	loadFilePath := loadFile.Arg("path",
+		"A file containing whitespace-separated span JSON.").Required().String()
+	loadJson := app.Command("load", "Write JSON spans from the command-line to the server.")
+	loadJsonArg := loadJson.Arg("json", "A JSON span to write to the server.").Required().String()
+	dumpAll := app.Command("dumpAll", "Dump all spans from the htraced daemon.")
+	dumpAllOutPath := dumpAll.Arg("path", "The path to dump the trace spans to.").Default("-").String()
+	dumpAllLim := dumpAll.Flag("lim", "The number of spans to transfer from the server at once.").
+		Default("100").Int()
+	graph := app.Command("graph", "Visualize span JSON as a graph.")
+	graphJsonFile := graph.Arg("input", "The JSON file to load").Required().String()
+	graphDotFile := graph.Flag("output",
+		"The path to write a GraphViz dotfile to.  This file can be used as input to "+
+			"GraphViz, in order to generate a pretty picture.  See graphviz.org for more "+
+			"information about generating pictures of graphs.").Default("-").String()
+	query := app.Command("query", "Send a query to htraced.")
+	queryLim := query.Flag("lim", "Maximum number of spans to retrieve.").Default("20").Int()
+	queryArg := query.Arg("query", "The query string to send.  Query strings have the format "+
+		"[TYPE] [OPERATOR] [CONST], joined by AND statements.").Required().String()
+	rawQuery := app.Command("rawQuery", "Send a raw JSON query to htraced.")
+	rawQueryArg := rawQuery.Arg("json", "The query JSON to send.").Required().String()
+	cmd := kingpin.MustParse(app.Parse(os.Args[1:]))
+	verbose = *verboseFlag
+
+	// Add the command-line settings into the configuration.
+	if *addr != "" {
+		cnf = cnf.Clone(conf.HTRACE_WEB_ADDRESS, *addr)
+	}
+
+	// Handle commands that don't require an HTrace client.
+	switch cmd {
+	case version.FullCommand():
+		os.Exit(printVersion())
+	case graph.FullCommand():
+		err := jsonSpanFileToDotFile(*graphJsonFile, *graphDotFile)
+		if err != nil {
+			fmt.Printf("graphing error: %s\n", err.Error())
+			os.Exit(EXIT_FAILURE)
+		}
+		os.Exit(EXIT_SUCCESS)
+	}
+
+	// Create HTrace client
+	hcl, err := htrace.NewClient(cnf, nil)
+	if err != nil {
+		fmt.Printf("Failed to create HTrace client: %s\n", err.Error())
+		os.Exit(EXIT_FAILURE)
+	}
+
+	// Handle commands that require an HTrace client.
+	switch cmd {
+	case version.FullCommand():
+		os.Exit(printVersion())
+	case serverVersion.FullCommand():
+		os.Exit(printServerVersion(hcl))
+	case serverStats.FullCommand():
+		if *serverStatsJson {
+			os.Exit(printServerStatsJson(hcl))
+		} else {
+			os.Exit(printServerStats(hcl))
+		}
+	case serverDebugInfo.FullCommand():
+		os.Exit(printServerDebugInfo(hcl))
+	case serverConf.FullCommand():
+		os.Exit(printServerConfJson(hcl))
+	case findSpan.FullCommand():
+		var id common.SpanId
+		if err := id.FromString(*findSpanId); err != nil {
+			fmt.Printf("Failed to parse span ID %s: %s\n", *findSpanId, err.Error())
+			os.Exit(EXIT_FAILURE)
+		}
+		os.Exit(doFindSpan(hcl, id))
+	case findChildren.FullCommand():
+		var id common.SpanId
+		if err := id.FromString(*parentSpanId); err != nil {
+			fmt.Printf("Failed to parse span ID %s: %s\n", *parentSpanId, err.Error())
+			os.Exit(EXIT_FAILURE)
+		}
+		os.Exit(doFindChildren(hcl, id, *childLim))
+	case loadJson.FullCommand():
+		os.Exit(doLoadSpanJson(hcl, *loadJsonArg))
+	case loadFile.FullCommand():
+		os.Exit(doLoadSpanJsonFile(hcl, *loadFilePath))
+	case dumpAll.FullCommand():
+		err := doDumpAll(hcl, *dumpAllOutPath, *dumpAllLim)
+		if err != nil {
+			fmt.Printf("dumpAll error: %s\n", err.Error())
+			os.Exit(EXIT_FAILURE)
+		}
+		os.Exit(EXIT_SUCCESS)
+	case query.FullCommand():
+		err := doQueryFromString(hcl, *queryArg, *queryLim)
+		if err != nil {
+			fmt.Printf("query error: %s\n", err.Error())
+			os.Exit(EXIT_FAILURE)
+		}
+		os.Exit(EXIT_SUCCESS)
+	case rawQuery.FullCommand():
+		err := doRawQuery(hcl, *rawQueryArg)
+		if err != nil {
+			fmt.Printf("raw query error: %s\n", err.Error())
+			os.Exit(EXIT_FAILURE)
+		}
+		os.Exit(EXIT_SUCCESS)
+	}
+
+	app.UsageErrorf(os.Stderr, "You must supply a command to run.")
+}
+
+// Print the version of the htrace binary.
+func printVersion() int {
+	fmt.Printf("Running htracedTool %s [%s].\n", RELEASE_VERSION, GIT_VERSION)
+	return EXIT_SUCCESS
+}
+
+// Print information retrieved from an htraced server via /server/info
+func printServerVersion(hcl *htrace.Client) int {
+	ver, err := hcl.GetServerVersion()
+	if err != nil {
+		fmt.Println(err.Error())
+		return EXIT_FAILURE
+	}
+	fmt.Printf("HTraced server version %s (%s)\n", ver.ReleaseVersion, ver.GitVersion)
+	return EXIT_SUCCESS
+}
+
+// Print information retrieved from an htraced server via /server/info
+func printServerStats(hcl *htrace.Client) int {
+	stats, err := hcl.GetServerStats()
+	if err != nil {
+		fmt.Println(err.Error())
+		return EXIT_FAILURE
+	}
+	w := new(tabwriter.Writer)
+	w.Init(os.Stdout, 0, 8, 0, '\t', 0)
+	fmt.Fprintf(w, "HTRACED SERVER STATS\n")
+	fmt.Fprintf(w, "Datastore Start\t%s\n",
+		common.UnixMsToTime(stats.LastStartMs).Format(time.RFC3339))
+	fmt.Fprintf(w, "Server Time\t%s\n",
+		common.UnixMsToTime(stats.CurMs).Format(time.RFC3339))
+	fmt.Fprintf(w, "Spans reaped\t%d\n", stats.ReapedSpans)
+	fmt.Fprintf(w, "Spans ingested\t%d\n", stats.IngestedSpans)
+	fmt.Fprintf(w, "Spans written\t%d\n", stats.WrittenSpans)
+	fmt.Fprintf(w, "Spans dropped by server\t%d\n", stats.ServerDroppedSpans)
+	dur := time.Millisecond * time.Duration(stats.AverageWriteSpansLatencyMs)
+	fmt.Fprintf(w, "Average WriteSpan Latency\t%s\n", dur.String())
+	dur = time.Millisecond * time.Duration(stats.MaxWriteSpansLatencyMs)
+	fmt.Fprintf(w, "Maximum WriteSpan Latency\t%s\n", dur.String())
+	fmt.Fprintf(w, "Number of leveldb directories\t%d\n", len(stats.Dirs))
+	w.Flush()
+	fmt.Println("")
+	for i := range stats.Dirs {
+		dir := stats.Dirs[i]
+		fmt.Printf("=== %s ===\n", dir.Path)
+		fmt.Printf("Approximate number of bytes: %d\n", dir.ApproximateBytes)
+		stats := strings.Replace(dir.LevelDbStats, "\\n", "\n", -1)
+		fmt.Printf("%s\n", stats)
+	}
+	w = new(tabwriter.Writer)
+	w.Init(os.Stdout, 0, 8, 0, '\t', 0)
+	fmt.Fprintf(w, "HOST SPAN METRICS\n")
+	mtxMap := stats.HostSpanMetrics
+	keys := make(sort.StringSlice, len(mtxMap))
+	i := 0
+	for k := range mtxMap {
+		keys[i] = k
+		i++
+	}
+	sort.Sort(keys)
+	for k := range keys {
+		mtx := mtxMap[keys[k]]
+		fmt.Fprintf(w, "%s\twritten: %d\tserver dropped: %d\n",
+			keys[k], mtx.Written, mtx.ServerDropped)
+	}
+	w.Flush()
+	return EXIT_SUCCESS
+}
+
+// Print statistics retrieved from an htraced server via /server/stats as JSON
+func printServerStatsJson(hcl *htrace.Client) int {
+	stats, err := hcl.GetServerStats()
+	if err != nil {
+		fmt.Println(err.Error())
+		return EXIT_FAILURE
+	}
+	buf, err := json.MarshalIndent(stats, "", "  ")
+	if err != nil {
+		fmt.Printf("Error marshalling server stats: %s", err.Error())
+		return EXIT_FAILURE
+	}
+	fmt.Printf("%s\n", string(buf))
+	return EXIT_SUCCESS
+}
+
+// Print information retrieved from an htraced server via /server/debugInfo
+func printServerDebugInfo(hcl *htrace.Client) int {
+	stats, err := hcl.GetServerDebugInfo()
+	if err != nil {
+		fmt.Println(err.Error())
+		return EXIT_FAILURE
+	}
+	fmt.Println("=== GOROUTINE STACKS ===")
+	fmt.Print(stats.StackTraces)
+	fmt.Println("=== END GOROUTINE STACKS ===")
+	fmt.Println("=== GC STATISTICS ===")
+	fmt.Print(stats.GCStats)
+	fmt.Println("=== END GC STATISTICS ===")
+	return EXIT_SUCCESS
+}
+
+// Print information retrieved from an htraced server via /server/conf as JSON
+func printServerConfJson(hcl *htrace.Client) int {
+	cnf, err := hcl.GetServerConf()
+	if err != nil {
+		fmt.Println(err.Error())
+		return EXIT_FAILURE
+	}
+	buf, err := json.MarshalIndent(cnf, "", "  ")
+	if err != nil {
+		fmt.Printf("Error marshalling server conf: %s", err.Error())
+		return EXIT_FAILURE
+	}
+	fmt.Printf("%s\n", string(buf))
+	return EXIT_SUCCESS
+}
+
+// Print information about a trace span.
+func doFindSpan(hcl *htrace.Client, sid common.SpanId) int {
+	span, err := hcl.FindSpan(sid)
+	if err != nil {
+		fmt.Println(err.Error())
+		return EXIT_FAILURE
+	}
+	if span == nil {
+		fmt.Printf("Span ID not found.\n")
+		return EXIT_FAILURE
+	}
+	pbuf, err := json.MarshalIndent(span, "", "  ")
+	if err != nil {
+		fmt.Printf("Error: error pretty-printing span to JSON: %s\n", err.Error())
+		return EXIT_FAILURE
+	}
+	fmt.Printf("%s\n", string(pbuf))
+	return EXIT_SUCCESS
+}
+
+func doLoadSpanJsonFile(hcl *htrace.Client, spanFile string) int {
+	if spanFile == "" {
+		fmt.Printf("You must specify the json file to load.\n")
+		return EXIT_FAILURE
+	}
+	file, err := OpenInputFile(spanFile)
+	if err != nil {
+		fmt.Printf("Failed to open %s: %s\n", spanFile, err.Error())
+		return EXIT_FAILURE
+	}
+	defer file.Close()
+	return doLoadSpans(hcl, bufio.NewReader(file))
+}
+
+func doLoadSpanJson(hcl *htrace.Client, spanJson string) int {
+	return doLoadSpans(hcl, bytes.NewBufferString(spanJson))
+}
+
+func doLoadSpans(hcl *htrace.Client, reader io.Reader) int {
+	dec := json.NewDecoder(reader)
+	spans := make([]*common.Span, 0, 32)
+	var err error
+	for {
+		var span common.Span
+		if err = dec.Decode(&span); err != nil {
+			if err == io.EOF {
+				break
+			}
+			fmt.Printf("Failed to decode JSON: %s\n", err.Error())
+			return EXIT_FAILURE
+		}
+		spans = append(spans, &span)
+	}
+	if verbose {
+		fmt.Printf("Writing ")
+		prefix := ""
+		for i := range spans {
+			fmt.Printf("%s%s", prefix, spans[i].ToJson())
+			prefix = ", "
+		}
+		fmt.Printf("\n")
+	}
+	err = hcl.WriteSpans(spans)
+	if err != nil {
+		fmt.Println(err.Error())
+		return EXIT_FAILURE
+	}
+	return EXIT_SUCCESS
+}
+
+// Find information about the children of a span.
+func doFindChildren(hcl *htrace.Client, sid common.SpanId, lim int) int {
+	spanIds, err := hcl.FindChildren(sid, lim)
+	if err != nil {
+		fmt.Printf("%s\n", err.Error())
+		return EXIT_FAILURE
+	}
+	pbuf, err := json.MarshalIndent(spanIds, "", "  ")
+	if err != nil {
+		fmt.Printf("Error: error pretty-printing span IDs to JSON: %s\n", err.Error())
+		return EXIT_FAILURE
+	}
+	fmt.Printf("%s\n", string(pbuf))
+	return EXIT_SUCCESS
+}
+
+// Dump all spans from the htraced daemon.
+func doDumpAll(hcl *htrace.Client, outPath string, lim int) error {
+	file, err := CreateOutputFile(outPath)
+	if err != nil {
+		return err
+	}
+	w := bufio.NewWriter(file)
+	defer func() {
+		if file != nil {
+			w.Flush()
+			file.Close()
+		}
+	}()
+	out := make(chan *common.Span, 50)
+	var dumpErr error
+	go func() {
+		dumpErr = hcl.DumpAll(lim, out)
+	}()
+	var numSpans int64
+	nextLogTime := time.Now().Add(time.Second * 5)
+	for {
+		span, channelOpen := <-out
+		if !channelOpen {
+			break
+		}
+		if err == nil {
+			_, err = fmt.Fprintf(w, "%s\n", span.ToJson())
+		}
+		if verbose {
+			numSpans++
+			now := time.Now()
+			if !now.Before(nextLogTime) {
+				nextLogTime = now.Add(time.Second * 5)
+				fmt.Printf("received %d span(s)...\n", numSpans)
+			}
+		}
+	}
+	if err != nil {
+		return errors.New(fmt.Sprintf("Write error %s", err.Error()))
+	}
+	if dumpErr != nil {
+		return errors.New(fmt.Sprintf("Dump error %s", dumpErr.Error()))
+	}
+	err = w.Flush()
+	if err != nil {
+		return err
+	}
+	err = file.Close()
+	file = nil
+	if err != nil {
+		return err
+	}
+	return nil
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htracedTool/file.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htracedTool/file.go b/htrace-htraced/go/src/htrace/htracedTool/file.go
new file mode 100644
index 0000000..ca9c18d
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htracedTool/file.go
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"bufio"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"htrace/common"
+	"io"
+	"os"
+)
+
+// A file used for input.
+// Transparently supports using stdin for input.
+type InputFile struct {
+	*os.File
+	path string
+}
+
+// Open an input file.  Stdin will be used when path is -
+func OpenInputFile(path string) (*InputFile, error) {
+	if path == "-" {
+		return &InputFile{File: os.Stdin, path: path}, nil
+	}
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	return &InputFile{File: file, path: path}, nil
+}
+
+func (file *InputFile) Close() {
+	if file.path != "-" {
+		file.File.Close()
+	}
+}
+
+// A file used for output.
+// Transparently supports using stdout for output.
+type OutputFile struct {
+	*os.File
+	path string
+}
+
+// Create an output file.  Stdout will be used when path is -
+func CreateOutputFile(path string) (*OutputFile, error) {
+	if path == "-" {
+		return &OutputFile{File: os.Stdout, path: path}, nil
+	}
+	file, err := os.Create(path)
+	if err != nil {
+		return nil, err
+	}
+	return &OutputFile{File: file, path: path}, nil
+}
+
+func (file *OutputFile) Close() error {
+	if file.path != "-" {
+		return file.File.Close()
+	}
+	return nil
+}
+
+// FailureDeferringWriter is a writer which allows us to call Printf multiple
+// times and then check if all the printfs succeeded at the very end, rather
+// than checking after each call.   We will not attempt to write more data
+// after the first write failure.
+type FailureDeferringWriter struct {
+	io.Writer
+	err error
+}
+
+func NewFailureDeferringWriter(writer io.Writer) *FailureDeferringWriter {
+	return &FailureDeferringWriter{writer, nil}
+}
+
+func (w *FailureDeferringWriter) Printf(format string, v ...interface{}) {
+	if w.err != nil {
+		return
+	}
+	str := fmt.Sprintf(format, v...)
+	_, err := w.Writer.Write([]byte(str))
+	if err != nil {
+		w.err = err
+	}
+}
+
+func (w *FailureDeferringWriter) Error() error {
+	return w.err
+}
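
As a usage sketch of the deferred-error pattern above (the bytes.Buffer and the messages are purely illustrative, and the standard bytes and fmt packages are assumed):

	// Illustrative only: issue several writes, then check for failure once.
	var buf bytes.Buffer
	w := NewFailureDeferringWriter(&buf)
	w.Printf("spans written: %d\n", 100)
	w.Printf("spans dropped: %d\n", 0)
	if err := w.Error(); err != nil {
		// Only the first failed write is recorded; later writes are skipped.
		fmt.Printf("write failed: %s\n", err.Error())
	}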
+
+// Read a file full of whitespace-separated span JSON into a slice of spans.
+func readSpansFile(path string) (common.SpanSlice, error) {
+	file, err := OpenInputFile(path)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return readSpans(bufio.NewReader(file))
+}
+
+// Read whitespace-separated span JSON into a slice of spans.
+func readSpans(reader io.Reader) (common.SpanSlice, error) {
+	spans := make(common.SpanSlice, 0)
+	dec := json.NewDecoder(reader)
+	for {
+		var span common.Span
+		err := dec.Decode(&span)
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.New(fmt.Sprintf("Decode error after decoding %d "+
+					"span(s): %s", len(spans), err.Error()))
+			}
+			break
+		}
+		spans = append(spans, &span)
+	}
+	return spans, nil
+}



[6/7] incubator-htrace git commit: HTRACE-357. Rename htrace-htraced/go/src/org/apache/htrace to htrace-htraced/go/src/htrace (Colin Patrick McCabe via iwasakims)

Posted by iw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/conf/config_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/conf/config_test.go b/htrace-htraced/go/src/htrace/conf/config_test.go
new file mode 100644
index 0000000..bdab187
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/conf/config_test.go
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package conf
+
+import (
+	"bytes"
+	"os"
+	"strings"
+	"testing"
+)
+
+// Test that parsing command-line arguments of the form -Dfoo=bar works.
+func TestParseArgV(t *testing.T) {
+	t.Parallel()
+	argv := []string{"-Dfoo=bar", "-Dbaz=123", "-DsillyMode", "-Dlog.path="}
+	bld := &Builder{Argv: argv,
+		Defaults: map[string]string{
+			"log.path": "/log/path/default",
+		}}
+	cnf, err := bld.Build()
+	if err != nil {
+		t.Fatal()
+	}
+	if "bar" != cnf.Get("foo") {
+		t.Fatal()
+	}
+	if 123 != cnf.GetInt("baz") {
+		t.Fatal()
+	}
+	if !cnf.GetBool("sillyMode") {
+		t.Fatal()
+	}
+	if cnf.GetBool("otherSillyMode") {
+		t.Fatal()
+	}
+	if "" != cnf.Get("log.path") {
+		t.Fatal()
+	}
+}
+
+// Test that default values work.
+// Defaults are used only when the configuration option is not present or can't be parsed.
+func TestDefaults(t *testing.T) {
+	t.Parallel()
+	argv := []string{"-Dfoo=bar", "-Dbaz=invalidNumber"}
+	defaults := map[string]string{
+		"foo":  "notbar",
+		"baz":  "456",
+		"foo2": "4611686018427387904",
+	}
+	bld := &Builder{Argv: argv, Defaults: defaults}
+	cnf, err := bld.Build()
+	if err != nil {
+		t.Fatal()
+	}
+	if "bar" != cnf.Get("foo") {
+		t.Fatal()
+	}
+	if 456 != cnf.GetInt("baz") {
+		t.Fatal()
+	}
+	if 4611686018427387904 != cnf.GetInt64("foo2") {
+		t.Fatal()
+	}
+}
+
+// Test that we can parse our XML configuration file.
+func TestXmlConfigurationFile(t *testing.T) {
+	t.Parallel()
+	xml := `
+<?xml version="1.0"?>
+<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
+<configuration>
+  <property>
+    <name>foo.bar</name>
+    <value>123</value>
+  </property>
+  <property>
+    <name>foo.baz</name>
+    <value>xmlValue</value>
+  </property>
+  <!--<property>
+    <name>commented.out</name>
+    <value>stuff</value>
+  </property>-->
+</configuration>
+`
+	xmlReader := strings.NewReader(xml)
+	argv := []string{"-Dfoo.bar=456"}
+	defaults := map[string]string{
+		"foo.bar":     "789",
+		"cmdline.opt": "4611686018427387904",
+	}
+	bld := &Builder{Argv: argv, Defaults: defaults, Reader: xmlReader}
+	cnf, err := bld.Build()
+	if err != nil {
+		t.Fatal()
+	}
+	// The command-line argument takes precedence over the XML and the defaults.
+	if 456 != cnf.GetInt("foo.bar") {
+		t.Fatal()
+	}
+	if "xmlValue" != cnf.Get("foo.baz") {
+		t.Fatalf("foo.baz = %s", cnf.Get("foo.baz"))
+	}
+	if "" != cnf.Get("commented.out") {
+		t.Fatal()
+	}
+	if 4611686018427387904 != cnf.GetInt64("cmdline.opt") {
+		t.Fatal()
+	}
+}
+
+// Test our handling of the HTRACED_CONF_DIR environment variable.
+func TestGetHTracedConfDirs(t *testing.T) {
+	os.Setenv("HTRACED_CONF_DIR", "")
+	dlog := new(bytes.Buffer)
+	dirs := getHTracedConfDirs(dlog)
+	if len(dirs) != 1 || dirs[0] != getDefaultHTracedConfDir() {
+		t.Fatal()
+	}
+	os.Setenv("HTRACED_CONF_DIR", "/foo/bar:/baz")
+	dirs = getHTracedConfDirs(dlog)
+	if len(dirs) != 2 || dirs[0] != "/foo/bar" || dirs[1] != "/baz" {
+		t.Fatal()
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/conf/xml.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/conf/xml.go b/htrace-htraced/go/src/htrace/conf/xml.go
new file mode 100644
index 0000000..de14bc5
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/conf/xml.go
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package conf
+
+import (
+	"encoding/xml"
+	"io"
+	"log"
+)
+
+type configuration struct {
+	Properties []propertyXml `xml:"property"`
+}
+
+type propertyXml struct {
+	Name  string `xml:"name"`
+	Value string `xml:"value"`
+}
+
+// Parse an XML configuration file.
+func parseXml(reader io.Reader, m map[string]string) error {
+	dec := xml.NewDecoder(reader)
+	configurationXml := configuration{}
+	err := dec.Decode(&configurationXml)
+	if err != nil {
+		return err
+	}
+	props := configurationXml.Properties
+	for p := range props {
+		key := props[p].Name
+		value := props[p].Value
+		if key == "" {
+			log.Println("Warning: ignoring element with missing or empty <name>.")
+			continue
+		}
+		if value == "" {
+			log.Println("Warning: ignoring element with key " + key + " because it has a missing or empty <value>.")
+			continue
+		}
+		//log.Printf("setting %s to %s\n", key, value)
+		m[key] = value
+	}
+	return nil
+}
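
A short sketch of how parseXml might be called directly; the exampleParse helper and its XML literal are hypothetical, and the standard strings package is assumed for the reader:

	// Illustrative only: parse a small configuration document into a map.
	func exampleParse() (map[string]string, error) {
		m := make(map[string]string)
		xmlText := `<configuration>
	  <property><name>foo.bar</name><value>123</value></property>
	</configuration>`
		if err := parseXml(strings.NewReader(xmlText), m); err != nil {
			return nil, err
		}
		// m["foo.bar"] is now "123".
		return m, nil
	}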

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/client_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/client_test.go b/htrace-htraced/go/src/htrace/htraced/client_test.go
new file mode 100644
index 0000000..6b50097
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/client_test.go
@@ -0,0 +1,484 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"fmt"
+	"github.com/ugorji/go/codec"
+	htrace "htrace/client"
+	"htrace/common"
+	"htrace/conf"
+	"htrace/test"
+	"math"
+	"math/rand"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"testing"
+	"time"
+)
+
+func TestClientGetServerVersion(t *testing.T) {
+	htraceBld := &MiniHTracedBuilder{Name: "TestClientGetServerVersion",
+		DataDirs: make([]string, 2)}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	defer ht.Close()
+	var hcl *htrace.Client
+	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
+	if err != nil {
+		t.Fatalf("failed to create client: %s", err.Error())
+	}
+	defer hcl.Close()
+	_, err = hcl.GetServerVersion()
+	if err != nil {
+		t.Fatalf("failed to call GetServerVersion: %s", err.Error())
+	}
+}
+
+func TestClientGetServerDebugInfo(t *testing.T) {
+	htraceBld := &MiniHTracedBuilder{Name: "TestClientGetServerDebugInfo",
+		DataDirs: make([]string, 2)}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	defer ht.Close()
+	var hcl *htrace.Client
+	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
+	if err != nil {
+		t.Fatalf("failed to create client: %s", err.Error())
+	}
+	defer hcl.Close()
+	debugInfo, err := hcl.GetServerDebugInfo()
+	if err != nil {
+		t.Fatalf("failed to call GetServerDebugInfo: %s", err.Error())
+	}
+	if debugInfo.StackTraces == "" {
+		t.Fatalf(`debugInfo.StackTraces == ""`)
+	}
+	if debugInfo.GCStats == "" {
+		t.Fatalf(`debugInfo.GCStats == ""`)
+	}
+}
+
+func createRandomTestSpans(amount int) common.SpanSlice {
+	rnd := rand.New(rand.NewSource(2))
+	allSpans := make(common.SpanSlice, amount)
+	allSpans[0] = test.NewRandomSpan(rnd, allSpans[0:0])
+	for i := 1; i < amount; i++ {
+		allSpans[i] = test.NewRandomSpan(rnd, allSpans[1:i])
+	}
+	allSpans[1].SpanData.Parents = []common.SpanId{common.SpanId(allSpans[0].Id)}
+	return allSpans
+}
+
+func TestClientOperations(t *testing.T) {
+	htraceBld := &MiniHTracedBuilder{Name: "TestClientOperations",
+		DataDirs:     make([]string, 2),
+		WrittenSpans: common.NewSemaphore(0),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	defer ht.Close()
+	var hcl *htrace.Client
+	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
+	if err != nil {
+		t.Fatalf("failed to create client: %s", err.Error())
+	}
+	defer hcl.Close()
+
+	// Create some random trace spans.
+	NUM_TEST_SPANS := 30
+	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
+
+	// Write half of the spans to htraced via the client.
+	err = hcl.WriteSpans(allSpans[0 : NUM_TEST_SPANS/2])
+	if err != nil {
+		t.Fatalf("WriteSpans(0:%d) failed: %s\n", NUM_TEST_SPANS/2,
+			err.Error())
+	}
+	ht.Store.WrittenSpans.Waits(int64(NUM_TEST_SPANS / 2))
+
+	// Look up the first half of the spans.  They should be found.
+	var span *common.Span
+	for i := 0; i < NUM_TEST_SPANS/2; i++ {
+		span, err = hcl.FindSpan(allSpans[i].Id)
+		if err != nil {
+			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
+		}
+		common.ExpectSpansEqual(t, allSpans[i], span)
+	}
+
+	// Look up the second half of the spans.  They should not be found.
+	for i := NUM_TEST_SPANS / 2; i < NUM_TEST_SPANS; i++ {
+		span, err = hcl.FindSpan(allSpans[i].Id)
+		if err != nil {
+			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
+		}
+		if span != nil {
+			t.Fatalf("Unexpectedly found a span we never wrote to "+
+				"the server: FindSpan(%d) succeeded\n", i)
+		}
+	}
+
+	// Test FindChildren
+	childSpan := allSpans[1]
+	parentId := childSpan.Parents[0]
+	var children []common.SpanId
+	children, err = hcl.FindChildren(parentId, 1)
+	if err != nil {
+		t.Fatalf("FindChildren(%s) failed: %s\n", parentId, err.Error())
+	}
+	if len(children) != 1 {
+		t.Fatalf("FindChildren(%s) returned an invalid number of "+
+			"children: expected %d, got %d\n", parentId, 1, len(children))
+	}
+	if !children[0].Equal(childSpan.Id) {
+		t.Fatalf("FindChildren(%s) returned an invalid child id: expected %s, "+
+			" got %s\n", parentId, childSpan.Id, children[0])
+	}
+
+	// Test FindChildren on a span that has no children
+	childlessSpan := allSpans[NUM_TEST_SPANS/2]
+	children, err = hcl.FindChildren(childlessSpan.Id, 10)
+	if err != nil {
+		t.Fatalf("FindChildren(%s) failed: %s\n", childlessSpan.Id, err.Error())
+	}
+	if len(children) != 0 {
+		t.Fatalf("FindChildren(%s) returned an invalid number of "+
+			"children: expected %d, got %d\n", childlessSpan.Id, 0, len(children))
+	}
+
+	// Test Query
+	var query common.Query
+	query = common.Query{Lim: 10}
+	spans, err := hcl.Query(&query)
+	if err != nil {
+		t.Fatalf("Query({lim: %d}) failed: %s\n", 10, err.Error())
+	}
+	if len(spans) != 10 {
+		t.Fatalf("Query({lim: %d}) returned an invalid number of "+
+			"results: expected %d, got %d\n", 10, 10, len(spans))
+	}
+}
+
+func TestDumpAll(t *testing.T) {
+	htraceBld := &MiniHTracedBuilder{Name: "TestDumpAll",
+		DataDirs:     make([]string, 2),
+		WrittenSpans: common.NewSemaphore(0),
+		Cnf: map[string]string{
+			conf.HTRACE_LOG_LEVEL: "INFO",
+		},
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	defer ht.Close()
+	var hcl *htrace.Client
+	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
+	if err != nil {
+		t.Fatalf("failed to create client: %s", err.Error())
+	}
+	defer hcl.Close()
+
+	NUM_TEST_SPANS := 100
+	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
+	sort.Sort(allSpans)
+	err = hcl.WriteSpans(allSpans)
+	if err != nil {
+		t.Fatalf("WriteSpans failed: %s\n", err.Error())
+	}
+	ht.Store.WrittenSpans.Waits(int64(NUM_TEST_SPANS))
+	out := make(chan *common.Span, NUM_TEST_SPANS)
+	var dumpErr error
+	go func() {
+		dumpErr = hcl.DumpAll(3, out)
+	}()
+	var numSpans int
+	nextLogTime := time.Now().Add(time.Millisecond * 5)
+	for {
+		span, channelOpen := <-out
+		if !channelOpen {
+			break
+		}
+		common.ExpectSpansEqual(t, allSpans[numSpans], span)
+		numSpans++
+		if testing.Verbose() {
+			now := time.Now()
+			if !now.Before(nextLogTime) {
+				nextLogTime = now.Add(time.Millisecond * 5)
+				fmt.Printf("read back %d span(s)...\n", numSpans)
+			}
+		}
+	}
+	if numSpans != len(allSpans) {
+		t.Fatalf("expected to read %d spans... but only read %d\n",
+			len(allSpans), numSpans)
+	}
+	if dumpErr != nil {
+		t.Fatalf("got dump error %s\n", dumpErr.Error())
+	}
+}
+
+const EXAMPLE_CONF_KEY = "example.conf.key"
+const EXAMPLE_CONF_VALUE = "foo.bar.baz"
+
+func TestClientGetServerConf(t *testing.T) {
+	htraceBld := &MiniHTracedBuilder{Name: "TestClientGetServerConf",
+		Cnf: map[string]string{
+			EXAMPLE_CONF_KEY: EXAMPLE_CONF_VALUE,
+		},
+		DataDirs: make([]string, 2)}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	defer ht.Close()
+	var hcl *htrace.Client
+	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
+	if err != nil {
+		t.Fatalf("failed to create client: %s", err.Error())
+	}
+	defer hcl.Close()
+	serverCnf, err2 := hcl.GetServerConf()
+	if err2 != nil {
+		t.Fatalf("failed to call GetServerConf: %s", err2.Error())
+	}
+	if serverCnf[EXAMPLE_CONF_KEY] != EXAMPLE_CONF_VALUE {
+		t.Fatalf("unexpected value for %s: got %s, expected %s",
+			EXAMPLE_CONF_KEY, serverCnf[EXAMPLE_CONF_KEY], EXAMPLE_CONF_VALUE)
+	}
+}
+
+const TEST_NUM_HRPC_HANDLERS = 2
+
+const TEST_NUM_WRITESPANS = 4
+
+// Tests that HRPC limits the number of simultaneous connections being processed.
+func TestHrpcAdmissionsControl(t *testing.T) {
+	var wg sync.WaitGroup
+	wg.Add(TEST_NUM_WRITESPANS)
+	var numConcurrentHrpcCalls int32
+	testHooks := &hrpcTestHooks{
+		HandleAdmission: func() {
+			defer wg.Done()
+			n := atomic.AddInt32(&numConcurrentHrpcCalls, 1)
+			if n > TEST_NUM_HRPC_HANDLERS {
+				t.Fatalf("The number of concurrent HRPC calls went above "+
+					"%d: it's at %d\n", TEST_NUM_HRPC_HANDLERS, n)
+			}
+			time.Sleep(1 * time.Millisecond)
+			n = atomic.AddInt32(&numConcurrentHrpcCalls, -1)
+			if n >= TEST_NUM_HRPC_HANDLERS {
+				t.Fatalf("The number of concurrent HRPC calls went above "+
+					"%d: it was at %d\n", TEST_NUM_HRPC_HANDLERS, n+1)
+			}
+		},
+	}
+	htraceBld := &MiniHTracedBuilder{Name: "TestHrpcAdmissionsControl",
+		DataDirs: make([]string, 2),
+		Cnf: map[string]string{
+			conf.HTRACE_NUM_HRPC_HANDLERS: fmt.Sprintf("%d", TEST_NUM_HRPC_HANDLERS),
+		},
+		WrittenSpans:  common.NewSemaphore(0),
+		HrpcTestHooks: testHooks,
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	defer ht.Close()
+	var hcl *htrace.Client
+	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
+	if err != nil {
+		t.Fatalf("failed to create client: %s", err.Error())
+	}
+	// Create some random trace spans.
+	allSpans := createRandomTestSpans(TEST_NUM_WRITESPANS)
+	for iter := 0; iter < TEST_NUM_WRITESPANS; iter++ {
+		go func(i int) {
+			err = hcl.WriteSpans(allSpans[i : i+1])
+			if err != nil {
+				t.Fatalf("WriteSpans failed: %s\n", err.Error())
+			}
+		}(iter)
+	}
+	wg.Wait()
+	ht.Store.WrittenSpans.Waits(int64(TEST_NUM_WRITESPANS))
+}
+
+// Tests that HRPC I/O timeouts work.
+func TestHrpcIoTimeout(t *testing.T) {
+	htraceBld := &MiniHTracedBuilder{Name: "TestHrpcIoTimeout",
+		DataDirs: make([]string, 2),
+		Cnf: map[string]string{
+			conf.HTRACE_NUM_HRPC_HANDLERS:  fmt.Sprintf("%d", TEST_NUM_HRPC_HANDLERS),
+			conf.HTRACE_HRPC_IO_TIMEOUT_MS: "1",
+		},
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	defer ht.Close()
+	var hcl *htrace.Client
+	finishClient := make(chan interface{})
+	defer func() {
+		// Close the finishClient channel, if it hasn't already been closed.
+		defer func() { recover() }()
+		close(finishClient)
+	}()
+	testHooks := &htrace.TestHooks{
+		HandleWriteRequestBody: func() {
+			<-finishClient
+		},
+	}
+	hcl, err = htrace.NewClient(ht.ClientConf(), testHooks)
+	if err != nil {
+		t.Fatalf("failed to create client: %s", err.Error())
+	}
+	// Create some random trace spans.
+	allSpans := createRandomTestSpans(TEST_NUM_WRITESPANS)
+	var wg sync.WaitGroup
+	wg.Add(TEST_NUM_WRITESPANS)
+	for iter := 0; iter < TEST_NUM_WRITESPANS; iter++ {
+		go func(i int) {
+			defer wg.Done()
+			// Ignore the error return because there are internal retries in
+			// the client which will make this succeed eventually, usually.
+			// Keep in mind that we only block until we have seen
+			// TEST_NUM_WRITESPANS I/O errors in the HRPC server-- after that,
+			// we let requests through so that the test can exit cleanly.
+			hcl.WriteSpans(allSpans[i : i+1])
+		}(iter)
+	}
+	for {
+		if ht.Hsv.GetNumIoErrors() >= TEST_NUM_WRITESPANS {
+			break
+		}
+		time.Sleep(1000 * time.Nanosecond)
+	}
+	close(finishClient)
+	wg.Wait()
+}
+
+func doWriteSpans(name string, N int, maxSpansPerRpc uint32, b *testing.B) {
+	htraceBld := &MiniHTracedBuilder{Name: "doWriteSpans",
+		Cnf: map[string]string{
+			conf.HTRACE_LOG_LEVEL:         "INFO",
+			conf.HTRACE_NUM_HRPC_HANDLERS: "20",
+		},
+		WrittenSpans: common.NewSemaphore(int64(1 - N)),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		panic(err)
+	}
+	defer ht.Close()
+	rnd := rand.New(rand.NewSource(1))
+	allSpans := make([]*common.Span, N)
+	for n := 0; n < N; n++ {
+		allSpans[n] = test.NewRandomSpan(rnd, allSpans[0:n])
+	}
+	// Determine how many calls to WriteSpans we should make.  Each writeSpans
+	// message should be small enough so that it doesn't exceed the max RPC
+	// body length limit.  TODO: a production-quality golang client would do
+	// this internally rather than needing us to do it here in the unit test.
+	bodyLen := (4 * common.MAX_HRPC_BODY_LENGTH) / 5
+	reqs := make([][]*common.Span, 0, 4)
+	curReq := -1
+	curReqLen := bodyLen
+	var curReqSpans uint32
+	mh := new(codec.MsgpackHandle)
+	mh.WriteExt = true
+	var mbuf [8192]byte
+	buf := mbuf[:0]
+	enc := codec.NewEncoderBytes(&buf, mh)
+	for n := 0; n < N; n++ {
+		span := allSpans[n]
+		if (curReqSpans >= maxSpansPerRpc) ||
+			(curReqLen >= bodyLen) {
+			reqs = append(reqs, make([]*common.Span, 0, 16))
+			curReqLen = 0
+			curReq++
+			curReqSpans = 0
+		}
+		buf = mbuf[:0]
+		enc.ResetBytes(&buf)
+		err := enc.Encode(span)
+		if err != nil {
+			panic(fmt.Sprintf("Error encoding span %s: %s\n",
+				span.String(), err.Error()))
+		}
+		bufLen := len(buf)
+		if bufLen > (bodyLen / 5) {
+			panic(fmt.Sprintf("Span too long at %d bytes\n", bufLen))
+		}
+		curReqLen += bufLen
+		reqs[curReq] = append(reqs[curReq], span)
+		curReqSpans++
+	}
+	ht.Store.lg.Infof("num spans: %d.  num WriteSpansReq calls: %d\n", N, len(reqs))
+	var hcl *htrace.Client
+	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
+	if err != nil {
+		panic(fmt.Sprintf("failed to create client: %s", err.Error()))
+	}
+	defer hcl.Close()
+
+	// Reset the timer to avoid including the time required to create new
+	// random spans in the benchmark total.
+	if b != nil {
+		b.ResetTimer()
+	}
+
+	// Write many random spans.
+	for reqIdx := range reqs {
+		go func(i int) {
+			err = hcl.WriteSpans(reqs[i])
+			if err != nil {
+				panic(fmt.Sprintf("failed to send WriteSpans request %d: %s",
+					i, err.Error()))
+			}
+		}(reqIdx)
+	}
+	// Wait for all the spans to be written.
+	ht.Store.WrittenSpans.Wait()
+}
+
+// This is a test of how quickly we can create new spans via WriteSpans RPCs.
+// Like BenchmarkDatastoreWrites, it creates b.N spans in the datastore.
+// Unlike that benchmark, it sends the spans via RPC.
+// Suggested flags for running this:
+// -tags unsafe -cpu 16 -benchtime=1m
+func BenchmarkWriteSpans(b *testing.B) {
+	doWriteSpans("BenchmarkWriteSpans", b.N, math.MaxUint32, b)
+}
+
+func TestWriteSpansRpcs(t *testing.T) {
+	doWriteSpans("TestWriteSpansRpcs", 3000, 1000, nil)
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/datastore.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/datastore.go b/htrace-htraced/go/src/htrace/htraced/datastore.go
new file mode 100644
index 0000000..26531af
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/datastore.go
@@ -0,0 +1,1339 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"github.com/jmhodges/levigo"
+	"github.com/ugorji/go/codec"
+	"htrace/common"
+	"htrace/conf"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+//
+// The data store code for HTraced.
+//
+// This code stores the trace spans.  We use levelDB here so that we don't have to store everything
+// in memory at all times.  The data is sharded across multiple levelDB databases in multiple
+// directories.  Normally, these multiple directories will be on multiple disk drives.
+//
+// The main emphasis in the HTraceD data store is on quickly and efficiently storing trace span data
+// coming from many daemons.  Durability is not as big a concern as in some data stores, since
+// losing a little bit of trace data if htraced goes down is not critical.  We use msgpack
+// for serialization.  We assume that there will be many more writes than reads.
+//
+// Schema
+// w -> ShardInfo
+// s[8-byte-big-endian-sid] -> SpanData
+// b[8-byte-big-endian-begin-time][8-byte-big-endian-child-sid] -> {}
+// e[8-byte-big-endian-end-time][8-byte-big-endian-child-sid] -> {}
+// d[8-byte-big-endian-duration][8-byte-big-endian-child-sid] -> {}
+// p[8-byte-big-endian-parent-sid][8-byte-big-endian-child-sid] -> {}
+//
+// Note that span IDs are unsigned 64-bit numbers.
+// Begin times, end times, and durations are signed 64-bit numbers.
+// In order to get LevelDB to properly compare the signed 64-bit quantities,
+// we flip the highest bit.  This way, we can get leveldb to view negative
+// quantities as less than non-negative ones.  This also means that we can do
+// all queries using unsigned 64-bit math, rather than having to special-case
+// the signed fields.
+//
+
+var EMPTY_BYTE_BUF []byte = []byte{}
+
+const SPAN_ID_INDEX_PREFIX = 's'
+const BEGIN_TIME_INDEX_PREFIX = 'b'
+const END_TIME_INDEX_PREFIX = 'e'
+const DURATION_INDEX_PREFIX = 'd'
+const PARENT_ID_INDEX_PREFIX = 'p'
+const INVALID_INDEX_PREFIX = 0
+
+// The maximum span expiry time, in milliseconds.
+// For all practical purposes this is "never" since it's more than a million years.
+const MAX_SPAN_EXPIRY_MS = 0x7ffffffffffffff
+
+type IncomingSpan struct {
+	// The address that the span was sent from.
+	Addr string
+
+	// The span.
+	*common.Span
+
+	// Serialized span data
+	SpanDataBytes []byte
+}
+
+// A single directory containing a levelDB instance.
+type shard struct {
+	// The data store that this shard is part of
+	store *dataStore
+
+	// The LevelDB instance.
+	ldb *levigo.DB
+
+	// The path to the leveldb directory this shard is managing.
+	path string
+
+	// Incoming requests to write Spans.
+	incoming chan []*IncomingSpan
+
+	// A channel for incoming heartbeats
+	heartbeats chan interface{}
+
+	// Tracks whether the shard goroutine has exited.
+	exited sync.WaitGroup
+}
+
+// Process incoming spans for a shard.
+func (shd *shard) processIncoming() {
+	lg := shd.store.lg
+	defer func() {
+		lg.Infof("Shard processor for %s exiting.\n", shd.path)
+		shd.exited.Done()
+	}()
+	for {
+		select {
+		case spans := <-shd.incoming:
+			if spans == nil {
+				return
+			}
+			totalWritten := 0
+			totalDropped := 0
+			for spanIdx := range spans {
+				err := shd.writeSpan(spans[spanIdx])
+				if err != nil {
+					lg.Errorf("Shard processor for %s failed to write span: %s.\n",
+						shd.path, err.Error())
+					totalDropped++
+				} else {
+					if lg.TraceEnabled() {
+						lg.Tracef("Shard processor for %s wrote span %s.\n",
+							shd.path, spans[spanIdx].ToJson())
+					}
+					totalWritten++
+				}
+			}
+			shd.store.msink.UpdatePersisted(spans[0].Addr, totalWritten, totalDropped)
+			if shd.store.WrittenSpans != nil {
+				lg.Debugf("Shard %s incrementing WrittenSpans by %d\n", shd.path, len(spans))
+				shd.store.WrittenSpans.Posts(int64(len(spans)))
+			}
+		case <-shd.heartbeats:
+			lg.Tracef("Shard processor for %s handling heartbeat.\n", shd.path)
+			shd.pruneExpired()
+		}
+	}
+}
+
+func (shd *shard) pruneExpired() {
+	lg := shd.store.rpr.lg
+	src, err := CreateReaperSource(shd)
+	if err != nil {
+		lg.Errorf("Error creating reaper source for shd(%s): %s\n",
+			shd.path, err.Error())
+		return
+	}
+	var totalReaped uint64
+	defer func() {
+		src.Close()
+		if totalReaped > 0 {
+			atomic.AddUint64(&shd.store.rpr.ReapedSpans, totalReaped)
+		}
+	}()
+	urdate := s2u64(shd.store.rpr.GetReaperDate())
+	for {
+		span := src.next()
+		if span == nil {
+			lg.Debugf("After reaping %d span(s), no more found in shard %s "+
+				"to reap.\n", totalReaped, shd.path)
+			return
+		}
+		begin := s2u64(span.Begin)
+		if begin >= urdate {
+			lg.Debugf("After reaping %d span(s), the remaining spans in "+
+				"shard %s are new enough to be kept\n",
+				totalReaped, shd.path)
+			return
+		}
+		err = shd.DeleteSpan(span)
+		if err != nil {
+			lg.Errorf("Error deleting span %s from shd(%s): %s\n",
+				span.String(), shd.path, err.Error())
+			return
+		}
+		if lg.TraceEnabled() {
+			lg.Tracef("Reaped span %s from shard %s\n", span.String(), shd.path)
+		}
+		totalReaped++
+	}
+}
+
+// Delete a span from the shard.  Note that leveldb may retain the data until
+// compaction(s) remove it.
+func (shd *shard) DeleteSpan(span *common.Span) error {
+	batch := levigo.NewWriteBatch()
+	defer batch.Close()
+	primaryKey :=
+		append([]byte{SPAN_ID_INDEX_PREFIX}, span.Id.Val()...)
+	batch.Delete(primaryKey)
+	for parentIdx := range span.Parents {
+		key := append(append([]byte{PARENT_ID_INDEX_PREFIX},
+			span.Parents[parentIdx].Val()...), span.Id.Val()...)
+		batch.Delete(key)
+	}
+	beginTimeKey := append(append([]byte{BEGIN_TIME_INDEX_PREFIX},
+		u64toSlice(s2u64(span.Begin))...), span.Id.Val()...)
+	batch.Delete(beginTimeKey)
+	endTimeKey := append(append([]byte{END_TIME_INDEX_PREFIX},
+		u64toSlice(s2u64(span.End))...), span.Id.Val()...)
+	batch.Delete(endTimeKey)
+	durationKey := append(append([]byte{DURATION_INDEX_PREFIX},
+		u64toSlice(s2u64(span.Duration()))...), span.Id.Val()...)
+	batch.Delete(durationKey)
+	err := shd.ldb.Write(shd.store.writeOpts, batch)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert a signed 64-bit number into an unsigned 64-bit number.  We flip the
+// highest bit, so that negative input values map to unsigned numbers which are
+// less than non-negative input values.
+func s2u64(val int64) uint64 {
+	ret := uint64(val)
+	ret ^= 0x8000000000000000
+	return ret
+}
+
+func u64toSlice(val uint64) []byte {
+	return []byte{
+		byte(0xff & (val >> 56)),
+		byte(0xff & (val >> 48)),
+		byte(0xff & (val >> 40)),
+		byte(0xff & (val >> 32)),
+		byte(0xff & (val >> 24)),
+		byte(0xff & (val >> 16)),
+		byte(0xff & (val >> 8)),
+		byte(0xff & (val >> 0))}
+}
+
+func (shd *shard) writeSpan(ispan *IncomingSpan) error {
+	batch := levigo.NewWriteBatch()
+	defer batch.Close()
+	span := ispan.Span
+	primaryKey :=
+		append([]byte{SPAN_ID_INDEX_PREFIX}, span.Id.Val()...)
+	batch.Put(primaryKey, ispan.SpanDataBytes)
+
+	// Add this to the parent index.
+	for parentIdx := range span.Parents {
+		key := append(append([]byte{PARENT_ID_INDEX_PREFIX},
+			span.Parents[parentIdx].Val()...), span.Id.Val()...)
+		batch.Put(key, EMPTY_BYTE_BUF)
+	}
+
+	// Add to the other secondary indices.
+	beginTimeKey := append(append([]byte{BEGIN_TIME_INDEX_PREFIX},
+		u64toSlice(s2u64(span.Begin))...), span.Id.Val()...)
+	batch.Put(beginTimeKey, EMPTY_BYTE_BUF)
+	endTimeKey := append(append([]byte{END_TIME_INDEX_PREFIX},
+		u64toSlice(s2u64(span.End))...), span.Id.Val()...)
+	batch.Put(endTimeKey, EMPTY_BYTE_BUF)
+	durationKey := append(append([]byte{DURATION_INDEX_PREFIX},
+		u64toSlice(s2u64(span.Duration()))...), span.Id.Val()...)
+	batch.Put(durationKey, EMPTY_BYTE_BUF)
+
+	err := shd.ldb.Write(shd.store.writeOpts, batch)
+	if err != nil {
+		shd.store.lg.Errorf("Error writing span %s to leveldb at %s: %s\n",
+			span.String(), shd.path, err.Error())
+		return err
+	}
+	return nil
+}
+
+func (shd *shard) FindChildren(sid common.SpanId, childIds []common.SpanId,
+	lim int32) ([]common.SpanId, int32, error) {
+	searchKey := append([]byte{PARENT_ID_INDEX_PREFIX}, sid.Val()...)
+	iter := shd.ldb.NewIterator(shd.store.readOpts)
+	defer iter.Close()
+	iter.Seek(searchKey)
+	for {
+		if !iter.Valid() {
+			break
+		}
+		if lim == 0 {
+			break
+		}
+		key := iter.Key()
+		if !bytes.HasPrefix(key, searchKey) {
+			break
+		}
+		id := common.SpanId(key[17:])
+		childIds = append(childIds, id)
+		lim--
+		iter.Next()
+	}
+	return childIds, lim, nil
+}
+
+// Close a shard.
+func (shd *shard) Close() {
+	lg := shd.store.lg
+	shd.incoming <- nil
+	lg.Infof("Waiting for %s to exit...\n", shd.path)
+	shd.exited.Wait()
+	shd.ldb.Close()
+	lg.Infof("Closed %s...\n", shd.path)
+}
+
+type Reaper struct {
+	// The logger used by the reaper
+	lg *common.Logger
+
+	// The number of milliseconds to keep spans around.
+	spanExpiryMs int64
+
+	// The oldest date for which we'll keep spans.
+	reaperDate int64
+
+	// A channel used to send heartbeats to the reaper
+	heartbeats chan interface{}
+
+	// Tracks whether the reaper goroutine has exited
+	exited sync.WaitGroup
+
+	// The lock protecting reaper data.
+	lock sync.Mutex
+
+	// The reaper heartbeater
+	hb *Heartbeater
+
+	// The total number of spans which have been reaped.
+	ReapedSpans uint64
+}
+
+func NewReaper(cnf *conf.Config) *Reaper {
+	rpr := &Reaper{
+		lg:           common.NewLogger("reaper", cnf),
+		spanExpiryMs: cnf.GetInt64(conf.HTRACE_SPAN_EXPIRY_MS),
+		heartbeats:   make(chan interface{}, 1),
+	}
+	if rpr.spanExpiryMs >= MAX_SPAN_EXPIRY_MS || rpr.spanExpiryMs <= 0 {
+		rpr.spanExpiryMs = MAX_SPAN_EXPIRY_MS
+	}
+	rpr.hb = NewHeartbeater("ReaperHeartbeater",
+		cnf.GetInt64(conf.HTRACE_REAPER_HEARTBEAT_PERIOD_MS), rpr.lg)
+	rpr.exited.Add(1)
+	go rpr.run()
+	rpr.hb.AddHeartbeatTarget(&HeartbeatTarget{
+		name:       "reaper",
+		targetChan: rpr.heartbeats,
+	})
+	var when string
+	if rpr.spanExpiryMs >= MAX_SPAN_EXPIRY_MS {
+		when = "never"
+	} else {
+		when = "after " + (time.Duration(rpr.spanExpiryMs) * time.Millisecond).String()
+	}
+	rpr.lg.Infof("Initializing span reaper: span timeout = %s.\n", when)
+	return rpr
+}
+
+func (rpr *Reaper) run() {
+	defer func() {
+		rpr.lg.Info("Exiting Reaper goroutine.\n")
+		rpr.exited.Done()
+	}()
+
+	for {
+		_, isOpen := <-rpr.heartbeats
+		if !isOpen {
+			return
+		}
+		rpr.handleHeartbeat()
+	}
+}
+
+func (rpr *Reaper) handleHeartbeat() {
+	// TODO: check dataStore fullness
+	now := common.TimeToUnixMs(time.Now().UTC())
+	d, updated := func() (int64, bool) {
+		rpr.lock.Lock()
+		defer rpr.lock.Unlock()
+		newReaperDate := now - rpr.spanExpiryMs
+		if newReaperDate > rpr.reaperDate {
+			rpr.reaperDate = newReaperDate
+			return rpr.reaperDate, true
+		} else {
+			return rpr.reaperDate, false
+		}
+	}()
+	if rpr.lg.DebugEnabled() {
+		if updated {
+			rpr.lg.Debugf("Updating UTC reaper date to %s.\n",
+				common.UnixMsToTime(d).Format(time.RFC3339))
+		} else {
+			rpr.lg.Debugf("Not updating previous reaperDate of %s.\n",
+				common.UnixMsToTime(d).Format(time.RFC3339))
+		}
+	}
+}
+
+func (rpr *Reaper) GetReaperDate() int64 {
+	rpr.lock.Lock()
+	defer rpr.lock.Unlock()
+	return rpr.reaperDate
+}
+
+func (rpr *Reaper) SetReaperDate(rdate int64) {
+	rpr.lock.Lock()
+	defer rpr.lock.Unlock()
+	rpr.reaperDate = rdate
+}
+
+func (rpr *Reaper) Shutdown() {
+	rpr.hb.Shutdown()
+	close(rpr.heartbeats)
+}
+
+// The Data Store.
+type dataStore struct {
+	lg *common.Logger
+
+	// The shards which manage our LevelDB instances.
+	shards []*shard
+
+	// The read options to use for LevelDB.
+	readOpts *levigo.ReadOptions
+
+	// The write options to use for LevelDB.
+	writeOpts *levigo.WriteOptions
+
+	// If non-null, a semaphore we will increment once for each span we receive.
+	// Used for testing.
+	WrittenSpans *common.Semaphore
+
+	// The metrics sink.
+	msink *MetricsSink
+
+	// The heartbeater which periodically asks shards to update the MetricsSink.
+	hb *Heartbeater
+
+	// The reaper for this datastore
+	rpr *Reaper
+
+	// When this datastore was started (in UTC milliseconds since the epoch)
+	startMs int64
+}
+
+func CreateDataStore(cnf *conf.Config, writtenSpans *common.Semaphore) (*dataStore, error) {
+	dld := NewDataStoreLoader(cnf)
+	defer dld.Close()
+	err := dld.Load()
+	if err != nil {
+		dld.lg.Errorf("Error loading datastore: %s\n", err.Error())
+		return nil, err
+	}
+	store := &dataStore{
+		lg:           dld.lg,
+		shards:       make([]*shard, len(dld.shards)),
+		readOpts:     dld.readOpts,
+		writeOpts:    dld.writeOpts,
+		WrittenSpans: writtenSpans,
+		msink:        NewMetricsSink(cnf),
+		hb: NewHeartbeater("DatastoreHeartbeater",
+			cnf.GetInt64(conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS), dld.lg),
+		rpr:     NewReaper(cnf),
+		startMs: common.TimeToUnixMs(time.Now().UTC()),
+	}
+	spanBufferSize := cnf.GetInt(conf.HTRACE_DATA_STORE_SPAN_BUFFER_SIZE)
+	for shdIdx := range store.shards {
+		shd := &shard{
+			store:      store,
+			ldb:        dld.shards[shdIdx].ldb,
+			path:       dld.shards[shdIdx].path,
+			incoming:   make(chan []*IncomingSpan, spanBufferSize),
+			heartbeats: make(chan interface{}, 1),
+		}
+		shd.exited.Add(1)
+		go shd.processIncoming()
+		store.shards[shdIdx] = shd
+		store.hb.AddHeartbeatTarget(&HeartbeatTarget{
+			name:       fmt.Sprintf("shard(%s)", shd.path),
+			targetChan: shd.heartbeats,
+		})
+	}
+	dld.DisownResources()
+	return store, nil
+}
+
+// Close the DataStore.
+func (store *dataStore) Close() {
+	if store.hb != nil {
+		store.hb.Shutdown()
+		store.hb = nil
+	}
+	for idx := range store.shards {
+		if store.shards[idx] != nil {
+			store.shards[idx].Close()
+			store.shards[idx] = nil
+		}
+	}
+	if store.rpr != nil {
+		store.rpr.Shutdown()
+		store.rpr = nil
+	}
+	if store.readOpts != nil {
+		store.readOpts.Close()
+		store.readOpts = nil
+	}
+	if store.writeOpts != nil {
+		store.writeOpts.Close()
+		store.writeOpts = nil
+	}
+	if store.lg != nil {
+		store.lg.Close()
+		store.lg = nil
+	}
+}
+
+// Get the index of the shard which stores the given spanId.
+func (store *dataStore) getShardIndex(sid common.SpanId) int {
+	return int(sid.Hash32() % uint32(len(store.shards)))
+}
+
+const WRITESPANS_BATCH_SIZE = 128
+
+// SpanIngestor is a class used internally to ingest spans from an RPC
+// endpoint.  It groups spans destined for a particular shard into small
+// batches, so that we can reduce the number of objects that need to be sent
+// over the shard's "incoming" channel.  Since sending objects over a channel
+// requires goroutine synchronization, this improves performance.
+//
+// SpanIngestor also allows us to reuse the same encoder object for many spans,
+// rather than creating a new encoder per span.  This avoids re-doing the
+// encoder setup for each span, and also generates less garbage.
+type SpanIngestor struct {
+	// The logger to use.
+	lg *common.Logger
+
+	// The dataStore we are ingesting spans into.
+	store *dataStore
+
+	// The remote address these spans are coming from.
+	addr string
+
+	// Default TracerId
+	defaultTrid string
+
+	// The msgpack handle to use to serialize the spans.
+	mh codec.MsgpackHandle
+
+	// The msgpack encoder to use to serialize the spans.
+	// Caching this avoids generating a lot of garbage and burning CPUs
+	// creating new encoder objects for each span.
+	enc *codec.Encoder
+
+	// The buffer which codec.Encoder is currently serializing to.
+	// We have to create a new buffer for each span because once we hand it off to the shard, the
+	// shard manages the buffer lifecycle.
+	spanDataBytes []byte
+
+	// An array mapping shard index to span batch.
+	batches []*SpanIngestorBatch
+
+	// The total number of spans ingested.  Includes dropped spans.
+	totalIngested int
+
+	// The total number of spans the ingestor dropped because of a server-side error.
+	serverDropped int
+}
+
+// A batch of spans destined for a particular shard.
+type SpanIngestorBatch struct {
+	incoming []*IncomingSpan
+}
+
+func (store *dataStore) NewSpanIngestor(lg *common.Logger,
+	addr string, defaultTrid string) *SpanIngestor {
+	ing := &SpanIngestor{
+		lg:            lg,
+		store:         store,
+		addr:          addr,
+		defaultTrid:   defaultTrid,
+		spanDataBytes: make([]byte, 0, 1024),
+		batches:       make([]*SpanIngestorBatch, len(store.shards)),
+	}
+	ing.mh.WriteExt = true
+	ing.enc = codec.NewEncoderBytes(&ing.spanDataBytes, &ing.mh)
+	for batchIdx := range ing.batches {
+		ing.batches[batchIdx] = &SpanIngestorBatch{
+			incoming: make([]*IncomingSpan, 0, WRITESPANS_BATCH_SIZE),
+		}
+	}
+	return ing
+}
+
+func (ing *SpanIngestor) IngestSpan(span *common.Span) {
+	ing.totalIngested++
+	// Make sure the span ID is valid.
+	spanIdProblem := span.Id.FindProblem()
+	if spanIdProblem != "" {
+		// Can't print the invalid span ID because String() might fail.
+		ing.lg.Warnf("Invalid span ID: %s\n", spanIdProblem)
+		ing.serverDropped++
+		return
+	}
+
+	// Set the default tracer id, if needed.
+	if span.TracerId == "" {
+		span.TracerId = ing.defaultTrid
+	}
+
+	// Encode the span data.  Doing the encoding here is better than doing it
+	// in the shard goroutine, because we can achieve more parallelism.
+	// There is one shard goroutine per shard, but potentially many more
+	// ingestors per shard.
+	err := ing.enc.Encode(span.SpanData)
+	if err != nil {
+		ing.lg.Warnf("Failed to encode span ID %s: %s\n",
+			span.Id.String(), err.Error())
+		ing.serverDropped++
+		return
+	}
+	spanDataBytes := ing.spanDataBytes
+	ing.spanDataBytes = make([]byte, 0, 1024)
+	ing.enc.ResetBytes(&ing.spanDataBytes)
+
+	// Determine which shard this span should go to.
+	shardIdx := ing.store.getShardIndex(span.Id)
+	batch := ing.batches[shardIdx]
+	incomingLen := len(batch.incoming)
+	if ing.lg.TraceEnabled() {
+		ing.lg.Tracef("SpanIngestor#IngestSpan: spanId=%s, shardIdx=%d, "+
+			"incomingLen=%d, cap(batch.incoming)=%d\n",
+			span.Id.String(), shardIdx, incomingLen, cap(batch.incoming))
+	}
+	if incomingLen+1 == cap(batch.incoming) {
+		if ing.lg.TraceEnabled() {
+			ing.lg.Tracef("SpanIngestor#IngestSpan: flushing %d spans for "+
+				"shard %d\n", len(batch.incoming), shardIdx)
+		}
+		ing.store.WriteSpans(shardIdx, batch.incoming)
+		batch.incoming = make([]*IncomingSpan, 1, WRITESPANS_BATCH_SIZE)
+		incomingLen = 0
+	} else {
+		batch.incoming = batch.incoming[0 : incomingLen+1]
+	}
+	batch.incoming[incomingLen] = &IncomingSpan{
+		Addr:          ing.addr,
+		Span:          span,
+		SpanDataBytes: spanDataBytes,
+	}
+}
+
+func (ing *SpanIngestor) Close(startTime time.Time) {
+	for shardIdx := range ing.batches {
+		batch := ing.batches[shardIdx]
+		if len(batch.incoming) > 0 {
+			if ing.lg.TraceEnabled() {
+				ing.lg.Tracef("SpanIngestor#Close: flushing %d span(s) for "+
+					"shard %d\n", len(batch.incoming), shardIdx)
+			}
+			ing.store.WriteSpans(shardIdx, batch.incoming)
+		}
+		batch.incoming = nil
+	}
+	ing.lg.Debugf("Closed span ingestor for %s.  Ingested %d span(s); dropped "+
+		"%d span(s).\n", ing.addr, ing.totalIngested, ing.serverDropped)
+
+	endTime := time.Now()
+	ing.store.msink.UpdateIngested(ing.addr, ing.totalIngested,
+		ing.serverDropped, endTime.Sub(startTime))
+}
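
A hedged sketch of the intended ingestor lifecycle, as suggested by NewSpanIngestor, IngestSpan, and Close; store, lg, clientAddr, defaultTrid, and decodedSpans stand in for values an RPC handler would supply:

	// Illustrative only: ingest one RPC's worth of decoded spans.
	startTime := time.Now()
	ing := store.NewSpanIngestor(lg, clientAddr, defaultTrid)
	for _, span := range decodedSpans {
		ing.IngestSpan(span)
	}
	// Close flushes any partially filled per-shard batches and updates metrics.
	ing.Close(startTime)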
+
+func (store *dataStore) WriteSpans(shardIdx int, ispans []*IncomingSpan) {
+	store.shards[shardIdx].incoming <- ispans
+}
+
+func (store *dataStore) FindSpan(sid common.SpanId) *common.Span {
+	return store.shards[store.getShardIndex(sid)].FindSpan(sid)
+}
+
+func (shd *shard) FindSpan(sid common.SpanId) *common.Span {
+	lg := shd.store.lg
+	primaryKey := append([]byte{SPAN_ID_INDEX_PREFIX}, sid.Val()...)
+	buf, err := shd.ldb.Get(shd.store.readOpts, primaryKey)
+	if err != nil {
+		if strings.Contains(err.Error(), "NotFound:") {
+			return nil
+		}
+		lg.Warnf("Shard(%s): FindSpan(%s) error: %s\n",
+			shd.path, sid.String(), err.Error())
+		return nil
+	}
+	var span *common.Span
+	span, err = shd.decodeSpan(sid, buf)
+	if err != nil {
+		lg.Errorf("Shard(%s): FindSpan(%s) decode error: %s decoding [%s]\n",
+			shd.path, sid.String(), err.Error(), hex.EncodeToString(buf))
+		return nil
+	}
+	return span
+}
+
+func (shd *shard) decodeSpan(sid common.SpanId, buf []byte) (*common.Span, error) {
+	r := bytes.NewBuffer(buf)
+	mh := new(codec.MsgpackHandle)
+	mh.WriteExt = true
+	decoder := codec.NewDecoder(r, mh)
+	data := common.SpanData{}
+	err := decoder.Decode(&data)
+	if err != nil {
+		return nil, err
+	}
+	if data.Parents == nil {
+		data.Parents = []common.SpanId{}
+	}
+	return &common.Span{Id: common.SpanId(sid), SpanData: data}, nil
+}
+
+// Find the children of a given span id.
+func (store *dataStore) FindChildren(sid common.SpanId, lim int32) []common.SpanId {
+	childIds := make([]common.SpanId, 0)
+	var err error
+
+	startIdx := store.getShardIndex(sid)
+	idx := startIdx
+	numShards := len(store.shards)
+	for {
+		if lim == 0 {
+			break
+		}
+		shd := store.shards[idx]
+		childIds, lim, err = shd.FindChildren(sid, childIds, lim)
+		if err != nil {
+			store.lg.Errorf("Shard(%s): FindChildren(%s) error: %s\n",
+				shd.path, sid.String(), err.Error())
+		}
+		idx++
+		if idx >= numShards {
+			idx = 0
+		}
+		if idx == startIdx {
+			break
+		}
+	}
+	return childIds
+}
+
+type predicateData struct {
+	*common.Predicate
+	key []byte
+}
+
+func loadPredicateData(pred *common.Predicate) (*predicateData, error) {
+	p := predicateData{Predicate: pred}
+
+	// Parse the input value given to make sure it matches up with the field
+	// type.
+	switch pred.Field {
+	case common.SPAN_ID:
+		// Span IDs are sent as hex strings.
+		var id common.SpanId
+		if err := id.FromString(pred.Val); err != nil {
+			return nil, errors.New(fmt.Sprintf("Unable to parse span id '%s': %s",
+				pred.Val, err.Error()))
+		}
+		p.key = id.Val()
+		break
+	case common.DESCRIPTION:
+		// Any string is valid for a description.
+		p.key = []byte(pred.Val)
+		break
+	case common.BEGIN_TIME, common.END_TIME, common.DURATION:
+		// Parse a base-10 signed numeric field.
+		v, err := strconv.ParseInt(pred.Val, 10, 64)
+		if err != nil {
+			return nil, errors.New(fmt.Sprintf("Unable to parse %s '%s': %s",
+				pred.Field, pred.Val, err.Error()))
+		}
+		p.key = u64toSlice(s2u64(v))
+		break
+	case common.TRACER_ID:
+		// Any string is valid for a tracer ID.
+		p.key = []byte(pred.Val)
+		break
+	default:
+		return nil, errors.New(fmt.Sprintf("Unknown field %s", pred.Field))
+	}
+
+	// Validate the predicate operation.
+	switch pred.Op {
+	case common.EQUALS, common.LESS_THAN_OR_EQUALS,
+		common.GREATER_THAN_OR_EQUALS, common.GREATER_THAN:
+		break
+	case common.CONTAINS:
+		if p.fieldIsNumeric() {
+			return nil, errors.New(fmt.Sprintf("Can't use CONTAINS on a "+
+				"numeric field like '%s'", pred.Field))
+		}
+	default:
+		return nil, errors.New(fmt.Sprintf("Unknown predicate operation '%s'",
+			pred.Op))
+	}
+
+	return &p, nil
+}
+
+// Get the index prefix for this predicate, or 0 if it is not indexed.
+func (pred *predicateData) getIndexPrefix() byte {
+	switch pred.Field {
+	case common.SPAN_ID:
+		return SPAN_ID_INDEX_PREFIX
+	case common.BEGIN_TIME:
+		return BEGIN_TIME_INDEX_PREFIX
+	case common.END_TIME:
+		return END_TIME_INDEX_PREFIX
+	case common.DURATION:
+		return DURATION_INDEX_PREFIX
+	default:
+		return INVALID_INDEX_PREFIX
+	}
+}
+
+// Returns true if the predicate type is numeric.
+func (pred *predicateData) fieldIsNumeric() bool {
+	switch pred.Field {
+	case common.SPAN_ID, common.BEGIN_TIME, common.END_TIME, common.DURATION:
+		return true
+	default:
+		return false
+	}
+}
+
+// Get the values that this predicate cares about for a given span.
+func (pred *predicateData) extractRelevantSpanData(span *common.Span) []byte {
+	switch pred.Field {
+	case common.SPAN_ID:
+		return span.Id.Val()
+	case common.DESCRIPTION:
+		return []byte(span.Description)
+	case common.BEGIN_TIME:
+		return u64toSlice(s2u64(span.Begin))
+	case common.END_TIME:
+		return u64toSlice(s2u64(span.End))
+	case common.DURATION:
+		return u64toSlice(s2u64(span.Duration()))
+	case common.TRACER_ID:
+		return []byte(span.TracerId)
+	default:
+		panic(fmt.Sprintf("Unknown field type %s.", pred.Field))
+	}
+}
+
+func (pred *predicateData) spanPtrIsBefore(a *common.Span, b *common.Span) bool {
+	// nil sorts after everything.
+	if a == nil {
+		return false
+	} else if b == nil {
+		return true
+	}
+	// Compare the spans according to this predicate.
+	aVal := pred.extractRelevantSpanData(a)
+	bVal := pred.extractRelevantSpanData(b)
+	cmp := bytes.Compare(aVal, bVal)
+	if pred.Op.IsDescending() {
+		return cmp > 0
+	} else {
+		return cmp < 0
+	}
+}
+
+type satisfiedByReturn int
+
+const (
+	NOT_SATISFIED satisfiedByReturn = iota
+	NOT_YET_SATISFIED
+	SATISFIED
+)
+
+func (r satisfiedByReturn) String() string {
+	switch r {
+	case NOT_SATISFIED:
+		return "NOT_SATISFIED"
+	case NOT_YET_SATISFIED:
+		return "NOT_YET_SATISFIED"
+	case SATISFIED:
+		return "SATISFIED"
+	default:
+		return "(unknown)"
+	}
+}
+
+// Determine whether the predicate is satisfied by the given span.
+func (pred *predicateData) satisfiedBy(span *common.Span) satisfiedByReturn {
+	val := pred.extractRelevantSpanData(span)
+	switch pred.Op {
+	case common.CONTAINS:
+		if bytes.Contains(val, pred.key) {
+			return SATISFIED
+		} else {
+			return NOT_SATISFIED
+		}
+	case common.EQUALS:
+		if bytes.Equal(val, pred.key) {
+			return SATISFIED
+		} else {
+			return NOT_SATISFIED
+		}
+	case common.LESS_THAN_OR_EQUALS:
+		if bytes.Compare(val, pred.key) <= 0 {
+			return SATISFIED
+		} else {
+			return NOT_YET_SATISFIED
+		}
+	case common.GREATER_THAN_OR_EQUALS:
+		if bytes.Compare(val, pred.key) >= 0 {
+			return SATISFIED
+		} else {
+			return NOT_SATISFIED
+		}
+	case common.GREATER_THAN:
+		cmp := bytes.Compare(val, pred.key)
+		if cmp <= 0 {
+			return NOT_YET_SATISFIED
+		} else {
+			return SATISFIED
+		}
+	default:
+		panic(fmt.Sprintf("unknown Op type %s should have been caught "+
+			"during normalization", pred.Op))
+	}
+}
+
+func (pred *predicateData) createSource(store *dataStore, prev *common.Span) (*source, error) {
+	var ret *source
+	src := source{store: store,
+		pred:      pred,
+		shards:    make([]*shard, len(store.shards)),
+		iters:     make([]*levigo.Iterator, 0, len(store.shards)),
+		nexts:     make([]*common.Span, len(store.shards)),
+		numRead:   make([]int, len(store.shards)),
+		keyPrefix: pred.getIndexPrefix(),
+	}
+	if src.keyPrefix == INVALID_INDEX_PREFIX {
+		return nil, errors.New(fmt.Sprintf("Can't create source from unindexed "+
+			"predicate on field %s", pred.Field))
+	}
+	defer func() {
+		if ret == nil {
+			src.Close()
+		}
+	}()
+	for shardIdx := range store.shards {
+		shd := store.shards[shardIdx]
+		src.shards[shardIdx] = shd
+		src.iters = append(src.iters, shd.ldb.NewIterator(store.readOpts))
+	}
+	var searchKey []byte
+	lg := store.lg
+	if prev != nil {
+		// If prev != nil, this query RPC is the continuation of a previous
+		// one.  The final result returned the last time is 'prev'.
+		//
+		// To avoid returning the same results multiple times, we adjust the
+		// predicate here.  If the predicate is on the span id field, we
+		// simply manipulate the span ID we're looking for.
+		//
+		// If the predicate is on a secondary index, we also use span ID, but
+		// in a slightly different way.  Since the secondary indices are
+		// organized as [type-code][8b-secondary-key][16b-span-id], elements
+		// with the same secondary index field are ordered by span ID.  So we
+		// create a 25-byte key incorporating the span ID from 'prev'.
+		startId := common.INVALID_SPAN_ID
+		switch pred.Op {
+		case common.EQUALS:
+			if pred.Field == common.SPAN_ID {
+				// This is an annoying corner case.  There can only be one
+				// result each time we do an EQUALS search for a span id.
+				// Span id is the primary key for all our spans.
+				// But for some reason someone is asking for another result.
+				// We modify the query to search for the illegal 0 span ID,
+				// which will never be present.
+				if lg.DebugEnabled() {
+					lg.Debugf("Attempted to use a continuation token with an EQUALS "+
+						"SPAN_ID query. %s.  Setting search id = 0",
+						pred.Predicate.String())
+				}
+				startId = common.INVALID_SPAN_ID
+			} else {
+				// When doing an EQUALS search on a secondary index, the
+				// results are sorted by span id.
+				startId = prev.Id.Next()
+			}
+		case common.LESS_THAN_OR_EQUALS:
+			// Subtract one from the previous span id.  Since the previous
+			// span ID will never be 0 (0 is an illegal span id), we'll never
+			// wrap around when doing this.
+			startId = prev.Id.Prev()
+		case common.GREATER_THAN_OR_EQUALS:
+			// We can't add one to the span id, since the previous span ID
+			// might be the maximum value.  So just switch over to using
+			// GREATER_THAN.
+			pred.Op = common.GREATER_THAN
+			startId = prev.Id
+		case common.GREATER_THAN:
+			// This one is easy.
+			startId = prev.Id
+		default:
+			str := fmt.Sprintf("Can't use a %v predicate as a source.", pred.Predicate.String())
+			lg.Error(str + "\n")
+			panic(str)
+		}
+		if pred.Field == common.SPAN_ID {
+			pred.key = startId.Val()
+			searchKey = append([]byte{src.keyPrefix}, startId.Val()...)
+		} else {
+			// Start where the previous query left off.  This means adjusting
+			// our key.
+			pred.key = pred.extractRelevantSpanData(prev)
+			searchKey = append(append([]byte{src.keyPrefix}, pred.key...),
+				startId.Val()...)
+		}
+		if lg.TraceEnabled() {
+			lg.Tracef("Handling continuation token %s for %s.  startId=%d, "+
+				"pred.uintKey=%s\n", prev, pred.Predicate.String(), startId,
+				hex.EncodeToString(pred.key))
+		}
+	} else {
+		searchKey = append([]byte{src.keyPrefix}, pred.key...)
+	}
+	for i := range src.iters {
+		src.iters[i].Seek(searchKey)
+	}
+	ret = &src
+	return ret, nil
+}
+
+// A source of spans.
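+// It merges ordered index scans across every shard: 'iters' holds one
+// LevelDB iterator per shard, 'nexts' buffers the next candidate span from
+// each shard, and 'numRead' counts how many index rows have been scanned in
+// each shard.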
+type source struct {
+	store     *dataStore
+	pred      *predicateData
+	shards    []*shard
+	iters     []*levigo.Iterator
+	nexts     []*common.Span
+	numRead   []int
+	keyPrefix byte
+}
+
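+// CreateReaperSource builds a source that scans a single shard in ascending
+// BEGIN_TIME order, starting from the earliest representable begin time, so
+// that the oldest spans in the shard are returned first.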
+func CreateReaperSource(shd *shard) (*source, error) {
+	store := shd.store
+	p := &common.Predicate{
+		Op:    common.GREATER_THAN_OR_EQUALS,
+		Field: common.BEGIN_TIME,
+		Val:   common.INVALID_SPAN_ID.String(),
+	}
+	pred, err := loadPredicateData(p)
+	if err != nil {
+		return nil, err
+	}
+	src := &source{
+		store:     store,
+		pred:      pred,
+		shards:    []*shard{shd},
+		iters:     make([]*levigo.Iterator, 1),
+		nexts:     make([]*common.Span, 1),
+		numRead:   make([]int, 1),
+		keyPrefix: pred.getIndexPrefix(),
+	}
+	iter := shd.ldb.NewIterator(store.readOpts)
+	src.iters[0] = iter
+	searchKey := append(append([]byte{src.keyPrefix}, pred.key...),
+		pred.key...)
+	iter.Seek(searchKey)
+	return src, nil
+}
+
+// Fill in the entry in the 'next' array for a specific shard.
+func (src *source) populateNextFromShard(shardIdx int) {
+	lg := src.store.lg
+	var err error
+	iter := src.iters[shardIdx]
+	shdPath := src.shards[shardIdx].path
+	if iter == nil {
+		lg.Debugf("Can't populate: No more entries in shard %s\n", shdPath)
+		return // There are no more entries in this shard.
+	}
+	if src.nexts[shardIdx] != nil {
+		lg.Debugf("No need to populate shard %s\n", shdPath)
+		return // We already have a valid entry for this shard.
+	}
+	for {
+		if !iter.Valid() {
+			lg.Debugf("Can't populate: Iterator for shard %s is no longer valid.\n", shdPath)
+			break // Can't read past end of DB
+		}
+		src.numRead[shardIdx]++
+		key := iter.Key()
+		if len(key) < 1 {
+			lg.Warnf("Encountered invalid zero-byte key in shard %s.\n", shdPath)
+			break
+		}
+		ret := src.checkKeyPrefix(key[0], iter)
+		if ret == NOT_SATISFIED {
+			break // Can't read past end of indexed section
+		} else if ret == NOT_YET_SATISFIED {
+			if src.pred.Op.IsDescending() {
+				iter.Prev()
+			} else {
+				iter.Next()
+			}
+			continue // Try again because we are not yet at the indexed section.
+		}
+		var span *common.Span
+		var sid common.SpanId
+		if src.keyPrefix == SPAN_ID_INDEX_PREFIX {
+			// The span id maps to the span itself.
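+			// Primary index keys are laid out as
+			// [prefix byte][16-byte span id].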
+			sid = common.SpanId(key[1:17])
+			span, err = src.shards[shardIdx].decodeSpan(sid, iter.Value())
+			if err != nil {
+				if lg.DebugEnabled() {
+					lg.Debugf("Internal error decoding span %s in shard %s: %s\n",
+						sid.String(), shdPath, err.Error())
+				}
+				break
+			}
+		} else {
+			// With a secondary index, we have to look up the span by id.
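+			// Secondary index keys are laid out as
+			// [prefix byte][8-byte field value][16-byte span id].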
+			sid = common.SpanId(key[9:25])
+			span = src.shards[shardIdx].FindSpan(sid)
+			if span == nil {
+				if lg.DebugEnabled() {
+					lg.Debugf("Internal error rehydrating span %s in shard %s\n",
+						sid.String(), shdPath)
+				}
+				break
+			}
+		}
+		if src.pred.Op.IsDescending() {
+			iter.Prev()
+		} else {
+			iter.Next()
+		}
+		ret = src.pred.satisfiedBy(span)
+		if ret == SATISFIED {
+			if lg.DebugEnabled() {
+				lg.Debugf("Populated valid span %v from shard %s.\n", sid, shdPath)
+			}
+			src.nexts[shardIdx] = span // Found valid entry
+			return
+		}
+		if ret == NOT_SATISFIED {
+			// This and subsequent entries don't satisfy predicate
+			break
+		}
+	}
+	lg.Debugf("Closing iterator for shard %s.\n", shdPath)
+	iter.Close()
+	src.iters[shardIdx] = nil
+}
+
+// Check the key prefix against the key prefix of the query.
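+// Because LevelDB orders keys lexicographically, all entries for a given
+// index prefix byte are stored contiguously, so once the scan moves past
+// that section there is nothing further to read for this query.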
+func (src *source) checkKeyPrefix(kp byte, iter *levigo.Iterator) satisfiedByReturn {
+	if kp == src.keyPrefix {
+		return SATISFIED
+	} else if kp < src.keyPrefix {
+		if src.pred.Op.IsDescending() {
+			return NOT_SATISFIED
+		} else {
+			return NOT_YET_SATISFIED
+		}
+	} else {
+		if src.pred.Op.IsDescending() {
+			return NOT_YET_SATISFIED
+		} else {
+			return NOT_SATISFIED
+		}
+	}
+}
+
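+// next returns the span that sorts first under the predicate's ordering,
+// merging the per-shard streams: each shard's buffer is topped up via
+// populateNextFromShard, the best buffered span (see spanPtrIsBefore) is
+// removed from its buffer and returned, and a nil return means every shard
+// has been exhausted.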
+func (src *source) next() *common.Span {
+	for shardIdx := range src.shards {
+		src.populateNextFromShard(shardIdx)
+	}
+	var best *common.Span
+	bestIdx := -1
+	for shardIdx := range src.iters {
+		span := src.nexts[shardIdx]
+		if src.pred.spanPtrIsBefore(span, best) {
+			best = span
+			bestIdx = shardIdx
+		}
+	}
+	if bestIdx >= 0 {
+		src.nexts[bestIdx] = nil
+	}
+	return best
+}
+
+func (src *source) Close() {
+	for i := range src.iters {
+		if src.iters[i] != nil {
+			src.iters[i].Close()
+		}
+	}
+	src.iters = nil
+}
+
+func (src *source) getStats() string {
+	ret := fmt.Sprintf("Source stats: pred = %s", src.pred.String())
+	prefix := ". "
+	for shardIdx := range src.shards {
+		next := fmt.Sprintf("%sRead %d spans from %s", prefix,
+			src.numRead[shardIdx], src.shards[shardIdx].path)
+		prefix = ", "
+		ret = ret + next
+	}
+	return ret
+}
+
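+// obtainSource picks the first indexed predicate, removes it from 'preds',
+// and turns it into a source of spans.  If no predicate is indexed, it falls
+// back to scanning all spans in span-id order.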
+func (store *dataStore) obtainSource(preds *[]*predicateData, span *common.Span) (*source, error) {
+	// Read spans from the first predicate that is indexed.
+	p := *preds
+	for i := range p {
+		pred := p[i]
+		if pred.getIndexPrefix() != INVALID_INDEX_PREFIX {
+			*preds = append(p[0:i], p[i+1:]...)
+			return pred.createSource(store, span)
+		}
+	}
+	// If there are no predicates that are indexed, read rows in order of span id.
+	spanIdPred := common.Predicate{Op: common.GREATER_THAN_OR_EQUALS,
+		Field: common.SPAN_ID,
+		Val:   common.INVALID_SPAN_ID.String(),
+	}
+	spanIdPredData, err := loadPredicateData(&spanIdPred)
+	if err != nil {
+		return nil, err
+	}
+	return spanIdPredData.createSource(store, span)
+}
+
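+// HandleQuery runs a query against the datastore.  One indexed predicate is
+// used to drive an ordered scan (see obtainSource); each candidate span is
+// then filtered through the remaining predicates until the requested limit
+// is reached or the source runs out.  The return values are the matching
+// spans, an error, and the number of index rows scanned per shard.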
+func (store *dataStore) HandleQuery(query *common.Query) ([]*common.Span, error, []int) {
+	lg := store.lg
+	// Parse predicate data.
+	var err error
+	preds := make([]*predicateData, len(query.Predicates))
+	for i := range query.Predicates {
+		preds[i], err = loadPredicateData(&query.Predicates[i])
+		if err != nil {
+			return nil, err, nil
+		}
+	}
+	// Get a source of rows.
+	var src *source
+	src, err = store.obtainSource(&preds, query.Prev)
+	if err != nil {
+		return nil, err, nil
+	}
+	defer src.Close()
+	if lg.DebugEnabled() {
+		lg.Debugf("HandleQuery %s: preds = %s, src = %v\n", query, preds, src)
+	}
+
+	// Filter the spans through the remaining predicates.
+	reserved := 32
+	if query.Lim < reserved {
+		reserved = query.Lim
+	}
+	ret := make([]*common.Span, 0, reserved)
+	for {
+		if len(ret) >= query.Lim {
+			if lg.DebugEnabled() {
+				lg.Debugf("HandleQuery %s: hit query limit after obtaining "+
+					"%d results. %s\n.", query, query.Lim, src.getStats())
+			}
+			break // we hit the result size limit
+		}
+		span := src.next()
+		if span == nil {
+			if lg.DebugEnabled() {
+				lg.Debugf("HandleQuery %s: found %d result(s), which are "+
+					"all that exist. %s\n", query, len(ret), src.getStats())
+			}
+			break // the source has no more spans to give
+		}
+		if lg.DebugEnabled() {
+			lg.Debugf("src.next returned span %s\n", span.ToJson())
+		}
+		satisfied := true
+		for predIdx := range preds {
+			if preds[predIdx].satisfiedBy(span) != SATISFIED {
+				satisfied = false
+				break
+			}
+		}
+		if satisfied {
+			ret = append(ret, span)
+		}
+	}
+	return ret, nil, src.numRead
+}
+
+func (store *dataStore) ServerStats() *common.ServerStats {
+	serverStats := common.ServerStats{
+		Dirs: make([]common.StorageDirectoryStats, len(store.shards)),
+	}
+	for shardIdx := range store.shards {
+		shard := store.shards[shardIdx]
+		serverStats.Dirs[shardIdx].Path = shard.path
+		r := levigo.Range{
+			Start: []byte{0},
+			Limit: []byte{0xff},
+		}
+		vals := shard.ldb.GetApproximateSizes([]levigo.Range{r})
+		serverStats.Dirs[shardIdx].ApproximateBytes = vals[0]
+		serverStats.Dirs[shardIdx].LevelDbStats =
+			shard.ldb.PropertyValue("leveldb.stats")
+		store.msink.lg.Debugf("levedb.stats for %s: %s\n",
+			shard.path, shard.ldb.PropertyValue("leveldb.stats"))
+	}
+	serverStats.LastStartMs = store.startMs
+	serverStats.CurMs = common.TimeToUnixMs(time.Now().UTC())
+	serverStats.ReapedSpans = atomic.LoadUint64(&store.rpr.ReapedSpans)
+	store.msink.PopulateServerStats(&serverStats)
+	return &serverStats
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/datastore_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/datastore_test.go b/htrace-htraced/go/src/htrace/htraced/datastore_test.go
new file mode 100644
index 0000000..a7ecead
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/datastore_test.go
@@ -0,0 +1,761 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	htrace "htrace/client"
+	"htrace/common"
+	"htrace/conf"
+	"htrace/test"
+	"math/rand"
+	"os"
+	"reflect"
+	"sort"
+	"testing"
+	"time"
+)
+
+// Test creating and tearing down a datastore.
+func TestCreateDatastore(t *testing.T) {
+	htraceBld := &MiniHTracedBuilder{Name: "TestCreateDatastore",
+		DataDirs: make([]string, 3)}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	defer ht.Close()
+}
+
+var SIMPLE_TEST_SPANS []common.Span = []common.Span{
+	common.Span{Id: common.TestId("00000000000000000000000000000001"),
+		SpanData: common.SpanData{
+			Begin:       123,
+			End:         456,
+			Description: "getFileDescriptors",
+			Parents:     []common.SpanId{},
+			TracerId:    "firstd",
+		}},
+	common.Span{Id: common.TestId("00000000000000000000000000000002"),
+		SpanData: common.SpanData{
+			Begin:       125,
+			End:         200,
+			Description: "openFd",
+			Parents:     []common.SpanId{common.TestId("00000000000000000000000000000001")},
+			TracerId:    "secondd",
+		}},
+	common.Span{Id: common.TestId("00000000000000000000000000000003"),
+		SpanData: common.SpanData{
+			Begin:       200,
+			End:         456,
+			Description: "passFd",
+			Parents:     []common.SpanId{common.TestId("00000000000000000000000000000001")},
+			TracerId:    "thirdd",
+		}},
+}
+
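+// createSpans ingests the given spans into the store and blocks until the
+// datastore confirms that all of them have been written.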
+func createSpans(spans []common.Span, store *dataStore) {
+	ing := store.NewSpanIngestor(store.lg, "127.0.0.1", "")
+	for idx := range spans {
+		ing.IngestSpan(&spans[idx])
+	}
+	ing.Close(time.Now())
+	store.WrittenSpans.Waits(int64(len(spans)))
+}
+
+// Test creating a datastore and adding some spans.
+func TestDatastoreWriteAndRead(t *testing.T) {
+	t.Parallel()
+	htraceBld := &MiniHTracedBuilder{Name: "TestDatastoreWriteAndRead",
+		Cnf: map[string]string{
+			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
+		},
+		WrittenSpans: common.NewSemaphore(0),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		panic(err)
+	}
+	defer ht.Close()
+	createSpans(SIMPLE_TEST_SPANS, ht.Store)
+
+	span := ht.Store.FindSpan(common.TestId("00000000000000000000000000000001"))
+	if span == nil {
+		t.Fatal()
+	}
+	if !span.Id.Equal(common.TestId("00000000000000000000000000000001")) {
+		t.Fatal()
+	}
+	common.ExpectSpansEqual(t, &SIMPLE_TEST_SPANS[0], span)
+	children := ht.Store.FindChildren(common.TestId("00000000000000000000000000000001"), 1)
+	if len(children) != 1 {
+		t.Fatalf("expected 1 child, but got %d\n", len(children))
+	}
+	children = ht.Store.FindChildren(common.TestId("00000000000000000000000000000001"), 2)
+	if len(children) != 2 {
+		t.Fatalf("expected 2 children, but got %d\n", len(children))
+	}
+	sort.Sort(common.SpanIdSlice(children))
+	if !children[0].Equal(common.TestId("00000000000000000000000000000002")) {
+		t.Fatal()
+	}
+	if !children[1].Equal(common.TestId("00000000000000000000000000000003")) {
+		t.Fatal()
+	}
+}
+
+func testQuery(t *testing.T, ht *MiniHTraced, query *common.Query,
+	expectedSpans []common.Span) {
+	testQueryExt(t, ht, query, expectedSpans, nil)
+}
+
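+// testQueryExt runs a query against the MiniHTraced datastore and verifies
+// that the results (compared via their JSON encodings) and, optionally, the
+// per-shard scan counts match the expected values.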
+func testQueryExt(t *testing.T, ht *MiniHTraced, query *common.Query,
+	expectedSpans []common.Span, expectedNumScanned []int) {
+	spans, err, numScanned := ht.Store.HandleQuery(query)
+	if err != nil {
+		t.Fatalf("Query %s failed: %s\n", query.String(), err.Error())
+	}
+	expectedBuf := new(bytes.Buffer)
+	enc := json.NewEncoder(expectedBuf)
+	err = enc.Encode(expectedSpans)
+	if err != nil {
+		t.Fatalf("Failed to encode expectedSpans to JSON: %s\n", err.Error())
+	}
+	spansBuf := new(bytes.Buffer)
+	enc = json.NewEncoder(spansBuf)
+	err = enc.Encode(spans)
+	if err != nil {
+		t.Fatalf("Failed to encode result spans to JSON: %s\n", err.Error())
+	}
+	t.Logf("len(spans) = %d, len(expectedSpans) = %d\n", len(spans),
+		len(expectedSpans))
+	common.ExpectStrEqual(t, string(expectedBuf.Bytes()), string(spansBuf.Bytes()))
+	if expectedNumScanned != nil {
+		if !reflect.DeepEqual(expectedNumScanned, numScanned) {
+			t.Fatalf("Invalid values for numScanned: got %v, expected %v\n",
+				expectedNumScanned, numScanned)
+		}
+	}
+}
+
+// Test queries on the datastore.
+func TestSimpleQuery(t *testing.T) {
+	t.Parallel()
+	htraceBld := &MiniHTracedBuilder{Name: "TestSimpleQuery",
+		Cnf: map[string]string{
+			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
+		},
+		WrittenSpans: common.NewSemaphore(0),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		panic(err)
+	}
+	defer ht.Close()
+	createSpans(SIMPLE_TEST_SPANS, ht.Store)
+
+	assertNumWrittenEquals(t, ht.Store.msink, len(SIMPLE_TEST_SPANS))
+
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.GREATER_THAN_OR_EQUALS,
+				Field: common.BEGIN_TIME,
+				Val:   "125",
+			},
+		},
+		Lim: 5,
+	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[2]})
+}
+
+func TestQueries2(t *testing.T) {
+	t.Parallel()
+	htraceBld := &MiniHTracedBuilder{Name: "TestQueries2",
+		Cnf: map[string]string{
+			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
+		},
+		WrittenSpans: common.NewSemaphore(0),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		panic(err)
+	}
+	defer ht.Close()
+	createSpans(SIMPLE_TEST_SPANS, ht.Store)
+	assertNumWrittenEquals(t, ht.Store.msink, len(SIMPLE_TEST_SPANS))
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.LESS_THAN_OR_EQUALS,
+				Field: common.BEGIN_TIME,
+				Val:   "125",
+			},
+		},
+		Lim: 5,
+	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[0]})
+
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.LESS_THAN_OR_EQUALS,
+				Field: common.BEGIN_TIME,
+				Val:   "125",
+			},
+			common.Predicate{
+				Op:    common.EQUALS,
+				Field: common.DESCRIPTION,
+				Val:   "getFileDescriptors",
+			},
+		},
+		Lim: 2,
+	}, []common.Span{SIMPLE_TEST_SPANS[0]})
+
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.EQUALS,
+				Field: common.DESCRIPTION,
+				Val:   "getFileDescriptors",
+			},
+		},
+		Lim: 2,
+	}, []common.Span{SIMPLE_TEST_SPANS[0]})
+}
+
+func TestQueries3(t *testing.T) {
+	t.Parallel()
+	htraceBld := &MiniHTracedBuilder{Name: "TestQueries3",
+		Cnf: map[string]string{
+			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
+		},
+		WrittenSpans: common.NewSemaphore(0),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		panic(err)
+	}
+	defer ht.Close()
+	createSpans(SIMPLE_TEST_SPANS, ht.Store)
+	assertNumWrittenEquals(t, ht.Store.msink, len(SIMPLE_TEST_SPANS))
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.CONTAINS,
+				Field: common.DESCRIPTION,
+				Val:   "Fd",
+			},
+			common.Predicate{
+				Op:    common.GREATER_THAN_OR_EQUALS,
+				Field: common.BEGIN_TIME,
+				Val:   "100",
+			},
+		},
+		Lim: 5,
+	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[2]})
+
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.LESS_THAN_OR_EQUALS,
+				Field: common.SPAN_ID,
+				Val:   common.TestId("00000000000000000000000000000000").String(),
+			},
+		},
+		Lim: 200,
+	}, []common.Span{})
+
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.LESS_THAN_OR_EQUALS,
+				Field: common.SPAN_ID,
+				Val:   common.TestId("00000000000000000000000000000002").String(),
+			},
+		},
+		Lim: 200,
+	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[0]})
+}
+
+func TestQueries4(t *testing.T) {
+	t.Parallel()
+	htraceBld := &MiniHTracedBuilder{Name: "TestQueries4",
+		Cnf: map[string]string{
+			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
+		},
+		WrittenSpans: common.NewSemaphore(0),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		panic(err)
+	}
+	defer ht.Close()
+	createSpans(SIMPLE_TEST_SPANS, ht.Store)
+
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.GREATER_THAN,
+				Field: common.BEGIN_TIME,
+				Val:   "125",
+			},
+		},
+		Lim: 5,
+	}, []common.Span{SIMPLE_TEST_SPANS[2]})
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.GREATER_THAN_OR_EQUALS,
+				Field: common.DESCRIPTION,
+				Val:   "openFd",
+			},
+		},
+		Lim: 2,
+	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[2]})
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.GREATER_THAN,
+				Field: common.DESCRIPTION,
+				Val:   "openFd",
+			},
+		},
+		Lim: 2,
+	}, []common.Span{SIMPLE_TEST_SPANS[2]})
+}
+
+var TEST_QUERIES5_SPANS []common.Span = []common.Span{
+	common.Span{Id: common.TestId("10000000000000000000000000000001"),
+		SpanData: common.SpanData{
+			Begin:       123,
+			End:         456,
+			Description: "span1",
+			Parents:     []common.SpanId{},
+			TracerId:    "myTracer",
+		}},
+	common.Span{Id: common.TestId("10000000000000000000000000000002"),
+		SpanData: common.SpanData{
+			Begin:       123,
+			End:         200,
+			Description: "span2",
+			Parents:     []common.SpanId{common.TestId("10000000000000000000000000000001")},
+			TracerId:    "myTracer",
+		}},
+	common.Span{Id: common.TestId("10000000000000000000000000000003"),
+		SpanData: common.SpanData{
+			Begin:       124,
+			End:         457,
+			Description: "span3",
+			Parents:     []common.SpanId{common.TestId("10000000000000000000000000000001")},
+			TracerId:    "myTracer",
+		}},
+}
+
+func TestQueries5(t *testing.T) {
+	t.Parallel()
+	htraceBld := &MiniHTracedBuilder{Name: "TestQueries5",
+		WrittenSpans: common.NewSemaphore(0),
+		DataDirs:     make([]string, 1),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		panic(err)
+	}
+	defer ht.Close()
+	createSpans(TEST_QUERIES5_SPANS, ht.Store)
+
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.GREATER_THAN,
+				Field: common.BEGIN_TIME,
+				Val:   "123",
+			},
+		},
+		Lim: 5,
+	}, []common.Span{TEST_QUERIES5_SPANS[2]})
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.GREATER_THAN,
+				Field: common.END_TIME,
+				Val:   "200",
+			},
+		},
+		Lim: 500,
+	}, []common.Span{TEST_QUERIES5_SPANS[0], TEST_QUERIES5_SPANS[2]})
+
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.LESS_THAN_OR_EQUALS,
+				Field: common.END_TIME,
+				Val:   "999",
+			},
+		},
+		Lim: 500,
+	}, []common.Span{TEST_QUERIES5_SPANS[2],
+		TEST_QUERIES5_SPANS[0],
+		TEST_QUERIES5_SPANS[1],
+	})
+}
+
+func BenchmarkDatastoreWrites(b *testing.B) {
+	htraceBld := &MiniHTracedBuilder{Name: "BenchmarkDatastoreWrites",
+		Cnf: map[string]string{
+			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
+			conf.HTRACE_LOG_LEVEL:                     "INFO",
+		},
+		WrittenSpans: common.NewSemaphore(0),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		b.Fatalf("Error creating MiniHTraced: %s\n", err.Error())
+	}
+	ht.Store.lg.Infof("BenchmarkDatastoreWrites: b.N = %d\n", b.N)
+	defer func() {
+		if r := recover(); r != nil {
+			ht.Store.lg.Infof("panic: %s\n", r.(error))
+		}
+		ht.Close()
+	}()
+	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+	allSpans := make([]*common.Span, b.N)
+	for n := range allSpans {
+		allSpans[n] = test.NewRandomSpan(rnd, allSpans[0:n])
+	}
+
+	// Reset the timer to avoid including the time required to create new
+	// random spans in the benchmark total.
+	b.ResetTimer()
+
+	// Write many random spans.
+	ing := ht.Store.NewSpanIngestor(ht.Store.lg, "127.0.0.1", "")
+	for n := 0; n < b.N; n++ {
+		ing.IngestSpan(allSpans[n])
+	}
+	ing.Close(time.Now())
+	// Wait for all the spans to be written.
+	ht.Store.WrittenSpans.Waits(int64(b.N))
+	assertNumWrittenEquals(b, ht.Store.msink, b.N)
+}
+
+func verifySuccessfulLoad(t *testing.T, allSpans common.SpanSlice,
+	dataDirs []string) {
+	htraceBld := &MiniHTracedBuilder{
+		Name:                "TestReloadDataStore#verifySuccessfulLoad",
+		DataDirs:            dataDirs,
+		KeepDataDirsOnClose: true,
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	defer ht.Close()
+	var hcl *htrace.Client
+	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
+	if err != nil {
+		t.Fatalf("failed to create client: %s", err.Error())
+	}
+	defer hcl.Close()
+	// Look up the spans we wrote.
+	var span *common.Span
+	for i := 0; i < len(allSpans); i++ {
+		span, err = hcl.FindSpan(allSpans[i].Id)
+		if err != nil {
+			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
+		}
+		common.ExpectSpansEqual(t, allSpans[i], span)
+	}
+}
+
+func verifyFailedLoad(t *testing.T, dataDirs []string, expectedErr string) {
+	htraceBld := &MiniHTracedBuilder{
+		Name:                "TestReloadDataStore#verifyFailedLoad",
+		DataDirs:            dataDirs,
+		KeepDataDirsOnClose: true,
+	}
+	_, err := htraceBld.Build()
+	if err == nil {
+		t.Fatalf("expected failure to load, but the load succeeded.")
+	}
+	common.AssertErrContains(t, err, expectedErr)
+}
+
+func TestReloadDataStore(t *testing.T) {
+	htraceBld := &MiniHTracedBuilder{Name: "TestReloadDataStore",
+		Cnf: map[string]string{
+			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
+		},
+		DataDirs:            make([]string, 2),
+		KeepDataDirsOnClose: true,
+		WrittenSpans:        common.NewSemaphore(0),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+	dataDirs := make([]string, len(ht.DataDirs))
+	copy(dataDirs, ht.DataDirs)
+	defer func() {
+		if ht != nil {
+			ht.Close()
+		}
+		for i := range dataDirs {
+			os.RemoveAll(dataDirs[i])
+		}
+	}()
+	var hcl *htrace.Client
+	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
+	if err != nil {
+		t.Fatalf("failed to create client: %s", err.Error())
+	}
+	hcnf := ht.Cnf.Clone()
+
+	// Create some random trace spans.
+	NUM_TEST_SPANS := 5
+	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
+	err = hcl.WriteSpans(allSpans)
+	if err != nil {
+		t.Fatalf("WriteSpans failed: %s\n", err.Error())
+	}
+	ht.Store.WrittenSpans.Waits(int64(NUM_TEST_SPANS))
+
+	// Look up the spans we wrote.
+	var span *common.Span
+	for i := 0; i < NUM_TEST_SPANS; i++ {
+		span, err = hcl.FindSpan(allSpans[i].Id)
+		if err != nil {
+			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
+		}
+		common.ExpectSpansEqual(t, allSpans[i], span)
+	}
+	hcl.Close()
+	ht.Close()
+	ht = nil
+
+	// Verify that we can reload the datastore, even if we configure the data
+	// directories in a different order.
+	verifySuccessfulLoad(t, allSpans, []string{dataDirs[1], dataDirs[0]})
+
+	// If we try to reload the datastore with only one directory, it won't work
+	// (we need both).
+	verifyFailedLoad(t, []string{dataDirs[1]},
+		"The TotalShards field of all shards is 2, but we have 1 shards.")
+
+	// Test that we give an intelligent error message when 0 directories are
+	// configured.
+	verifyFailedLoad(t, []string{}, "No shard directories found.")
+
+	// Can't specify the same directory more than once... will get "lock
+	// already held by process"
+	verifyFailedLoad(t, []string{dataDirs[0], dataDirs[1], dataDirs[1]},
+		" already held by process.")
+
+	// Open the datastore and modify it to have the wrong DaemonId
+	dld := NewDataStoreLoader(hcnf)
+	defer func() {
+		if dld != nil {
+			dld.Close()
+			dld = nil
+		}
+	}()
+	dld.LoadShards()
+	sinfo, err := dld.shards[0].readShardInfo()
+	if err != nil {
+		t.Fatalf("error reading shard info for shard %s: %s\n",
+			dld.shards[0].path, err.Error())
+	}
+	newDaemonId := sinfo.DaemonId + 1
+	dld.lg.Infof("Read %s from shard %s.  Changing daemonId to 0x%016x\n.",
+		asJson(sinfo), dld.shards[0].path, newDaemonId)
+	sinfo.DaemonId = newDaemonId
+	err = dld.shards[0].writeShardInfo(sinfo)
+	if err != nil {
+		t.Fatalf("error writing shard info for shard %s: %s\n",
+			dld.shards[0].path, err.Error())
+	}
+	dld.Close()
+	dld = nil
+	verifyFailedLoad(t, dataDirs, "DaemonId mismatch.")
+
+	// Open the datastore and modify it to have the wrong TotalShards
+	dld = NewDataStoreLoader(hcnf)
+	dld.LoadShards()
+	sinfo, err = dld.shards[0].readShardInfo()
+	if err != nil {
+		t.Fatalf("error reading shard info for shard %s: %s\n",
+			dld.shards[0].path, err.Error())
+	}
+	newDaemonId = sinfo.DaemonId - 1
+	dld.lg.Infof("Read %s from shard %s.  Changing daemonId to 0x%016x, "+
+		"TotalShards to 3\n.",
+		asJson(sinfo), dld.shards[0].path, newDaemonId)
+	sinfo.DaemonId = newDaemonId
+	sinfo.TotalShards = 3
+	err = dld.shards[0].writeShardInfo(sinfo)
+	if err != nil {
+		t.Fatalf("error writing shard info for shard %s: %s\n",
+			dld.shards[0].path, err.Error())
+	}
+	dld.Close()
+	dld = nil
+	verifyFailedLoad(t, dataDirs, "TotalShards mismatch.")
+
+	// Open the datastore and modify it to have the wrong LayoutVersion
+	dld = NewDataStoreLoader(hcnf)
+	dld.LoadShards()
+	for shardIdx := range dld.shards {
+		sinfo, err = dld.shards[shardIdx].readShardInfo()
+		if err != nil {
+			t.Fatalf("error reading shard info for shard %s: %s\n",
+				dld.shards[shardIdx].path, err.Error())
+		}
+		dld.lg.Infof("Read %s from shard %s.  Changing TotalShards to 2, "+
+			"LayoutVersion to 2\n", asJson(sinfo), dld.shards[shardIdx].path)
+		sinfo.TotalShards = 2
+		sinfo.LayoutVersion = 2
+		err = dld.shards[shardIdx].writeShardInfo(sinfo)
+		if err != nil {
+			t.Fatalf("error writing shard info for shard %s: %s\n",
+				dld.shards[0].path, err.Error())
+		}
+	}
+	dld.Close()
+	dld = nil
+	verifyFailedLoad(t, dataDirs, "The layout version of all shards is 2, "+
+		"but we only support")
+
+	// It should work with data.store.clear set.
+	htraceBld = &MiniHTracedBuilder{
+		Name:                "TestReloadDataStore#clear",
+		DataDirs:            dataDirs,
+		KeepDataDirsOnClose: true,
+		Cnf:                 map[string]string{conf.HTRACE_DATA_STORE_CLEAR: "true"},
+	}
+	ht, err = htraceBld.Build()
+	if err != nil {
+		t.Fatalf("failed to create datastore: %s", err.Error())
+	}
+}
+
+func TestQueriesWithContinuationTokens1(t *testing.T) {
+	t.Parallel()
+	htraceBld := &MiniHTracedBuilder{Name: "TestQueriesWithContinuationTokens1",
+		Cnf: map[string]string{
+			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
+		},
+		WrittenSpans: common.NewSemaphore(0),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		panic(err)
+	}
+	defer ht.Close()
+	createSpans(SIMPLE_TEST_SPANS, ht.Store)
+	assertNumWrittenEquals(t, ht.Store.msink, len(SIMPLE_TEST_SPANS))
+	// Adding a prev value to this query excludes the first result that we
+	// would normally get.
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.GREATER_THAN,
+				Field: common.BEGIN_TIME,
+				Val:   "120",
+			},
+		},
+		Lim:  5,
+		Prev: &SIMPLE_TEST_SPANS[0],
+	}, []common.Span{SIMPLE_TEST_SPANS[1], SIMPLE_TEST_SPANS[2]})
+
+	// There is only one result from an EQUALS query on SPAN_ID.
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.EQUALS,
+				Field: common.SPAN_ID,
+				Val:   common.TestId("00000000000000000000000000000001").String(),
+			},
+		},
+		Lim:  100,
+		Prev: &SIMPLE_TEST_SPANS[0],
+	}, []common.Span{})
+
+	// When doing a LESS_THAN_OR_EQUALS search, we still don't get back the
+	// span we pass as a continuation token. (Primary index edition).
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.LESS_THAN_OR_EQUALS,
+				Field: common.SPAN_ID,
+				Val:   common.TestId("00000000000000000000000000000002").String(),
+			},
+		},
+		Lim:  100,
+		Prev: &SIMPLE_TEST_SPANS[1],
+	}, []common.Span{SIMPLE_TEST_SPANS[0]})
+
+	// When doing a GREATER_THAN search, we still don't get back the
+	// span we pass as a continuation token. (Secondary index edition).
+	testQuery(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.GREATER_THAN,
+				Field: common.DURATION,
+				Val:   "0",
+			},
+		},
+		Lim:  100,
+		Prev: &SIMPLE_TEST_SPANS[1],
+	}, []common.Span{SIMPLE_TEST_SPANS[2], SIMPLE_TEST_SPANS[0]})
+}
+
+func TestQueryRowsScanned(t *testing.T) {
+	t.Parallel()
+	htraceBld := &MiniHTracedBuilder{Name: "TestQueryRowsScanned",
+		WrittenSpans: common.NewSemaphore(0),
+	}
+	ht, err := htraceBld.Build()
+	if err != nil {
+		panic(err)
+	}
+	defer ht.Close()
+	createSpans(SIMPLE_TEST_SPANS, ht.Store)
+	assertNumWrittenEquals(t, ht.Store.msink, len(SIMPLE_TEST_SPANS))
+	testQueryExt(t, ht, &common.Query{
+		Predicates: []common.Predicate{
+			common.Predicate{
+				Op:    common.EQUALS,
+				Field: common.SPAN_ID,
+				Val:   common.TestId("00000000000000000000000000000001").String(),
+			},
+		},
+		Lim:  100,
+		Prev: nil,
+	}, []common.Span{SIMPLE_TEST_SPANS[0]},
+		[]int{2, 1})
+}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/htrace/htraced/heartbeater.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/htrace/htraced/heartbeater.go b/htrace-htraced/go/src/htrace/htraced/heartbeater.go
new file mode 100644
index 0000000..3f4c951
--- /dev/null
+++ b/htrace-htraced/go/src/htrace/htraced/heartbeater.go
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package main
+
+import (
+	"htrace/common"
+	"sync"
+	"time"
+)
+
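+// A Heartbeater periodically sends a nil message on every registered target
+// channel.  If a target's channel cannot immediately accept the message, the
+// heartbeat is skipped and logged rather than blocked on, so a stuck
+// receiver cannot stall the heartbeater itself.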
+type Heartbeater struct {
+	// The name of this heartbeater
+	name string
+
+	// How long to sleep between heartbeats, in milliseconds.
+	periodMs int64
+
+	// The logger to use.
+	lg *common.Logger
+
+	// The channels to send the heartbeat on.
+	targets []HeartbeatTarget
+
+	// Incoming requests to the heartbeater.  When this is closed, the
+	// heartbeater will exit.
+	req chan *HeartbeatTarget
+
+	wg sync.WaitGroup
+}
+
+type HeartbeatTarget struct {
+	// The name of the heartbeat target.
+	name string
+
+	// The channel for the heartbeat target.
+	targetChan chan interface{}
+}
+
+func (tgt *HeartbeatTarget) String() string {
+	return tgt.name
+}
+
+func NewHeartbeater(name string, periodMs int64, lg *common.Logger) *Heartbeater {
+	hb := &Heartbeater{
+		name:     name,
+		periodMs: periodMs,
+		lg:       lg,
+		targets:  make([]HeartbeatTarget, 0, 4),
+		req:      make(chan *HeartbeatTarget),
+	}
+	hb.wg.Add(1)
+	go hb.run()
+	return hb
+}
+
+func (hb *Heartbeater) AddHeartbeatTarget(tgt *HeartbeatTarget) {
+	hb.req <- tgt
+}
+
+func (hb *Heartbeater) Shutdown() {
+	close(hb.req)
+	hb.wg.Wait()
+}
+
+func (hb *Heartbeater) String() string {
+	return hb.name
+}
+
+func (hb *Heartbeater) run() {
+	defer func() {
+		hb.lg.Debugf("%s: exiting.\n", hb.String())
+		hb.wg.Done()
+	}()
+	period := time.Duration(hb.periodMs) * time.Millisecond
+	for {
+		periodEnd := time.Now().Add(period)
+		for {
+			timeToWait := periodEnd.Sub(time.Now())
+			if timeToWait <= 0 {
+				break
+			} else if timeToWait > period {
+				// Smooth over jitter or clock changes
+				timeToWait = period
+				periodEnd = time.Now().Add(period)
+			}
+			select {
+			case tgt, open := <-hb.req:
+				if !open {
+					return
+				}
+				hb.targets = append(hb.targets, *tgt)
+				hb.lg.Debugf("%s: added %s.\n", hb.String(), tgt.String())
+			case <-time.After(timeToWait):
+			}
+		}
+		for targetIdx := range hb.targets {
+			select {
+			case hb.targets[targetIdx].targetChan <- nil:
+			default:
+				// We failed to send a heartbeat because the other goroutine was busy and
+				// hasn't cleared the previous one from its channel.  This could indicate a
+				// stuck goroutine.
+				hb.lg.Infof("%s: could not send heartbeat to %s.\n",
+					hb.String(), hb.targets[targetIdx])
+			}
+		}
+	}
+}


[3/7] incubator-htrace git commit: HTRACE-357. Rename htrace-htraced/go/src/org/apache/htrace to htrace-htraced/go/src/htrace (Colin Patrick McCabe via iwasakims)

Posted by iw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/common/time_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/common/time_test.go b/htrace-htraced/go/src/org/apache/htrace/common/time_test.go
deleted file mode 100644
index 11e2733..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/common/time_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package common
-
-import (
-	"testing"
-)
-
-func testRoundTrip(t *testing.T, u int64) {
-	tme := UnixMsToTime(u)
-	u2 := TimeToUnixMs(tme)
-	if u2 != u {
-		t.Fatalf("Error taking %d on a round trip: came back as "+
-			"%d instead.\n", u, u2)
-	}
-}
-
-func TestTimeConversions(t *testing.T) {
-	testRoundTrip(t, 0)
-	testRoundTrip(t, 1445540632000)
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/conf/config.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/conf/config.go b/htrace-htraced/go/src/org/apache/htrace/conf/config.go
deleted file mode 100644
index 24170b2..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/conf/config.go
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package conf
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"log"
-	"os"
-	"path/filepath"
-	"sort"
-	"strconv"
-	"strings"
-	"syscall"
-)
-
-//
-// The configuration code for HTraced.
-//
-// HTraced can be configured via Hadoop-style XML configuration files, or by passing -Dkey=value
-// command line arguments.  Command-line arguments without an equals sign, such as "-Dkey", will be
-// treated as setting the key to "true".
-//
-// Configuration key constants should be defined in config_keys.go.  Each key should have a default,
-// which will be used if the user supplies no value, or supplies an invalid value.
-// For that reason, it is not necessary for the Get, GetInt, etc. functions to take a default value
-// argument.
-//
-// Configuration objects are immutable.  However, you can make a copy of a configuration which adds
-// some changes using Configuration#Clone().
-//
-
-type Config struct {
-	settings map[string]string
-	defaults map[string]string
-}
-
-type Builder struct {
-	// If non-nil, the XML configuration file to read.
-	Reader io.Reader
-
-	// If non-nil, the configuration values to use.
-	Values map[string]string
-
-	// If non-nil, the default configuration values to use.
-	Defaults map[string]string
-
-	// If non-nil, the command-line arguments to use.
-	Argv []string
-
-	// The name of the application.  Configuration keys that start with this
-	// string will be converted to their unprefixed forms.
-	AppPrefix string
-}
-
-func getDefaultHTracedConfDir() string {
-	return PATH_SEP + "etc" + PATH_SEP + "htraced" + PATH_SEP + "conf"
-}
-
-func getHTracedConfDirs(dlog io.Writer) []string {
-	confDir := os.Getenv("HTRACED_CONF_DIR")
-	paths := filepath.SplitList(confDir)
-	if len(paths) < 1 {
-		def := getDefaultHTracedConfDir()
-		io.WriteString(dlog, fmt.Sprintf("HTRACED_CONF_DIR defaulting to %s\n", def))
-		return []string{def}
-	}
-	io.WriteString(dlog, fmt.Sprintf("HTRACED_CONF_DIR=%s\n", confDir))
-	return paths
-}
-
-// Load a configuration from the application's argv, configuration file, and the standard
-// defaults.
-func LoadApplicationConfig(appPrefix string) (*Config, io.Reader) {
-	dlog := new(bytes.Buffer)
-	reader := openFile(CONFIG_FILE_NAME, getHTracedConfDirs(dlog), dlog)
-	bld := Builder{}
-	if reader != nil {
-		defer reader.Close()
-		bld.Reader = bufio.NewReader(reader)
-	}
-	bld.Argv = os.Args[1:]
-	bld.Defaults = DEFAULTS
-	bld.AppPrefix = appPrefix
-	cnf, err := bld.Build()
-	if err != nil {
-		log.Fatal("Error building configuration: " + err.Error())
-	}
-	os.Args = append(os.Args[0:1], bld.Argv...)
-	keys := make(sort.StringSlice, 0, 20)
-	for k, _ := range cnf.settings {
-		keys = append(keys, k)
-	}
-	sort.Sort(keys)
-	prefix := ""
-	io.WriteString(dlog, "Read configuration: ")
-	for i := range keys {
-		io.WriteString(dlog, fmt.Sprintf(`%s%s = "%s"`,
-			prefix, keys[i], cnf.settings[keys[i]]))
-		prefix = ", "
-	}
-	return cnf, dlog
-}
-
-// Attempt to open a configuration file somewhere on the provided list of paths.
-func openFile(cnfName string, paths []string, dlog io.Writer) io.ReadCloser {
-	for p := range paths {
-		path := fmt.Sprintf("%s%c%s", paths[p], os.PathSeparator, cnfName)
-		file, err := os.Open(path)
-		if err == nil {
-			io.WriteString(dlog, fmt.Sprintf("Reading configuration from %s.\n", path))
-			return file
-		}
-		if e, ok := err.(*os.PathError); ok && e.Err == syscall.ENOENT {
-			continue
-		}
-		io.WriteString(dlog, fmt.Sprintf("Error opening %s for read: %s\n", path, err.Error()))
-	}
-	return nil
-}
-
-// Try to parse a command-line element as a key=value pair.
-func parseAsConfigFlag(flag string) (string, string) {
-	var confPart string
-	if strings.HasPrefix(flag, "-D") {
-		confPart = flag[2:]
-	} else if strings.HasPrefix(flag, "--D") {
-		confPart = flag[3:]
-	} else {
-		return "", ""
-	}
-	if len(confPart) == 0 {
-		return "", ""
-	}
-	idx := strings.Index(confPart, "=")
-	if idx == -1 {
-		return confPart, "true"
-	}
-	return confPart[0:idx], confPart[idx+1:]
-}
-
-// Build a new configuration object from the provided conf.Builder.
-func (bld *Builder) Build() (*Config, error) {
-	// Load values and defaults
-	cnf := Config{}
-	cnf.settings = make(map[string]string)
-	if bld.Values != nil {
-		for k, v := range bld.Values {
-			cnf.settings[k] = v
-		}
-	}
-	cnf.defaults = make(map[string]string)
-	if bld.Defaults != nil {
-		for k, v := range bld.Defaults {
-			cnf.defaults[k] = v
-		}
-	}
-
-	// Process the configuration file, if we have one
-	if bld.Reader != nil {
-		parseXml(bld.Reader, cnf.settings)
-	}
-
-	// Process command line arguments
-	var i int
-	for i < len(bld.Argv) {
-		str := bld.Argv[i]
-		key, val := parseAsConfigFlag(str)
-		if key != "" {
-			cnf.settings[key] = val
-			bld.Argv = append(bld.Argv[:i], bld.Argv[i+1:]...)
-		} else {
-			i++
-		}
-	}
-	cnf.settings = bld.removeApplicationPrefixes(cnf.settings)
-	cnf.defaults = bld.removeApplicationPrefixes(cnf.defaults)
-	return &cnf, nil
-}
-
-func (bld *Builder) removeApplicationPrefixes(in map[string]string) map[string]string {
-	out := make(map[string]string)
-	for k, v := range in {
-		if strings.HasPrefix(k, bld.AppPrefix) {
-			out[k[len(bld.AppPrefix):]] = v
-		} else {
-			out[k] = v
-		}
-	}
-	return out
-}
-
-// Returns true if the configuration has a non-default value for the given key.
-func (cnf *Config) Contains(key string) bool {
-	_, ok := cnf.settings[key]
-	return ok
-}
-
-// Get a string configuration key.
-func (cnf *Config) Get(key string) string {
-	ret, hadKey := cnf.settings[key]
-	if hadKey {
-		return ret
-	}
-	return cnf.defaults[key]
-}
-
-// Get a boolean configuration key.
-func (cnf *Config) GetBool(key string) bool {
-	str := cnf.settings[key]
-	ret, err := strconv.ParseBool(str)
-	if err == nil {
-		return ret
-	}
-	str = cnf.defaults[key]
-	ret, err = strconv.ParseBool(str)
-	if err == nil {
-		return ret
-	}
-	return false
-}
-
-// Get an integer configuration key.
-func (cnf *Config) GetInt(key string) int {
-	str := cnf.settings[key]
-	ret, err := strconv.Atoi(str)
-	if err == nil {
-		return ret
-	}
-	str = cnf.defaults[key]
-	ret, err = strconv.Atoi(str)
-	if err == nil {
-		return ret
-	}
-	return 0
-}
-
-// Get an int64 configuration key.
-func (cnf *Config) GetInt64(key string) int64 {
-	str := cnf.settings[key]
-	ret, err := strconv.ParseInt(str, 10, 64)
-	if err == nil {
-		return ret
-	}
-	str = cnf.defaults[key]
-	ret, err = strconv.ParseInt(str, 10, 64)
-	if err == nil {
-		return ret
-	}
-	return 0
-}
-
-// Make a deep copy of the given configuration.
-// Optionally, you can specify particular key/value pairs to change.
-// Example:
-// cnf2 := cnf.Copy("my.changed.key", "my.new.value")
-func (cnf *Config) Clone(args ...string) *Config {
-	if len(args)%2 != 0 {
-		panic("The arguments to Config#copy are key1, value1, " +
-			"key2, value2, and so on.  You must specify an even number of arguments.")
-	}
-	ncnf := &Config{defaults: cnf.defaults}
-	ncnf.settings = make(map[string]string)
-	for k, v := range cnf.settings {
-		ncnf.settings[k] = v
-	}
-	for i := 0; i < len(args); i += 2 {
-		ncnf.settings[args[i]] = args[i+1]
-	}
-	return ncnf
-}
-
-// Export the configuration as a map
-func (cnf *Config) Export() map[string]string {
-	m := make(map[string]string)
-	for k, v := range cnf.defaults {
-		m[k] = v
-	}
-	for k, v := range cnf.settings {
-		m[k] = v
-	}
-	return m
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/conf/config_keys.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/conf/config_keys.go b/htrace-htraced/go/src/org/apache/htrace/conf/config_keys.go
deleted file mode 100644
index 16790d8..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/conf/config_keys.go
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package conf
-
-import (
-	"fmt"
-	"os"
-)
-
-//
-// Configuration keys for HTrace.
-//
-
-// The platform-specific path separator.  Usually slash.
-var PATH_SEP string = fmt.Sprintf("%c", os.PathSeparator)
-
-// The platform-specific path list separator.  Usually colon.
-var PATH_LIST_SEP string = fmt.Sprintf("%c", os.PathListSeparator)
-
-// The name of the XML configuration file to look for.
-const CONFIG_FILE_NAME = "htraced-conf.xml"
-
-// An environment variable containing a list of paths to search for the
-// configuration file in.
-const HTRACED_CONF_DIR = "HTRACED_CONF_DIR"
-
-// The web address to start the REST server on.
-const HTRACE_WEB_ADDRESS = "web.address"
-
-// The default port for the Htrace web address.
-const HTRACE_WEB_ADDRESS_DEFAULT_PORT = 9096
-
-// The web address to start the REST server on.
-const HTRACE_HRPC_ADDRESS = "hrpc.address"
-
-// The default port for the Htrace HRPC address.
-const HTRACE_HRPC_ADDRESS_DEFAULT_PORT = 9075
-
-// The directories to put the data store into.  Separated by PATH_LIST_SEP.
-const HTRACE_DATA_STORE_DIRECTORIES = "data.store.directories"
-
-// Boolean key which indicates whether we should clear data on startup.
-const HTRACE_DATA_STORE_CLEAR = "data.store.clear"
-
-// How many writes to buffer before applying backpressure to span senders.
-const HTRACE_DATA_STORE_SPAN_BUFFER_SIZE = "data.store.span.buffer.size"
-
-// Path to put the logs from htrace, or the empty string to use stdout.
-const HTRACE_LOG_PATH = "log.path"
-
-// The log level to use for the logs in htrace.
-const HTRACE_LOG_LEVEL = "log.level"
-
-// The period between datastore heartbeats.  This is the approximate interval at which we will
-// prune expired spans.
-const HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS = "datastore.heartbeat.period.ms"
-
-// The maximum number of addresses for which we will maintain metrics.
-const HTRACE_METRICS_MAX_ADDR_ENTRIES = "metrics.max.addr.entries"
-
-// The number of milliseconds we should keep spans before discarding them.
-const HTRACE_SPAN_EXPIRY_MS = "span.expiry.ms"
-
-// The period between updates to the span reaper
-const HTRACE_REAPER_HEARTBEAT_PERIOD_MS = "reaper.heartbeat.period.ms"
-
-// A host:port pair to send information to on startup.  This is used in unit
-// tests to determine the (random) port of the htraced process that has been
-// started.
-const HTRACE_STARTUP_NOTIFICATION_ADDRESS = "startup.notification.address"
-
-// The maximum number of HRPC handler goroutines we will create at once.  If
-// this is too small, we won't get enough concurrency; if it's too big, we will
-// buffer too much data in memory while waiting for the datastore to process
-// requests.
-const HTRACE_NUM_HRPC_HANDLERS = "num.hrpc.handlers"
-
-// The I/O timeout HRPC will use, in milliseconds.  If it takes longer than
-// this to read or write a message, we will abort the connection.
-const HTRACE_HRPC_IO_TIMEOUT_MS = "hrpc.io.timeout.ms"
-
-// The leveldb write buffer size, or 0 to use the library default, which is 4
-// MB in leveldb 1.16.  See leveldb's options.h for more details.
-const HTRACE_LEVELDB_WRITE_BUFFER_SIZE = "leveldb.write.buffer.size"
-
-// The LRU cache size for leveldb, in bytes.
-const HTRACE_LEVELDB_CACHE_SIZE = "leveldb.cache.size"
-
-// Default values for HTrace configuration keys.
-var DEFAULTS = map[string]string{
-	HTRACE_WEB_ADDRESS:  fmt.Sprintf("0.0.0.0:%d", HTRACE_WEB_ADDRESS_DEFAULT_PORT),
-	HTRACE_HRPC_ADDRESS: fmt.Sprintf("0.0.0.0:%d", HTRACE_HRPC_ADDRESS_DEFAULT_PORT),
-	HTRACE_DATA_STORE_DIRECTORIES: PATH_SEP + "tmp" + PATH_SEP + "htrace1" +
-		PATH_LIST_SEP + PATH_SEP + "tmp" + PATH_SEP + "htrace2",
-	HTRACE_DATA_STORE_CLEAR:              "false",
-	HTRACE_DATA_STORE_SPAN_BUFFER_SIZE:   "100",
-	HTRACE_LOG_PATH:                      "",
-	HTRACE_LOG_LEVEL:                     "INFO",
-	HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: fmt.Sprintf("%d", 45*1000),
-	HTRACE_METRICS_MAX_ADDR_ENTRIES:      "100000",
-	HTRACE_SPAN_EXPIRY_MS:                "0",
-	HTRACE_REAPER_HEARTBEAT_PERIOD_MS:    fmt.Sprintf("%d", 90*1000),
-	HTRACE_NUM_HRPC_HANDLERS:             "20",
-	HTRACE_HRPC_IO_TIMEOUT_MS:            "60000",
-	HTRACE_LEVELDB_WRITE_BUFFER_SIZE:     "0",
-	HTRACE_LEVELDB_CACHE_SIZE:            fmt.Sprintf("%d", 100 * 1024 * 1024),
-}
-
-// Values to be used when creating test configurations
-func TEST_VALUES() map[string]string {
-	return map[string]string{
-		HTRACE_HRPC_ADDRESS:   ":0",    // use a random port for the HRPC server
-		HTRACE_LOG_LEVEL:      "TRACE", // show all log messages in tests
-		HTRACE_WEB_ADDRESS:    ":0",    // use a random port for the REST server
-		HTRACE_SPAN_EXPIRY_MS: "0",     // never time out spans (unless testing the reaper)
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/conf/config_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/conf/config_test.go b/htrace-htraced/go/src/org/apache/htrace/conf/config_test.go
deleted file mode 100644
index a681136..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/conf/config_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package conf
-
-import (
-	"bytes"
-	"os"
-	"strings"
-	"testing"
-)
-
-// Test that parsing command-line arguments of the form -Dfoo=bar works.
-func TestParseArgV(t *testing.T) {
-	t.Parallel()
-	argv := []string{"-Dfoo=bar", "-Dbaz=123", "-DsillyMode", "-Dlog.path="}
-	bld := &Builder{Argv: argv,
-		Defaults:map[string]string {
-			"log.path": "/log/path/default",
-		}}
-	cnf, err := bld.Build()
-	if err != nil {
-		t.Fatal()
-	}
-	if "bar" != cnf.Get("foo") {
-		t.Fatal()
-	}
-	if 123 != cnf.GetInt("baz") {
-		t.Fatal()
-	}
-	if !cnf.GetBool("sillyMode") {
-		t.Fatal()
-	}
-	if cnf.GetBool("otherSillyMode") {
-		t.Fatal()
-	}
-	if "" != cnf.Get("log.path") {
-		t.Fatal()
-	}
-}
-
-// Test that default values work.
-// Defaults are used only when the configuration option is not present or can't be parsed.
-func TestDefaults(t *testing.T) {
-	t.Parallel()
-	argv := []string{"-Dfoo=bar", "-Dbaz=invalidNumber"}
-	defaults := map[string]string{
-		"foo":  "notbar",
-		"baz":  "456",
-		"foo2": "4611686018427387904",
-	}
-	bld := &Builder{Argv: argv, Defaults: defaults}
-	cnf, err := bld.Build()
-	if err != nil {
-		t.Fatal()
-	}
-	if "bar" != cnf.Get("foo") {
-		t.Fatal()
-	}
-	if 456 != cnf.GetInt("baz") {
-		t.Fatal()
-	}
-	if 4611686018427387904 != cnf.GetInt64("foo2") {
-		t.Fatal()
-	}
-}
-
-// Test that we can parse our XML configuration file.
-func TestXmlConfigurationFile(t *testing.T) {
-	t.Parallel()
-	xml := `
-<?xml version="1.0"?>
-<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
-<configuration>
-  <property>
-    <name>foo.bar</name>
-    <value>123</value>
-  </property>
-  <property>
-    <name>foo.baz</name>
-    <value>xmlValue</value>
-  </property>
-  <!--<property>
-    <name>commented.out</name>
-    <value>stuff</value>
-  </property>-->
-</configuration>
-`
-	xmlReader := strings.NewReader(xml)
-	argv := []string{"-Dfoo.bar=456"}
-	defaults := map[string]string{
-		"foo.bar":     "789",
-		"cmdline.opt": "4611686018427387904",
-	}
-	bld := &Builder{Argv: argv, Defaults: defaults, Reader: xmlReader}
-	cnf, err := bld.Build()
-	if err != nil {
-		t.Fatal()
-	}
-	// The command-line argument takes precedence over the XML and the defaults.
-	if 456 != cnf.GetInt("foo.bar") {
-		t.Fatal()
-	}
-	if "xmlValue" != cnf.Get("foo.baz") {
-		t.Fatalf("foo.baz = %s", cnf.Get("foo.baz"))
-	}
-	if "" != cnf.Get("commented.out") {
-		t.Fatal()
-	}
-	if 4611686018427387904 != cnf.GetInt64("cmdline.opt") {
-		t.Fatal()
-	}
-}
-
-// Test our handling of the HTRACED_CONF_DIR environment variable.
-func TestGetHTracedConfDirs(t *testing.T) {
-	os.Setenv("HTRACED_CONF_DIR", "")
-	dlog := new(bytes.Buffer)
-	dirs := getHTracedConfDirs(dlog)
-	if len(dirs) != 1 || dirs[0] != getDefaultHTracedConfDir() {
-		t.Fatal()
-	}
-	os.Setenv("HTRACED_CONF_DIR", "/foo/bar:/baz")
-	dirs = getHTracedConfDirs(dlog)
-	if len(dirs) != 2 || dirs[0] != "/foo/bar" || dirs[1] != "/baz" {
-		t.Fatal()
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/conf/xml.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/conf/xml.go b/htrace-htraced/go/src/org/apache/htrace/conf/xml.go
deleted file mode 100644
index de14bc5..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/conf/xml.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package conf
-
-import (
-	"encoding/xml"
-	"io"
-	"log"
-)
-
-type configuration struct {
-	Properties []propertyXml `xml:"property"`
-}
-
-type propertyXml struct {
-	Name  string `xml:"name"`
-	Value string `xml:"value"`
-}
-
-// Parse an XML configuration file.
-func parseXml(reader io.Reader, m map[string]string) error {
-	dec := xml.NewDecoder(reader)
-	configurationXml := configuration{}
-	err := dec.Decode(&configurationXml)
-	if err != nil {
-		return err
-	}
-	props := configurationXml.Properties
-	for p := range props {
-		key := props[p].Name
-		value := props[p].Value
-		if key == "" {
-			log.Println("Warning: ignoring element with missing or empty <name>.")
-			continue
-		}
-		if value == "" {
-			log.Println("Warning: ignoring element with key " + key + " with missing or empty <value>.")
-			continue
-		}
-		//log.Printf("setting %s to %s\n", key, value)
-		m[key] = value
-	}
-	return nil
-}
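
Because parseXml just fills an ordinary map, it is easy to exercise in isolation.  The following is a hypothetical test sketch, not part of the original file; it assumes it would sit in the same conf package as the existing tests.

package conf

import (
	"strings"
	"testing"
)

func TestParseXmlSketch(t *testing.T) {
	xmlText := `<?xml version="1.0"?>
<configuration>
  <property>
    <name>span.expiry.ms</name>
    <value>86400000</value>
  </property>
</configuration>`
	m := make(map[string]string)
	if err := parseXml(strings.NewReader(xmlText), m); err != nil {
		t.Fatalf("parseXml failed: %s", err.Error())
	}
	if m["span.expiry.ms"] != "86400000" {
		t.Fatalf("unexpected value: %s", m["span.expiry.ms"])
	}
}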

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/client_test.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/client_test.go b/htrace-htraced/go/src/org/apache/htrace/htraced/client_test.go
deleted file mode 100644
index 7b64914..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/client_test.go
+++ /dev/null
@@ -1,484 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	"github.com/ugorji/go/codec"
-	"math"
-	"math/rand"
-	htrace "org/apache/htrace/client"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"org/apache/htrace/test"
-	"sort"
-	"sync"
-	"sync/atomic"
-	"testing"
-	"time"
-)
-
-func TestClientGetServerVersion(t *testing.T) {
-	htraceBld := &MiniHTracedBuilder{Name: "TestClientGetServerVersion",
-		DataDirs: make([]string, 2)}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	defer ht.Close()
-	var hcl *htrace.Client
-	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
-	if err != nil {
-		t.Fatalf("failed to create client: %s", err.Error())
-	}
-	defer hcl.Close()
-	_, err = hcl.GetServerVersion()
-	if err != nil {
-		t.Fatalf("failed to call GetServerVersion: %s", err.Error())
-	}
-}
-
-func TestClientGetServerDebugInfo(t *testing.T) {
-	htraceBld := &MiniHTracedBuilder{Name: "TestClientGetServerDebugInfo",
-		DataDirs: make([]string, 2)}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	defer ht.Close()
-	var hcl *htrace.Client
-	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
-	if err != nil {
-		t.Fatalf("failed to create client: %s", err.Error())
-	}
-	defer hcl.Close()
-	debugInfo, err := hcl.GetServerDebugInfo()
-	if err != nil {
-		t.Fatalf("failed to call GetServerDebugInfo: %s", err.Error())
-	}
-	if debugInfo.StackTraces == "" {
-		t.Fatalf(`debugInfo.StackTraces == ""`)
-	}
-	if debugInfo.GCStats == "" {
-		t.Fatalf(`debugInfo.GCStats == ""`)
-	}
-}
-
-func createRandomTestSpans(amount int) common.SpanSlice {
-	rnd := rand.New(rand.NewSource(2))
-	allSpans := make(common.SpanSlice, amount)
-	allSpans[0] = test.NewRandomSpan(rnd, allSpans[0:0])
-	for i := 1; i < amount; i++ {
-		allSpans[i] = test.NewRandomSpan(rnd, allSpans[1:i])
-	}
-	allSpans[1].SpanData.Parents = []common.SpanId{common.SpanId(allSpans[0].Id)}
-	return allSpans
-}
-
-func TestClientOperations(t *testing.T) {
-	htraceBld := &MiniHTracedBuilder{Name: "TestClientOperations",
-		DataDirs:     make([]string, 2),
-		WrittenSpans: common.NewSemaphore(0),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	defer ht.Close()
-	var hcl *htrace.Client
-	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
-	if err != nil {
-		t.Fatalf("failed to create client: %s", err.Error())
-	}
-	defer hcl.Close()
-
-	// Create some random trace spans.
-	NUM_TEST_SPANS := 30
-	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
-
-	// Write half of the spans to htraced via the client.
-	err = hcl.WriteSpans(allSpans[0 : NUM_TEST_SPANS/2])
-	if err != nil {
-		t.Fatalf("WriteSpans(0:%d) failed: %s\n", NUM_TEST_SPANS/2,
-			err.Error())
-	}
-	ht.Store.WrittenSpans.Waits(int64(NUM_TEST_SPANS / 2))
-
-	// Look up the first half of the spans.  They should be found.
-	var span *common.Span
-	for i := 0; i < NUM_TEST_SPANS/2; i++ {
-		span, err = hcl.FindSpan(allSpans[i].Id)
-		if err != nil {
-			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
-		}
-		common.ExpectSpansEqual(t, allSpans[i], span)
-	}
-
-	// Look up the second half of the spans.  They should not be found.
-	for i := NUM_TEST_SPANS / 2; i < NUM_TEST_SPANS; i++ {
-		span, err = hcl.FindSpan(allSpans[i].Id)
-		if err != nil {
-			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
-		}
-		if span != nil {
-			t.Fatalf("Unexpectedly found a span we never wrote to "+
-				"the server: FindSpan(%d) succeeded\n", i)
-		}
-	}
-
-	// Test FindChildren
-	childSpan := allSpans[1]
-	parentId := childSpan.Parents[0]
-	var children []common.SpanId
-	children, err = hcl.FindChildren(parentId, 1)
-	if err != nil {
-		t.Fatalf("FindChildren(%s) failed: %s\n", parentId, err.Error())
-	}
-	if len(children) != 1 {
-		t.Fatalf("FindChildren(%s) returned an invalid number of "+
-			"children: expected %d, got %d\n", parentId, 1, len(children))
-	}
-	if !children[0].Equal(childSpan.Id) {
-		t.Fatalf("FindChildren(%s) returned an invalid child id: expected %s, "+
-			" got %s\n", parentId, childSpan.Id, children[0])
-	}
-
-	// Test FindChildren on a span that has no children
-	childlessSpan := allSpans[NUM_TEST_SPANS/2]
-	children, err = hcl.FindChildren(childlessSpan.Id, 10)
-	if err != nil {
-		t.Fatalf("FindChildren(%d) failed: %s\n", childlessSpan.Id, err.Error())
-	}
-	if len(children) != 0 {
-		t.Fatalf("FindChildren(%d) returned an invalid number of "+
-			"children: expected %d, got %d\n", childlessSpan.Id, 0, len(children))
-	}
-
-	// Test Query
-	var query common.Query
-	query = common.Query{Lim: 10}
-	spans, err := hcl.Query(&query)
-	if err != nil {
-		t.Fatalf("Query({lim: %d}) failed: %s\n", 10, err.Error())
-	}
-	if len(spans) != 10 {
-		t.Fatalf("Query({lim: %d}) returned an invalid number of "+
-			"children: expected %d, got %d\n", 10, 10, len(spans))
-	}
-}
-
-func TestDumpAll(t *testing.T) {
-	htraceBld := &MiniHTracedBuilder{Name: "TestDumpAll",
-		DataDirs:     make([]string, 2),
-		WrittenSpans: common.NewSemaphore(0),
-		Cnf: map[string]string{
-			conf.HTRACE_LOG_LEVEL: "INFO",
-		},
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	defer ht.Close()
-	var hcl *htrace.Client
-	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
-	if err != nil {
-		t.Fatalf("failed to create client: %s", err.Error())
-	}
-	defer hcl.Close()
-
-	NUM_TEST_SPANS := 100
-	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
-	sort.Sort(allSpans)
-	err = hcl.WriteSpans(allSpans)
-	if err != nil {
-		t.Fatalf("WriteSpans failed: %s\n", err.Error())
-	}
-	ht.Store.WrittenSpans.Waits(int64(NUM_TEST_SPANS))
-	out := make(chan *common.Span, NUM_TEST_SPANS)
-	var dumpErr error
-	go func() {
-		dumpErr = hcl.DumpAll(3, out)
-	}()
-	var numSpans int
-	nextLogTime := time.Now().Add(time.Millisecond * 5)
-	for {
-		span, channelOpen := <-out
-		if !channelOpen {
-			break
-		}
-		common.ExpectSpansEqual(t, allSpans[numSpans], span)
-		numSpans++
-		if testing.Verbose() {
-			now := time.Now()
-			if !now.Before(nextLogTime) {
-				nextLogTime = now
-				nextLogTime = nextLogTime.Add(time.Millisecond * 5)
-				fmt.Printf("read back %d span(s)...\n", numSpans)
-			}
-		}
-	}
-	if numSpans != len(allSpans) {
-		t.Fatalf("expected to read %d spans... but only read %d\n",
-			len(allSpans), numSpans)
-	}
-	if dumpErr != nil {
-		t.Fatalf("got dump error %s\n", dumpErr.Error())
-	}
-}
-
-const EXAMPLE_CONF_KEY = "example.conf.key"
-const EXAMPLE_CONF_VALUE = "foo.bar.baz"
-
-func TestClientGetServerConf(t *testing.T) {
-	htraceBld := &MiniHTracedBuilder{Name: "TestClientGetServerConf",
-		Cnf: map[string]string{
-			EXAMPLE_CONF_KEY: EXAMPLE_CONF_VALUE,
-		},
-		DataDirs: make([]string, 2)}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	defer ht.Close()
-	var hcl *htrace.Client
-	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
-	if err != nil {
-		t.Fatalf("failed to create client: %s", err.Error())
-	}
-	defer hcl.Close()
-	serverCnf, err2 := hcl.GetServerConf()
-	if err2 != nil {
-		t.Fatalf("failed to call GetServerConf: %s", err2.Error())
-	}
-	if serverCnf[EXAMPLE_CONF_KEY] != EXAMPLE_CONF_VALUE {
-		t.Fatalf("unexpected value for %s: %s",
-			EXAMPLE_CONF_KEY, serverCnf[EXAMPLE_CONF_KEY])
-	}
-}
-
-const TEST_NUM_HRPC_HANDLERS = 2
-
-const TEST_NUM_WRITESPANS = 4
-
-// Tests that HRPC limits the number of simultaneous connections being processed.
-func TestHrpcAdmissionsControl(t *testing.T) {
-	var wg sync.WaitGroup
-	wg.Add(TEST_NUM_WRITESPANS)
-	var numConcurrentHrpcCalls int32
-	testHooks := &hrpcTestHooks{
-		HandleAdmission: func() {
-			defer wg.Done()
-			n := atomic.AddInt32(&numConcurrentHrpcCalls, 1)
-			if n > TEST_NUM_HRPC_HANDLERS {
-				t.Fatalf("The number of concurrent HRPC calls went above "+
-					"%d: it's at %d\n", TEST_NUM_HRPC_HANDLERS, n)
-			}
-			time.Sleep(1 * time.Millisecond)
-			n = atomic.AddInt32(&numConcurrentHrpcCalls, -1)
-			if n >= TEST_NUM_HRPC_HANDLERS {
-				t.Fatalf("The number of concurrent HRPC calls went above "+
-					"%d: it was at %d\n", TEST_NUM_HRPC_HANDLERS, n+1)
-			}
-		},
-	}
-	htraceBld := &MiniHTracedBuilder{Name: "TestHrpcAdmissionsControl",
-		DataDirs: make([]string, 2),
-		Cnf: map[string]string{
-			conf.HTRACE_NUM_HRPC_HANDLERS: fmt.Sprintf("%d", TEST_NUM_HRPC_HANDLERS),
-		},
-		WrittenSpans:  common.NewSemaphore(0),
-		HrpcTestHooks: testHooks,
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	defer ht.Close()
-	var hcl *htrace.Client
-	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
-	if err != nil {
-		t.Fatalf("failed to create client: %s", err.Error())
-	}
-	// Create some random trace spans.
-	allSpans := createRandomTestSpans(TEST_NUM_WRITESPANS)
-	for iter := 0; iter < TEST_NUM_WRITESPANS; iter++ {
-		go func(i int) {
-			err = hcl.WriteSpans(allSpans[i : i+1])
-			if err != nil {
-				t.Fatalf("WriteSpans failed: %s\n", err.Error())
-			}
-		}(iter)
-	}
-	wg.Wait()
-	ht.Store.WrittenSpans.Waits(int64(TEST_NUM_WRITESPANS))
-}
-
-// Tests that HRPC I/O timeouts work.
-func TestHrpcIoTimeout(t *testing.T) {
-	htraceBld := &MiniHTracedBuilder{Name: "TestHrpcIoTimeout",
-		DataDirs: make([]string, 2),
-		Cnf: map[string]string{
-			conf.HTRACE_NUM_HRPC_HANDLERS:  fmt.Sprintf("%d", TEST_NUM_HRPC_HANDLERS),
-			conf.HTRACE_HRPC_IO_TIMEOUT_MS: "1",
-		},
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		t.Fatalf("failed to create datastore: %s", err.Error())
-	}
-	defer ht.Close()
-	var hcl *htrace.Client
-	finishClient := make(chan interface{})
-	defer func() {
-		// Close the finishClient channel, if it hasn't already been closed.
-		defer func() { recover() }()
-		close(finishClient)
-	}()
-	testHooks := &htrace.TestHooks{
-		HandleWriteRequestBody: func() {
-			<-finishClient
-		},
-	}
-	hcl, err = htrace.NewClient(ht.ClientConf(), testHooks)
-	if err != nil {
-		t.Fatalf("failed to create client: %s", err.Error())
-	}
-	// Create some random trace spans.
-	allSpans := createRandomTestSpans(TEST_NUM_WRITESPANS)
-	var wg sync.WaitGroup
-	wg.Add(TEST_NUM_WRITESPANS)
-	for iter := 0; iter < TEST_NUM_WRITESPANS; iter++ {
-		go func(i int) {
-			defer wg.Done()
-			// Ignore the error return: the client retries internally, so
-			// this will usually succeed eventually.
-			// Keep in mind that we only block until we have seen
-			// TEST_NUM_WRITESPANS I/O errors in the HRPC server-- after that,
-			// we let requests through so that the test can exit cleanly.
-			hcl.WriteSpans(allSpans[i : i+1])
-		}(iter)
-	}
-	for {
-		if ht.Hsv.GetNumIoErrors() >= TEST_NUM_WRITESPANS {
-			break
-		}
-		time.Sleep(1000 * time.Nanosecond)
-	}
-	close(finishClient)
-	wg.Wait()
-}
-
-func doWriteSpans(name string, N int, maxSpansPerRpc uint32, b *testing.B) {
-	htraceBld := &MiniHTracedBuilder{Name: "doWriteSpans",
-		Cnf: map[string]string{
-			conf.HTRACE_LOG_LEVEL: "INFO",
-			conf.HTRACE_NUM_HRPC_HANDLERS: "20",
-		},
-		WrittenSpans: common.NewSemaphore(int64(1 - N)),
-	}
-	ht, err := htraceBld.Build()
-	if err != nil {
-		panic(err)
-	}
-	defer ht.Close()
-	rnd := rand.New(rand.NewSource(1))
-	allSpans := make([]*common.Span, N)
-	for n := 0; n < N; n++ {
-		allSpans[n] = test.NewRandomSpan(rnd, allSpans[0:n])
-	}
-	// Determine how many calls to WriteSpans we should make.  Each writeSpans
-	// message should be small enough so that it doesn't exceed the max RPC
-	// body length limit.  TODO: a production-quality golang client would do
-	// this internally rather than needing us to do it here in the unit test.
-	bodyLen := (4 * common.MAX_HRPC_BODY_LENGTH) / 5
-	reqs := make([][]*common.Span, 0, 4)
-	curReq := -1
-	curReqLen := bodyLen
-	var curReqSpans uint32
-	mh := new(codec.MsgpackHandle)
-	mh.WriteExt = true
-	var mbuf [8192]byte
-	buf := mbuf[:0]
-	enc := codec.NewEncoderBytes(&buf, mh)
-	for n := 0; n < N; n++ {
-		span := allSpans[n]
-		if (curReqSpans >= maxSpansPerRpc) ||
-			(curReqLen >= bodyLen) {
-			reqs = append(reqs, make([]*common.Span, 0, 16))
-			curReqLen = 0
-			curReq++
-			curReqSpans = 0
-		}
-		buf = mbuf[:0]
-		enc.ResetBytes(&buf)
-		err := enc.Encode(span)
-		if err != nil {
-			panic(fmt.Sprintf("Error encoding span %s: %s\n",
-				span.String(), err.Error()))
-		}
-		bufLen := len(buf)
-		if bufLen > (bodyLen / 5) {
-			panic(fmt.Sprintf("Span too long at %d bytes\n", bufLen))
-		}
-		curReqLen += bufLen
-		reqs[curReq] = append(reqs[curReq], span)
-		curReqSpans++
-	}
-	ht.Store.lg.Infof("num spans: %d.  num WriteSpansReq calls: %d\n", N, len(reqs))
-	var hcl *htrace.Client
-	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
-	if err != nil {
-		panic(fmt.Sprintf("failed to create client: %s", err.Error()))
-	}
-	defer hcl.Close()
-
-	// Reset the timer to avoid including the time required to create new
-	// random spans in the benchmark total.
-	if b != nil {
-		b.ResetTimer()
-	}
-
-	// Write many random spans.
-	for reqIdx := range reqs {
-		go func(i int) {
-			err = hcl.WriteSpans(reqs[i])
-			if err != nil {
-				panic(fmt.Sprintf("failed to send WriteSpans request %d: %s",
-					i, err.Error()))
-			}
-		}(reqIdx)
-	}
-	// Wait for all the spans to be written.
-	ht.Store.WrittenSpans.Wait()
-}
-
-// This is a test of how quickly we can create new spans via WriteSpans RPCs.
-// Like BenchmarkDatastoreWrites, it creates b.N spans in the datastore.
-// Unlike that benchmark, it sends the spans via RPC.
-// Suggested flags for running this:
-// -tags unsafe -cpu 16 -benchtime=1m
-func BenchmarkWriteSpans(b *testing.B) {
-	doWriteSpans("BenchmarkWriteSpans", b.N, math.MaxUint32, b)
-}
-
-func TestWriteSpansRpcs(t *testing.T) {
-	doWriteSpans("TestWriteSpansRpcs", 3000, 1000, nil)
-}
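
The request-splitting loop in doWriteSpans above reuses one msgpack encoder and measures each encoded span so that every WriteSpans call stays under the RPC body limit.  Below is a stripped-down sketch of that encode-measure-batch pattern (not part of the original file); bodyLimit and the string items are illustrative stand-ins rather than htraced types, while the codec calls are the same ones used above.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	const bodyLimit = 16 // stand-in for (4 * common.MAX_HRPC_BODY_LENGTH) / 5
	items := []string{"alpha", "beta", "gamma", "delta"}

	mh := new(codec.MsgpackHandle)
	mh.WriteExt = true
	var buf []byte
	enc := codec.NewEncoderBytes(&buf, mh)

	var batches [][]string
	cur := []string{}
	curLen := 0
	for _, it := range items {
		buf = buf[:0]
		enc.ResetBytes(&buf) // reuse the encoder; just point it at a fresh buffer
		if err := enc.Encode(it); err != nil {
			panic(err)
		}
		if curLen+len(buf) > bodyLimit && len(cur) > 0 {
			batches = append(batches, cur) // this batch is full; start another
			cur = []string{}
			curLen = 0
		}
		cur = append(cur, it)
		curLen += len(buf)
	}
	if len(cur) > 0 {
		batches = append(batches, cur)
	}
	fmt.Printf("%d item(s) split into %d batch(es)\n", len(items), len(batches))
}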

http://git-wip-us.apache.org/repos/asf/incubator-htrace/blob/5737e65b/htrace-htraced/go/src/org/apache/htrace/htraced/datastore.go
----------------------------------------------------------------------
diff --git a/htrace-htraced/go/src/org/apache/htrace/htraced/datastore.go b/htrace-htraced/go/src/org/apache/htrace/htraced/datastore.go
deleted file mode 100644
index 82fb7b5..0000000
--- a/htrace-htraced/go/src/org/apache/htrace/htraced/datastore.go
+++ /dev/null
@@ -1,1340 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package main
-
-import (
-	"bytes"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"github.com/jmhodges/levigo"
-	"github.com/ugorji/go/codec"
-	"org/apache/htrace/common"
-	"org/apache/htrace/conf"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-//
-// The data store code for HTraced.
-//
-// This code stores the trace spans.  We use levelDB here so that we don't have to store everything
-// in memory at all times.  The data is sharded across multiple levelDB databases in multiple
-// directories.  Normally, these multiple directories will be on multiple disk drives.
-//
-// The main emphasis in the HTraceD data store is on quickly and efficiently storing trace span data
-// coming from many daemons.  Durability is not as big a concern as in some data stores, since
-// losing a little bit of trace data if htraced goes down is not critical.  We use msgpack
-// for serialization.  We assume that there will be many more writes than reads.
-//
-// Schema
-// w -> ShardInfo
-// s[16-byte-sid] -> SpanData
-// b[8-byte-big-endian-begin-time][16-byte-child-sid] -> {}
-// e[8-byte-big-endian-end-time][16-byte-child-sid] -> {}
-// d[8-byte-big-endian-duration][16-byte-child-sid] -> {}
-// p[16-byte-parent-sid][16-byte-child-sid] -> {}
-//
-// Note that span IDs are 128-bit (16-byte) values.
-// Begin times, end times, and durations are signed 64-bit numbers.
-// In order to get LevelDB to properly compare the signed 64-bit quantities,
-// we flip the highest bit.  This way, we can get leveldb to view negative
-// quantities as less than non-negative ones.  This also means that we can do
-// all queries using unsigned 64-bit math, rather than having to special-case
-// the signed fields.
-//
-
-var EMPTY_BYTE_BUF []byte = []byte{}
-
-const SPAN_ID_INDEX_PREFIX = 's'
-const BEGIN_TIME_INDEX_PREFIX = 'b'
-const END_TIME_INDEX_PREFIX = 'e'
-const DURATION_INDEX_PREFIX = 'd'
-const PARENT_ID_INDEX_PREFIX = 'p'
-const INVALID_INDEX_PREFIX = 0
-
-// The maximum span expiry time, in milliseconds.
-// For all practical purposes this is "never" since it's more than a million years.
-const MAX_SPAN_EXPIRY_MS = 0x7ffffffffffffff
-
-type IncomingSpan struct {
-	// The address that the span was sent from.
-	Addr string
-
-	// The span.
-	*common.Span
-
-	// Serialized span data
-	SpanDataBytes []byte
-}
-
-// A single directory containing a levelDB instance.
-type shard struct {
-	// The data store that this shard is part of
-	store *dataStore
-
-	// The LevelDB instance.
-	ldb *levigo.DB
-
-	// The path to the leveldb directory this shard is managing.
-	path string
-
-	// Incoming requests to write Spans.
-	incoming chan []*IncomingSpan
-
-	// A channel for incoming heartbeats
-	heartbeats chan interface{}
-
-	// Tracks whether the shard goroutine has exited.
-	exited sync.WaitGroup
-}
-
-// Process incoming spans for a shard.
-func (shd *shard) processIncoming() {
-	lg := shd.store.lg
-	defer func() {
-		lg.Infof("Shard processor for %s exiting.\n", shd.path)
-		shd.exited.Done()
-	}()
-	for {
-		select {
-		case spans := <-shd.incoming:
-			if spans == nil {
-				return
-			}
-			totalWritten := 0
-			totalDropped := 0
-			for spanIdx := range spans {
-				err := shd.writeSpan(spans[spanIdx])
-				if err != nil {
-					lg.Errorf("Shard processor for %s got fatal error %s.\n",
-						shd.path, err.Error())
-					totalDropped++
-				} else {
-					if lg.TraceEnabled() {
-						lg.Tracef("Shard processor for %s wrote span %s.\n",
-							shd.path, spans[spanIdx].ToJson())
-					}
-					totalWritten++
-				}
-			}
-			shd.store.msink.UpdatePersisted(spans[0].Addr, totalWritten, totalDropped)
-			if shd.store.WrittenSpans != nil {
-				lg.Debugf("Shard %s incrementing WrittenSpans by %d\n", shd.path, len(spans))
-				shd.store.WrittenSpans.Posts(int64(len(spans)))
-			}
-		case <-shd.heartbeats:
-			lg.Tracef("Shard processor for %s handling heartbeat.\n", shd.path)
-			shd.pruneExpired()
-		}
-	}
-}
-
-func (shd *shard) pruneExpired() {
-	lg := shd.store.rpr.lg
-	src, err := CreateReaperSource(shd)
-	if err != nil {
-		lg.Errorf("Error creating reaper source for shd(%s): %s\n",
-			shd.path, err.Error())
-		return
-	}
-	var totalReaped uint64
-	defer func() {
-		src.Close()
-		if totalReaped > 0 {
-			atomic.AddUint64(&shd.store.rpr.ReapedSpans, totalReaped)
-		}
-	}()
-	urdate := s2u64(shd.store.rpr.GetReaperDate())
-	for {
-		span := src.next()
-		if span == nil {
-			lg.Debugf("After reaping %d span(s), no more found in shard %s "+
-				"to reap.\n", totalReaped, shd.path)
-			return
-		}
-		begin := s2u64(span.Begin)
-		if begin >= urdate {
-			lg.Debugf("After reaping %d span(s), the remaining spans in "+
-				"shard %s are new enough to be kept\n",
-				totalReaped, shd.path)
-			return
-		}
-		err = shd.DeleteSpan(span)
-		if err != nil {
-			lg.Errorf("Error deleting span %s from shd(%s): %s\n",
-				span.String(), shd.path, err.Error())
-			return
-		}
-		if lg.TraceEnabled() {
-			lg.Tracef("Reaped span %s from shard %s\n", span.String(), shd.path)
-		}
-		totalReaped++
-	}
-}
-
-// Delete a span from the shard.  Note that leveldb may retain the data until
-// compaction(s) remove it.
-func (shd *shard) DeleteSpan(span *common.Span) error {
-	batch := levigo.NewWriteBatch()
-	defer batch.Close()
-	primaryKey :=
-		append([]byte{SPAN_ID_INDEX_PREFIX}, span.Id.Val()...)
-	batch.Delete(primaryKey)
-	for parentIdx := range span.Parents {
-		key := append(append([]byte{PARENT_ID_INDEX_PREFIX},
-			span.Parents[parentIdx].Val()...), span.Id.Val()...)
-		batch.Delete(key)
-	}
-	beginTimeKey := append(append([]byte{BEGIN_TIME_INDEX_PREFIX},
-		u64toSlice(s2u64(span.Begin))...), span.Id.Val()...)
-	batch.Delete(beginTimeKey)
-	endTimeKey := append(append([]byte{END_TIME_INDEX_PREFIX},
-		u64toSlice(s2u64(span.End))...), span.Id.Val()...)
-	batch.Delete(endTimeKey)
-	durationKey := append(append([]byte{DURATION_INDEX_PREFIX},
-		u64toSlice(s2u64(span.Duration()))...), span.Id.Val()...)
-	batch.Delete(durationKey)
-	err := shd.ldb.Write(shd.store.writeOpts, batch)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert a signed 64-bit number into an unsigned 64-bit number.  We flip the
-// highest bit, so that negative input values map to unsigned numbers which are
-// less than non-negative input values.
-func s2u64(val int64) uint64 {
-	ret := uint64(val)
-	ret ^= 0x8000000000000000
-	return ret
-}
-
-func u64toSlice(val uint64) []byte {
-	return []byte{
-		byte(0xff & (val >> 56)),
-		byte(0xff & (val >> 48)),
-		byte(0xff & (val >> 40)),
-		byte(0xff & (val >> 32)),
-		byte(0xff & (val >> 24)),
-		byte(0xff & (val >> 16)),
-		byte(0xff & (val >> 8)),
-		byte(0xff & (val >> 0))}
-		byte(0xff & (val >> 0))}
-}
-
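
A quick standalone check of the bit-flip trick described in the header comment above (not part of the original file).  s2u64 and be64 are local re-statements of the two functions above, spanId is a stand-in for common.SpanId.Val(), and the 16-byte span-id length is inferred from the key[9:25] and key[1:17] slicing further down.

package main

import (
	"bytes"
	"fmt"
)

// Same flip as s2u64 above.
func s2u64(val int64) uint64 {
	return uint64(val) ^ 0x8000000000000000
}

// Big-endian encoding, equivalent to u64toSlice above.
func be64(val uint64) []byte {
	b := make([]byte, 8)
	for i := 0; i < 8; i++ {
		b[i] = byte(val >> uint(56-8*i))
	}
	return b
}

func main() {
	neg := be64(s2u64(-5)) // a negative begin time
	pos := be64(s2u64(3))  // a non-negative begin time
	fmt.Println(bytes.Compare(neg, pos) < 0) // true: negatives sort first bytewise

	// A begin-time index key: 'b' prefix + flipped big-endian time + span id.
	spanId := make([]byte, 16) // stand-in for common.SpanId.Val()
	key := append(append([]byte{'b'}, be64(s2u64(3))...), spanId...)
	fmt.Println(len(key)) // 25, matching the key[9:25] slicing used below
}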
-func (shd *shard) writeSpan(ispan *IncomingSpan) error {
-	batch := levigo.NewWriteBatch()
-	defer batch.Close()
-	span := ispan.Span
-	primaryKey :=
-		append([]byte{SPAN_ID_INDEX_PREFIX}, span.Id.Val()...)
-	batch.Put(primaryKey, ispan.SpanDataBytes)
-
-	// Add this to the parent index.
-	for parentIdx := range span.Parents {
-		key := append(append([]byte{PARENT_ID_INDEX_PREFIX},
-			span.Parents[parentIdx].Val()...), span.Id.Val()...)
-		batch.Put(key, EMPTY_BYTE_BUF)
-	}
-
-	// Add to the other secondary indices.
-	beginTimeKey := append(append([]byte{BEGIN_TIME_INDEX_PREFIX},
-		u64toSlice(s2u64(span.Begin))...), span.Id.Val()...)
-	batch.Put(beginTimeKey, EMPTY_BYTE_BUF)
-	endTimeKey := append(append([]byte{END_TIME_INDEX_PREFIX},
-		u64toSlice(s2u64(span.End))...), span.Id.Val()...)
-	batch.Put(endTimeKey, EMPTY_BYTE_BUF)
-	durationKey := append(append([]byte{DURATION_INDEX_PREFIX},
-		u64toSlice(s2u64(span.Duration()))...), span.Id.Val()...)
-	batch.Put(durationKey, EMPTY_BYTE_BUF)
-
-	err := shd.ldb.Write(shd.store.writeOpts, batch)
-	if err != nil {
-		shd.store.lg.Errorf("Error writing span %s to leveldb at %s: %s\n",
-			span.String(), shd.path, err.Error())
-		return err
-	}
-	return nil
-}
-
-func (shd *shard) FindChildren(sid common.SpanId, childIds []common.SpanId,
-	lim int32) ([]common.SpanId, int32, error) {
-	searchKey := append([]byte{PARENT_ID_INDEX_PREFIX}, sid.Val()...)
-	iter := shd.ldb.NewIterator(shd.store.readOpts)
-	defer iter.Close()
-	iter.Seek(searchKey)
-	for {
-		if !iter.Valid() {
-			break
-		}
-		if lim == 0 {
-			break
-		}
-		key := iter.Key()
-		if !bytes.HasPrefix(key, searchKey) {
-			break
-		}
-		id := common.SpanId(key[17:])
-		childIds = append(childIds, id)
-		lim--
-		iter.Next()
-	}
-	return childIds, lim, nil
-}
-
-// Close a shard.
-func (shd *shard) Close() {
-	lg := shd.store.lg
-	shd.incoming <- nil
-	lg.Infof("Waiting for %s to exit...\n", shd.path)
-	shd.exited.Wait()
-	shd.ldb.Close()
-	lg.Infof("Closed %s...\n", shd.path)
-}
-
-type Reaper struct {
-	// The logger used by the reaper
-	lg *common.Logger
-
-	// The number of milliseconds to keep spans around before they are reaped.
-	spanExpiryMs int64
-
-	// The oldest date for which we'll keep spans.
-	reaperDate int64
-
-	// A channel used to send heartbeats to the reaper
-	heartbeats chan interface{}
-
-	// Tracks whether the reaper goroutine has exited
-	exited sync.WaitGroup
-
-	// The lock protecting reaper data.
-	lock sync.Mutex
-
-	// The reaper heartbeater
-	hb *Heartbeater
-
-	// The total number of spans which have been reaped.
-	ReapedSpans uint64
-}
-
-func NewReaper(cnf *conf.Config) *Reaper {
-	rpr := &Reaper{
-		lg:           common.NewLogger("reaper", cnf),
-		spanExpiryMs: cnf.GetInt64(conf.HTRACE_SPAN_EXPIRY_MS),
-		heartbeats:   make(chan interface{}, 1),
-	}
-	if rpr.spanExpiryMs >= MAX_SPAN_EXPIRY_MS || rpr.spanExpiryMs <= 0 {
-		rpr.spanExpiryMs = MAX_SPAN_EXPIRY_MS
-	}
-	rpr.hb = NewHeartbeater("ReaperHeartbeater",
-		cnf.GetInt64(conf.HTRACE_REAPER_HEARTBEAT_PERIOD_MS), rpr.lg)
-	rpr.exited.Add(1)
-	go rpr.run()
-	rpr.hb.AddHeartbeatTarget(&HeartbeatTarget{
-		name:       "reaper",
-		targetChan: rpr.heartbeats,
-	})
-	var when string
-	if rpr.spanExpiryMs >= MAX_SPAN_EXPIRY_MS {
-		when = "never"
-	} else {
-		when = "after " + (time.Duration(rpr.spanExpiryMs) * time.Millisecond).String()
-	}
-	rpr.lg.Infof("Initializing span reaper: span timeout = %s.\n", when)
-	return rpr
-}
-
-func (rpr *Reaper) run() {
-	defer func() {
-		rpr.lg.Info("Exiting Reaper goroutine.\n")
-		rpr.exited.Done()
-	}()
-
-	for {
-		_, isOpen := <-rpr.heartbeats
-		if !isOpen {
-			return
-		}
-		rpr.handleHeartbeat()
-	}
-}
-
-func (rpr *Reaper) handleHeartbeat() {
-	// TODO: check dataStore fullness
-	now := common.TimeToUnixMs(time.Now().UTC())
-	d, updated := func() (int64, bool) {
-		rpr.lock.Lock()
-		defer rpr.lock.Unlock()
-		newReaperDate := now - rpr.spanExpiryMs
-		if newReaperDate > rpr.reaperDate {
-			rpr.reaperDate = newReaperDate
-			return rpr.reaperDate, true
-		} else {
-			return rpr.reaperDate, false
-		}
-	}()
-	if rpr.lg.DebugEnabled() {
-		if updated {
-			rpr.lg.Debugf("Updating UTC reaper date to %s.\n",
-				common.UnixMsToTime(d).Format(time.RFC3339))
-		} else {
-			rpr.lg.Debugf("Not updating previous reaperDate of %s.\n",
-				common.UnixMsToTime(d).Format(time.RFC3339))
-		}
-	}
-}
-
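
A worked example of the reaper-date arithmetic above, using a hypothetical one-day expiry (span.expiry.ms = 86400000): each heartbeat advances the reaper date to "now minus one day", and pruneExpired then deletes spans whose Begin falls before that date.  The millisecond conversion below is presumably what common.TimeToUnixMs computes.

package main

import (
	"fmt"
	"time"
)

func main() {
	const spanExpiryMs = int64(24 * 60 * 60 * 1000) // hypothetical: one day
	// Milliseconds since the epoch.
	now := time.Now().UTC().UnixNano() / int64(time.Millisecond)
	reaperDate := now - spanExpiryMs
	fmt.Println("reap spans that began before",
		time.Unix(reaperDate/1000, 0).UTC().Format(time.RFC3339))
}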
-func (rpr *Reaper) GetReaperDate() int64 {
-	rpr.lock.Lock()
-	defer rpr.lock.Unlock()
-	return rpr.reaperDate
-}
-
-func (rpr *Reaper) SetReaperDate(rdate int64) {
-	rpr.lock.Lock()
-	defer rpr.lock.Unlock()
-	rpr.reaperDate = rdate
-}
-
-func (rpr *Reaper) Shutdown() {
-	rpr.hb.Shutdown()
-	close(rpr.heartbeats)
-}
-
-// The Data Store.
-type dataStore struct {
-	lg *common.Logger
-
-	// The shards which manage our LevelDB instances.
-	shards []*shard
-
-	// The read options to use for LevelDB.
-	readOpts *levigo.ReadOptions
-
-	// The write options to use for LevelDB.
-	writeOpts *levigo.WriteOptions
-
-	// If non-nil, a semaphore we will increment once for each span we receive.
-	// Used for testing.
-	WrittenSpans *common.Semaphore
-
-	// The metrics sink.
-	msink *MetricsSink
-
-	// The heartbeater which periodically asks shards to update the MetricsSink.
-	hb *Heartbeater
-
-	// The reaper for this datastore
-	rpr *Reaper
-
-	// When this datastore was started (in UTC milliseconds since the epoch)
-	startMs int64
-}
-
-func CreateDataStore(cnf *conf.Config, writtenSpans *common.Semaphore) (*dataStore, error) {
-	dld := NewDataStoreLoader(cnf)
-	defer dld.Close()
-	err := dld.Load()
-	if err != nil {
-		dld.lg.Errorf("Error loading datastore: %s\n", err.Error())
-		return nil, err
-	}
-	store := &dataStore {
-		lg: dld.lg,
-		shards: make([]*shard, len(dld.shards)),
-		readOpts: dld.readOpts,
-		writeOpts: dld.writeOpts,
-		WrittenSpans: writtenSpans,
-		msink: NewMetricsSink(cnf),
-		hb: NewHeartbeater("DatastoreHeartbeater",
-			cnf.GetInt64(conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS), dld.lg),
-		rpr: NewReaper(cnf),
-		startMs: common.TimeToUnixMs(time.Now().UTC()),
-	}
-	spanBufferSize := cnf.GetInt(conf.HTRACE_DATA_STORE_SPAN_BUFFER_SIZE)
-	for shdIdx := range store.shards {
-		shd := &shard {
-			store: store,
-			ldb: dld.shards[shdIdx].ldb,
-			path: dld.shards[shdIdx].path,
-			incoming: make(chan []*IncomingSpan, spanBufferSize),
-			heartbeats: make(chan interface{}, 1),
-		}
-		shd.exited.Add(1)
-		go shd.processIncoming()
-		store.shards[shdIdx] = shd
-		store.hb.AddHeartbeatTarget(&HeartbeatTarget{
-			name:       fmt.Sprintf("shard(%s)", shd.path),
-			targetChan: shd.heartbeats,
-		})
-	}
-	dld.DisownResources()
-	return store, nil
-}
-
-// Close the DataStore.
-func (store *dataStore) Close() {
-	if store.hb != nil {
-		store.hb.Shutdown()
-		store.hb = nil
-	}
-	for idx := range store.shards {
-		if store.shards[idx] != nil {
-			store.shards[idx].Close()
-			store.shards[idx] = nil
-		}
-	}
-	if store.rpr != nil {
-		store.rpr.Shutdown()
-		store.rpr = nil
-	}
-	if store.readOpts != nil {
-		store.readOpts.Close()
-		store.readOpts = nil
-	}
-	if store.writeOpts != nil {
-		store.writeOpts.Close()
-		store.writeOpts = nil
-	}
-	if store.lg != nil {
-		store.lg.Close()
-		store.lg = nil
-	}
-}
-
-// Get the index of the shard which stores the given spanId.
-func (store *dataStore) getShardIndex(sid common.SpanId) int {
-	return int(sid.Hash32() % uint32(len(store.shards)))
-}
-
-const WRITESPANS_BATCH_SIZE = 128
-
-// SpanIngestor is a class used internally to ingest spans from an RPC
-// endpoint.  It groups spans destined for a particular shard into small
-// batches, so that we can reduce the number of objects that need to be sent
-// over the shard's "incoming" channel.  Since sending objects over a channel
-// requires goroutine synchronization, this improves performance.
-//
-// SpanIngestor also allows us to reuse the same encoder object for many spans,
-// rather than creating a new encoder per span.  This avoids re-doing the
-// encoder setup for each span, and also generates less garbage.
-type SpanIngestor struct {
-	// The logger to use.
-	lg *common.Logger
-
-	// The dataStore we are ingesting spans into.
-	store *dataStore
-
-	// The remote address these spans are coming from.
-	addr string
-
-	// Default TracerId
-	defaultTrid string
-
-	// The msgpack handle to use to serialize the spans.
-	mh codec.MsgpackHandle
-
-	// The msgpack encoder to use to serialize the spans.
-	// Caching this avoids generating a lot of garbage and burning CPUs
-	// creating new encoder objects for each span.
-	enc *codec.Encoder
-
-	// The buffer which codec.Encoder is currently serializing to.
-	// We have to create a new buffer for each span because once we hand it off to the shard, the
-	// shard manages the buffer lifecycle.
-	spanDataBytes []byte
-
-	// An array mapping shard index to span batch.
-	batches []*SpanIngestorBatch
-
-	// The total number of spans ingested.  Includes dropped spans.
-	totalIngested int
-
-	// The total number of spans the ingestor dropped because of a server-side error.
-	serverDropped int
-}
-
-// A batch of spans destined for a particular shard.
-type SpanIngestorBatch struct {
-	incoming []*IncomingSpan
-}
-
-func (store *dataStore) NewSpanIngestor(lg *common.Logger,
-	addr string, defaultTrid string) *SpanIngestor {
-	ing := &SpanIngestor{
-		lg:            lg,
-		store:         store,
-		addr:          addr,
-		defaultTrid:   defaultTrid,
-		spanDataBytes: make([]byte, 0, 1024),
-		batches:       make([]*SpanIngestorBatch, len(store.shards)),
-	}
-	ing.mh.WriteExt = true
-	ing.enc = codec.NewEncoderBytes(&ing.spanDataBytes, &ing.mh)
-	for batchIdx := range ing.batches {
-		ing.batches[batchIdx] = &SpanIngestorBatch{
-			incoming: make([]*IncomingSpan, 0, WRITESPANS_BATCH_SIZE),
-		}
-	}
-	return ing
-}
-
-func (ing *SpanIngestor) IngestSpan(span *common.Span) {
-	ing.totalIngested++
-	// Make sure the span ID is valid.
-	spanIdProblem := span.Id.FindProblem()
-	if spanIdProblem != "" {
-		// Can't print the invalid span ID because String() might fail.
-		ing.lg.Warnf("Invalid span ID: %s\n", spanIdProblem)
-		ing.serverDropped++
-		return
-	}
-
-	// Set the default tracer id, if needed.
-	if span.TracerId == "" {
-		span.TracerId = ing.defaultTrid
-	}
-
-	// Encode the span data.  Doing the encoding here is better than doing it
-	// in the shard goroutine, because we can achieve more parallelism.
-	// There is one shard goroutine per shard, but potentially many more
-	// ingestors per shard.
-	err := ing.enc.Encode(span.SpanData)
-	if err != nil {
-		ing.lg.Warnf("Failed to encode span ID %s: %s\n",
-			span.Id.String(), err.Error())
-		ing.serverDropped++
-		return
-	}
-	spanDataBytes := ing.spanDataBytes
-	ing.spanDataBytes = make([]byte, 0, 1024)
-	ing.enc.ResetBytes(&ing.spanDataBytes)
-
-	// Determine which shard this span should go to.
-	shardIdx := ing.store.getShardIndex(span.Id)
-	batch := ing.batches[shardIdx]
-	incomingLen := len(batch.incoming)
-	if ing.lg.TraceEnabled() {
-		ing.lg.Tracef("SpanIngestor#IngestSpan: spanId=%s, shardIdx=%d, "+
-			"incomingLen=%d, cap(batch.incoming)=%d\n",
-			span.Id.String(), shardIdx, incomingLen, cap(batch.incoming))
-	}
-	if incomingLen+1 == cap(batch.incoming) {
-		if ing.lg.TraceEnabled() {
-			ing.lg.Tracef("SpanIngestor#IngestSpan: flushing %d spans for "+
-				"shard %d\n", len(batch.incoming), shardIdx)
-		}
-		ing.store.WriteSpans(shardIdx, batch.incoming)
-		batch.incoming = make([]*IncomingSpan, 1, WRITESPANS_BATCH_SIZE)
-		incomingLen = 0
-	} else {
-		batch.incoming = batch.incoming[0 : incomingLen+1]
-	}
-	batch.incoming[incomingLen] = &IncomingSpan{
-		Addr:          ing.addr,
-		Span:          span,
-		SpanDataBytes: spanDataBytes,
-	}
-}
-
-func (ing *SpanIngestor) Close(startTime time.Time) {
-	for shardIdx := range ing.batches {
-		batch := ing.batches[shardIdx]
-		if len(batch.incoming) > 0 {
-			if ing.lg.TraceEnabled() {
-				ing.lg.Tracef("SpanIngestor#Close: flushing %d span(s) for "+
-					"shard %d\n", len(batch.incoming), shardIdx)
-			}
-			ing.store.WriteSpans(shardIdx, batch.incoming)
-		}
-		batch.incoming = nil
-	}
-	ing.lg.Debugf("Closed span ingestor for %s.  Ingested %d span(s); dropped "+
-		"%d span(s).\n", ing.addr, ing.totalIngested, ing.serverDropped)
-
-	endTime := time.Now()
-	ing.store.msink.UpdateIngested(ing.addr, ing.totalIngested,
-		ing.serverDropped, endTime.Sub(startTime))
-}
-
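
A minimal usage sketch of the ingestion path above.  ingestAll is a hypothetical helper, not in the original file; it assumes a *dataStore and a *common.Logger are already in hand, as they are in the HRPC and REST handlers that call NewSpanIngestor.

// ingestAll pushes one request's worth of spans through a single ingestor so
// they are batched per shard and share one msgpack encoder.
func ingestAll(store *dataStore, lg *common.Logger, addr string,
	spans []*common.Span) {
	startTime := time.Now()
	ing := store.NewSpanIngestor(lg, addr, "")
	for i := range spans {
		ing.IngestSpan(spans[i])
	}
	ing.Close(startTime)
}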
-func (store *dataStore) WriteSpans(shardIdx int, ispans []*IncomingSpan) {
-	store.shards[shardIdx].incoming <- ispans
-}
-
-func (store *dataStore) FindSpan(sid common.SpanId) *common.Span {
-	return store.shards[store.getShardIndex(sid)].FindSpan(sid)
-}
-
-func (shd *shard) FindSpan(sid common.SpanId) *common.Span {
-	lg := shd.store.lg
-	primaryKey := append([]byte{SPAN_ID_INDEX_PREFIX}, sid.Val()...)
-	buf, err := shd.ldb.Get(shd.store.readOpts, primaryKey)
-	if err != nil {
-		if strings.Index(err.Error(), "NotFound:") != -1 {
-			return nil
-		}
-		lg.Warnf("Shard(%s): FindSpan(%s) error: %s\n",
-			shd.path, sid.String(), err.Error())
-		return nil
-	}
-	var span *common.Span
-	span, err = shd.decodeSpan(sid, buf)
-	if err != nil {
-		lg.Errorf("Shard(%s): FindSpan(%s) decode error: %s decoding [%s]\n",
-			shd.path, sid.String(), err.Error(), hex.EncodeToString(buf))
-		return nil
-	}
-	return span
-}
-
-func (shd *shard) decodeSpan(sid common.SpanId, buf []byte) (*common.Span, error) {
-	r := bytes.NewBuffer(buf)
-	mh := new(codec.MsgpackHandle)
-	mh.WriteExt = true
-	decoder := codec.NewDecoder(r, mh)
-	data := common.SpanData{}
-	err := decoder.Decode(&data)
-	if err != nil {
-		return nil, err
-	}
-	if data.Parents == nil {
-		data.Parents = []common.SpanId{}
-	}
-	return &common.Span{Id: common.SpanId(sid), SpanData: data}, nil
-}
-
-// Find the children of a given span id.
-func (store *dataStore) FindChildren(sid common.SpanId, lim int32) []common.SpanId {
-	childIds := make([]common.SpanId, 0)
-	var err error
-
-	startIdx := store.getShardIndex(sid)
-	idx := startIdx
-	numShards := len(store.shards)
-	for {
-		if lim == 0 {
-			break
-		}
-		shd := store.shards[idx]
-		childIds, lim, err = shd.FindChildren(sid, childIds, lim)
-		if err != nil {
-			store.lg.Errorf("Shard(%s): FindChildren(%s) error: %s\n",
-				shd.path, sid.String(), err.Error())
-		}
-		idx++
-		if idx >= numShards {
-			idx = 0
-		}
-		if idx == startIdx {
-			break
-		}
-	}
-	return childIds
-}
-
-type predicateData struct {
-	*common.Predicate
-	key []byte
-}
-
-func loadPredicateData(pred *common.Predicate) (*predicateData, error) {
-	p := predicateData{Predicate: pred}
-
-	// Parse the input value given to make sure it matches up with the field
-	// type.
-	switch pred.Field {
-	case common.SPAN_ID:
-		// Span IDs are sent as hex strings.
-		var id common.SpanId
-		if err := id.FromString(pred.Val); err != nil {
-			return nil, errors.New(fmt.Sprintf("Unable to parse span id '%s': %s",
-				pred.Val, err.Error()))
-		}
-		p.key = id.Val()
-		break
-	case common.DESCRIPTION:
-		// Any string is valid for a description.
-		p.key = []byte(pred.Val)
-		break
-	case common.BEGIN_TIME, common.END_TIME, common.DURATION:
-		// Parse a base-10 signed numeric field.
-		v, err := strconv.ParseInt(pred.Val, 10, 64)
-		if err != nil {
-			return nil, errors.New(fmt.Sprintf("Unable to parse %s '%s': %s",
-				pred.Field, pred.Val, err.Error()))
-		}
-		p.key = u64toSlice(s2u64(v))
-		break
-	case common.TRACER_ID:
-		// Any string is valid for a tracer ID.
-		p.key = []byte(pred.Val)
-		break
-	default:
-		return nil, errors.New(fmt.Sprintf("Unknown field %s", pred.Field))
-	}
-
-	// Validate the predicate operation.
-	switch pred.Op {
-	case common.EQUALS, common.LESS_THAN_OR_EQUALS,
-		common.GREATER_THAN_OR_EQUALS, common.GREATER_THAN:
-		break
-	case common.CONTAINS:
-		if p.fieldIsNumeric() {
-			return nil, errors.New(fmt.Sprintf("Can't use CONTAINS on a "+
-				"numeric field like '%s'", pred.Field))
-		}
-	default:
-		return nil, errors.New(fmt.Sprintf("Unknown predicate operation '%s'",
-			pred.Op))
-	}
-
-	return &p, nil
-}
-
-// Get the index prefix for this predicate, or 0 if it is not indexed.
-func (pred *predicateData) getIndexPrefix() byte {
-	switch pred.Field {
-	case common.SPAN_ID:
-		return SPAN_ID_INDEX_PREFIX
-	case common.BEGIN_TIME:
-		return BEGIN_TIME_INDEX_PREFIX
-	case common.END_TIME:
-		return END_TIME_INDEX_PREFIX
-	case common.DURATION:
-		return DURATION_INDEX_PREFIX
-	default:
-		return INVALID_INDEX_PREFIX
-	}
-}
-
-// Returns true if the predicate type is numeric.
-func (pred *predicateData) fieldIsNumeric() bool {
-	switch pred.Field {
-	case common.SPAN_ID, common.BEGIN_TIME, common.END_TIME, common.DURATION:
-		return true
-	default:
-		return false
-	}
-}
-
-// Get the values that this predicate cares about for a given span.
-func (pred *predicateData) extractRelevantSpanData(span *common.Span) []byte {
-	switch pred.Field {
-	case common.SPAN_ID:
-		return span.Id.Val()
-	case common.DESCRIPTION:
-		return []byte(span.Description)
-	case common.BEGIN_TIME:
-		return u64toSlice(s2u64(span.Begin))
-	case common.END_TIME:
-		return u64toSlice(s2u64(span.End))
-	case common.DURATION:
-		return u64toSlice(s2u64(span.Duration()))
-	case common.TRACER_ID:
-		return []byte(span.TracerId)
-	default:
-		panic(fmt.Sprintf("Unknown field type %s.", pred.Field))
-	}
-}
-
-func (pred *predicateData) spanPtrIsBefore(a *common.Span, b *common.Span) bool {
-	// nil is after everything.
-	if a == nil {
-		return false
-	} else if b == nil {
-		return true
-	}
-	// Compare the spans according to this predicate.
-	aVal := pred.extractRelevantSpanData(a)
-	bVal := pred.extractRelevantSpanData(b)
-	cmp := bytes.Compare(aVal, bVal)
-	if pred.Op.IsDescending() {
-		return cmp > 0
-	} else {
-		return cmp < 0
-	}
-}
-
-type satisfiedByReturn int
-
-const (
-	NOT_SATISFIED satisfiedByReturn = iota
-	NOT_YET_SATISFIED
-	SATISFIED
-)
-
-func (r satisfiedByReturn) String() string {
-	switch r {
-	case NOT_SATISFIED:
-		return "NOT_SATISFIED"
-	case NOT_YET_SATISFIED:
-		return "NOT_YET_SATISFIED"
-	case SATISFIED:
-		return "SATISFIED"
-	default:
-		return "(unknown)"
-	}
-}
-
-// Determine whether the predicate is satisfied by the given span.
-func (pred *predicateData) satisfiedBy(span *common.Span) satisfiedByReturn {
-	val := pred.extractRelevantSpanData(span)
-	switch pred.Op {
-	case common.CONTAINS:
-		if bytes.Contains(val, pred.key) {
-			return SATISFIED
-		} else {
-			return NOT_SATISFIED
-		}
-	case common.EQUALS:
-		if bytes.Equal(val, pred.key) {
-			return SATISFIED
-		} else {
-			return NOT_SATISFIED
-		}
-	case common.LESS_THAN_OR_EQUALS:
-		if bytes.Compare(val, pred.key) <= 0 {
-			return SATISFIED
-		} else {
-			return NOT_YET_SATISFIED
-		}
-	case common.GREATER_THAN_OR_EQUALS:
-		if bytes.Compare(val, pred.key) >= 0 {
-			return SATISFIED
-		} else {
-			return NOT_SATISFIED
-		}
-	case common.GREATER_THAN:
-		cmp := bytes.Compare(val, pred.key)
-		if cmp <= 0 {
-			return NOT_YET_SATISFIED
-		} else {
-			return SATISFIED
-		}
-	default:
-		panic(fmt.Sprintf("unknown Op type %s should have been caught "+
-			"during normalization", pred.Op))
-	}
-}
-
-func (pred *predicateData) createSource(store *dataStore, prev *common.Span) (*source, error) {
-	var ret *source
-	src := source{store: store,
-		pred:      pred,
-		shards:    make([]*shard, len(store.shards)),
-		iters:     make([]*levigo.Iterator, 0, len(store.shards)),
-		nexts:     make([]*common.Span, len(store.shards)),
-		numRead:   make([]int, len(store.shards)),
-		keyPrefix: pred.getIndexPrefix(),
-	}
-	if src.keyPrefix == INVALID_INDEX_PREFIX {
-		return nil, errors.New(fmt.Sprintf("Can't create source from unindexed "+
-			"predicate on field %s", pred.Field))
-	}
-	defer func() {
-		if ret == nil {
-			src.Close()
-		}
-	}()
-	for shardIdx := range store.shards {
-		shd := store.shards[shardIdx]
-		src.shards[shardIdx] = shd
-		src.iters = append(src.iters, shd.ldb.NewIterator(store.readOpts))
-	}
-	var searchKey []byte
-	lg := store.lg
-	if prev != nil {
-		// If prev != nil, this query RPC is the continuation of a previous
-		// one.  The final result returned the last time is 'prev'.
-		//
-		// To avoid returning the same results multiple times, we adjust the
-		// predicate here.  If the predicate is on the span id field, we
-		// simply manipulate the span ID we're looking for.
-		//
-		// If the predicate is on a secondary index, we also use span ID, but
-		// in a slightly different way.  Since the secondary indices are
-		// organized as [type-code][8b-secondary-key][16b-span-id], elements
-		// with the same secondary index field are ordered by span ID.  So we
-		// create a 25-byte key incorporating the span ID from 'prev.'
-		startId := common.INVALID_SPAN_ID
-		switch pred.Op {
-		case common.EQUALS:
-			if pred.Field == common.SPAN_ID {
-				// This is an annoying corner case.  There can only be one
-				// result each time we do an EQUALS search for a span id.
-				// Span id is the primary key for all our spans.
-				// But for some reason someone is asking for another result.
-				// We modify the query to search for the illegal 0 span ID,
-				// which will never be present.
-				if lg.DebugEnabled() {
-					lg.Debugf("Attempted to use a continuation token with an EQUALS "+
-						"SPAN_ID query. %s.  Setting search id = 0",
-						pred.Predicate.String())
-				}
-				startId = common.INVALID_SPAN_ID
-			} else {
-				// When doing an EQUALS search on a secondary index, the
-				// results are sorted by span id.
-				startId = prev.Id.Next()
-			}
-		case common.LESS_THAN_OR_EQUALS:
-			// Subtract one from the previous span id.  Since the previous
-			// start ID will never be 0 (0 is an illegal span id), we'll never
-			// wrap around when doing this.
-			startId = prev.Id.Prev()
-		case common.GREATER_THAN_OR_EQUALS:
-			// We can't add one to the span id, since the previous span ID
-			// might be the maximum value.  So just switch over to using
-			// GREATER_THAN.
-			pred.Op = common.GREATER_THAN
-			startId = prev.Id
-		case common.GREATER_THAN:
-			// This one is easy.
-			startId = prev.Id
-		default:
-			str := fmt.Sprintf("Can't use a %v predicate as a source.", pred.Predicate.String())
-			lg.Error(str + "\n")
-			panic(str)
-		}
-		if pred.Field == common.SPAN_ID {
-			pred.key = startId.Val()
-			searchKey = append([]byte{src.keyPrefix}, startId.Val()...)
-		} else {
-			// Start where the previous query left off.  This means adjusting
-			// our uintKey.
-			pred.key = pred.extractRelevantSpanData(prev)
-			searchKey = append(append([]byte{src.keyPrefix}, pred.key...),
-				startId.Val()...)
-		}
-		if lg.TraceEnabled() {
-			lg.Tracef("Handling continuation token %s for %s.  startId=%d, "+
-				"pred.uintKey=%s\n", prev, pred.Predicate.String(), startId,
-				hex.EncodeToString(pred.key))
-		}
-	} else {
-		searchKey = append([]byte{src.keyPrefix}, pred.key...)
-	}
-	for i := range src.iters {
-		src.iters[i].Seek(searchKey)
-	}
-	ret = &src
-	return ret, nil
-}
-
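
The continuation-token handling in createSource above is driven by Query.Prev.  The fragment below is a hypothetical client-side paging loop, not part of this file; it uses only the client calls exercised in client_test.go and assumes hcl is an already-created *htrace.Client.

query := common.Query{
	Lim: 10,
	Predicates: []common.Predicate{
		{Op: common.GREATER_THAN_OR_EQUALS, Field: common.BEGIN_TIME, Val: "100"},
	},
}
for {
	spans, err := hcl.Query(&query)
	if err != nil {
		break // handle the error in real code
	}
	// ... process this page of spans ...
	if len(spans) < query.Lim {
		break // fewer than Lim results means the source is exhausted
	}
	query.Prev = spans[len(spans)-1] // resume after the last span returned
}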
-// A source of spans.
-type source struct {
-	store     *dataStore
-	pred      *predicateData
-	shards    []*shard
-	iters     []*levigo.Iterator
-	nexts     []*common.Span
-	numRead   []int
-	keyPrefix byte
-}
-
-func CreateReaperSource(shd *shard) (*source, error) {
-	store := shd.store
-	p := &common.Predicate{
-		Op:    common.GREATER_THAN_OR_EQUALS,
-		Field: common.BEGIN_TIME,
-		Val:   common.INVALID_SPAN_ID.String(),
-	}
-	pred, err := loadPredicateData(p)
-	if err != nil {
-		return nil, err
-	}
-	src := &source{
-		store:     store,
-		pred:      pred,
-		shards:    []*shard{shd},
-		iters:     make([]*levigo.Iterator, 1),
-		nexts:     make([]*common.Span, 1),
-		numRead:   make([]int, 1),
-		keyPrefix: pred.getIndexPrefix(),
-	}
-	iter := shd.ldb.NewIterator(store.readOpts)
-	src.iters[0] = iter
-	searchKey := append(append([]byte{src.keyPrefix}, pred.key...),
-		pred.key...)
-	iter.Seek(searchKey)
-	return src, nil
-}
-
-// Fill in the entry in the 'next' array for a specific shard.
-func (src *source) populateNextFromShard(shardIdx int) {
-	lg := src.store.lg
-	var err error
-	iter := src.iters[shardIdx]
-	shdPath := src.shards[shardIdx].path
-	if iter == nil {
-		lg.Debugf("Can't populate: No more entries in shard %s\n", shdPath)
-		return // There are no more entries in this shard.
-	}
-	if src.nexts[shardIdx] != nil {
-		lg.Debugf("No need to populate shard %s\n", shdPath)
-		return // We already have a valid entry for this shard.
-	}
-	for {
-		if !iter.Valid() {
-			lg.Debugf("Can't populate: Iterator for shard %s is no longer valid.\n", shdPath)
-			break // Can't read past end of DB
-		}
-		src.numRead[shardIdx]++
-		key := iter.Key()
-		if len(key) < 1 {
-			lg.Warnf("Encountered invalid zero-byte key in shard %s.\n", shdPath)
-			break
-		}
-		ret := src.checkKeyPrefix(key[0], iter)
-		if ret == NOT_SATISFIED {
-			break // Can't read past end of indexed section
-		} else if ret == NOT_YET_SATISFIED {
-			if src.pred.Op.IsDescending() {
-				iter.Prev()
-			} else {
-				iter.Next()
-			}
-			continue // Try again because we are not yet at the indexed section.
-		}
-		var span *common.Span
-		var sid common.SpanId
-		if src.keyPrefix == SPAN_ID_INDEX_PREFIX {
-			// The span id maps to the span itself.
-			sid = common.SpanId(key[1:17])
-			span, err = src.shards[shardIdx].decodeSpan(sid, iter.Value())
-			if err != nil {
-				if lg.DebugEnabled() {
-					lg.Debugf("Internal error decoding span %s in shard %s: %s\n",
-						sid.String(), shdPath, err.Error())
-				}
-				break
-			}
-		} else {
-			// With a secondary index, we have to look up the span by id.
-			sid = common.SpanId(key[9:25])
-			span = src.shards[shardIdx].FindSpan(sid)
-			if span == nil {
-				if lg.DebugEnabled() {
-					lg.Debugf("Internal error rehydrating span %s in shard %s\n",
-						sid.String(), shdPath)
-				}
-				break
-			}
-		}
-		if src.pred.Op.IsDescending() {
-			iter.Prev()
-		} else {
-			iter.Next()
-		}
-		ret = src.pred.satisfiedBy(span)
-		if ret == SATISFIED {
-			if lg.DebugEnabled() {
-				lg.Debugf("Populated valid span %v from shard %s.\n", sid, shdPath)
-			}
-			src.nexts[shardIdx] = span // Found valid entry
-			return
-		}
-		if ret == NOT_SATISFIED {
-			// This and subsequent entries don't satisfy predicate
-			break
-		}
-	}
-	lg.Debugf("Closing iterator for shard %s.\n", shdPath)
-	iter.Close()
-	src.iters[shardIdx] = nil
-}
-
-// Check the key prefix against the key prefix of the query.
-func (src *source) checkKeyPrefix(kp byte, iter *levigo.Iterator) satisfiedByReturn {
-	if kp == src.keyPrefix {
-		return SATISFIED
-	} else if kp < src.keyPrefix {
-		if src.pred.Op.IsDescending() {
-			return NOT_SATISFIED
-		} else {
-			return NOT_YET_SATISFIED
-		}
-	} else {
-		if src.pred.Op.IsDescending() {
-			return NOT_YET_SATISFIED
-		} else {
-			return NOT_SATISFIED
-		}
-	}
-}
-
-
-func (src *source) next() *common.Span {
-	for shardIdx := range src.shards {
-		src.populateNextFromShard(shardIdx)
-	}
-	var best *common.Span
-	bestIdx := -1
-	for shardIdx := range src.iters {
-		span := src.nexts[shardIdx]
-		if src.pred.spanPtrIsBefore(span, best) {
-			best = span
-			bestIdx = shardIdx
-		}
-	}
-	if bestIdx >= 0 {
-		src.nexts[bestIdx] = nil
-	}
-	return best
-}
-
-func (src *source) Close() {
-	for i := range src.iters {
-		if src.iters[i] != nil {
-			src.iters[i].Close()
-		}
-	}
-	src.iters = nil
-}
-
-func (src *source) getStats() string {
-	ret := fmt.Sprintf("Source stats: pred = %s", src.pred.String())
-	prefix := ". "
-	for shardIdx := range src.shards {
-		next := fmt.Sprintf("%sRead %d spans from %s", prefix,
-			src.numRead[shardIdx], src.shards[shardIdx].path)
-		prefix = ", "
-		ret = ret + next
-	}
-	return ret
-}
-
-func (store *dataStore) obtainSource(preds *[]*predicateData, span *common.Span) (*source, error) {
-	// Read spans from the first predicate that is indexed.
-	p := *preds
-	for i := range p {
-		pred := p[i]
-		if pred.getIndexPrefix() != INVALID_INDEX_PREFIX {
-			*preds = append(p[0:i], p[i+1:]...)
-			return pred.createSource(store, span)
-		}
-	}
-	// If there are no predicates that are indexed, read rows in order of span id.
-	spanIdPred := common.Predicate{Op: common.GREATER_THAN_OR_EQUALS,
-		Field: common.SPAN_ID,
-		Val:   common.INVALID_SPAN_ID.String(),
-	}
-	spanIdPredData, err := loadPredicateData(&spanIdPred)
-	if err != nil {
-		return nil, err
-	}
-	return spanIdPredData.createSource(store, span)
-}
-
-func (store *dataStore) HandleQuery(query *common.Query) ([]*common.Span, error, []int) {
-	lg := store.lg
-	// Parse predicate data.
-	var err error
-	preds := make([]*predicateData, len(query.Predicates))
-	for i := range query.Predicates {
-		preds[i], err = loadPredicateData(&query.Predicates[i])
-		if err != nil {
-			return nil, err, nil
-		}
-	}
-	// Get a source of rows.
-	var src *source
-	src, err = store.obtainSource(&preds, query.Prev)
-	if err != nil {
-		return nil, err, nil
-	}
-	defer src.Close()
-	if lg.DebugEnabled() {
-		lg.Debugf("HandleQuery %s: preds = %s, src = %v\n", query, preds, src)
-	}
-
-	// Filter the spans through the remaining predicates.
-	reserved := 32
-	if query.Lim < reserved {
-		reserved = query.Lim
-	}
-	ret := make([]*common.Span, 0, reserved)
-	for {
-		if len(ret) >= query.Lim {
-			if lg.DebugEnabled() {
-				lg.Debugf("HandleQuery %s: hit query limit after obtaining " +
-					"%d results. %s.\n", query, query.Lim, src.getStats())
-			}
-			break // we hit the result size limit
-		}
-		span := src.next()
-		if span == nil {
-			if lg.DebugEnabled() {
-				lg.Debugf("HandleQuery %s: found %d result(s), which are " +
-					"all that exist. %s\n", query, len(ret), src.getStats())
-			}
-			break // the source has no more spans to give
-		}
-		if lg.DebugEnabled() {
-			lg.Debugf("src.next returned span %s\n", span.ToJson())
-		}
-		satisfied := true
-		for predIdx := range preds {
-			if preds[predIdx].satisfiedBy(span) != SATISFIED {
-				satisfied = false
-				break
-			}
-		}
-		if satisfied {
-			ret = append(ret, span)
-		}
-	}
-	return ret, nil, src.numRead
-}
-
-func (store *dataStore) ServerStats() *common.ServerStats {
-	serverStats := common.ServerStats{
-		Dirs: make([]common.StorageDirectoryStats, len(store.shards)),
-	}
-	for shardIdx := range store.shards {
-		shard := store.shards[shardIdx]
-		serverStats.Dirs[shardIdx].Path = shard.path
-		r := levigo.Range{
-			Start: []byte{0},
-			Limit: []byte{0xff},
-		}
-		vals := shard.ldb.GetApproximateSizes([]levigo.Range{r})
-		serverStats.Dirs[shardIdx].ApproximateBytes = vals[0]
-		serverStats.Dirs[shardIdx].LevelDbStats =
-			shard.ldb.PropertyValue("leveldb.stats")
-		store.msink.lg.Debugf("leveldb.stats for %s: %s\n",
-			shard.path, shard.ldb.PropertyValue("leveldb.stats"))
-	}
-	serverStats.LastStartMs = store.startMs
-	serverStats.CurMs = common.TimeToUnixMs(time.Now().UTC())
-	serverStats.ReapedSpans = atomic.LoadUint64(&store.rpr.ReapedSpans)
-	store.msink.PopulateServerStats(&serverStats)
-	return &serverStats
-}