Posted to commits@trafficcontrol.apache.org by ra...@apache.org on 2018/11/26 22:29:13 UTC

[trafficcontrol] branch master updated: Change TM ds.CreateStats to use pointers

This is an automated email from the ASF dual-hosted git repository.

rawlin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficcontrol.git


The following commit(s) were added to refs/heads/master by this push:
     new dd078fa  Change TM ds.CreateStats to use pointers
dd078fa is described below

commit dd078fa4f4e24895b2b6c5b10391be0226aab3c7
Author: Robert Butts <ro...@apache.org>
AuthorDate: Tue Nov 6 15:42:15 2018 -0700

    Change TM ds.CreateStats to use pointers
    
    Improves performance, reduces GC pressure.
---
 traffic_monitor/cache/data_test.go              |  21 ---
 traffic_monitor/ds/stat.go                      | 220 ++++++++++++++----------
 traffic_monitor/ds/stat_test.go                 |  20 ++-
 traffic_monitor/dsdata/stat.go                  | 122 ++++++-------
 traffic_monitor/manager/stat.go                 |  33 +++-
 traffic_monitor/threadsafe/dsstats.go           |   3 +-
 traffic_monitor/threadsafe/lastkbpsstats.go     |   7 +-
 traffic_monitor/threadsafe/resultstathistory.go |   1 +
 8 files changed, 228 insertions(+), 199 deletions(-)
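
The change applies one pattern throughout: value-typed maps such as map[tc.DeliveryServiceName]dsdata.Stat become pointer-typed maps, so entries are mutated in place instead of being copied out, modified, and written back on every poll. As a rough, self-contained sketch of why that saves copies (and, with the larger nested stat structs, allocations and GC work) — the statValue type and names below are illustrative only, not the real dsdata types:

    // Toy illustration of the value-map vs. pointer-map update pattern.
    package main

    import "fmt"

    type statValue struct {
        CachesAvailable int
    }

    func main() {
        // Value-typed map: every read-modify-write copies the whole struct twice.
        byValue := map[string]statValue{"ds1": {}}
        s := byValue["ds1"] // copy out
        s.CachesAvailable++
        byValue["ds1"] = s // copy back

        // Pointer-typed map: the entry is mutated in place, no copies.
        byPointer := map[string]*statValue{"ds1": {}}
        byPointer["ds1"].CachesAvailable++

        fmt.Println(byValue["ds1"].CachesAvailable, byPointer["ds1"].CachesAvailable) // 1 1
    }
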

diff --git a/traffic_monitor/cache/data_test.go b/traffic_monitor/cache/data_test.go
index 0521d423..aa04067 100644
--- a/traffic_monitor/cache/data_test.go
+++ b/traffic_monitor/cache/data_test.go
@@ -151,27 +151,6 @@ func randStatCommon() dsdata.StatCommon {
 	}
 }
 
-func randDsStat() dsdata.Stat {
-	num := 5
-	cacheGroups := map[tc.CacheGroupName]dsdata.StatCacheStats{}
-	types := map[tc.CacheType]dsdata.StatCacheStats{}
-	caches := map[tc.CacheName]dsdata.StatCacheStats{}
-	cachesTime := map[tc.CacheName]time.Time{}
-	for i := 0; i < num; i++ {
-		cacheGroups[tc.CacheGroupName(randStr())] = randStatCacheStats()
-		types[tc.CacheType(randStr())] = randStatCacheStats()
-		cachesTime[tc.CacheName(randStr())] = time.Now()
-	}
-
-	return dsdata.Stat{
-		CommonStats: randStatCommon(),
-		CacheGroups: cacheGroups,
-		Types:       types,
-		Caches:      caches,
-		TotalStats:  randStatCacheStats(),
-	}
-}
-
 func randAStat() *AStat {
 	return &AStat{
 		InBytes:   rand.Uint64(),
diff --git a/traffic_monitor/ds/stat.go b/traffic_monitor/ds/stat.go
index 1f0f9e1..237876a 100644
--- a/traffic_monitor/ds/stat.go
+++ b/traffic_monitor/ds/stat.go
@@ -35,15 +35,13 @@ import (
 
 // TODO remove 'ds' and 'stat' from names
 
-func setStaticData(dsStats dsdata.Stats, dsServers map[tc.DeliveryServiceName][]tc.CacheName) dsdata.Stats {
+func setStaticData(dsStats *dsdata.Stats, dsServers map[tc.DeliveryServiceName][]tc.CacheName) {
 	for ds, stat := range dsStats.DeliveryService {
 		stat.CommonStats.CachesConfiguredNum.Value = int64(len(dsServers[ds]))
-		dsStats.DeliveryService[ds] = stat // TODO consider changing dsStats.DeliveryService[ds] to a pointer so this kind of thing isn't necessary; possibly more performant, as well
 	}
-	return dsStats
 }
 
-func addAvailableData(dsStats dsdata.Stats, crStates tc.CRStates, serverCachegroups map[tc.CacheName]tc.CacheGroupName, serverDs map[tc.CacheName][]tc.DeliveryServiceName, serverTypes map[tc.CacheName]tc.CacheType, precomputed map[tc.CacheName]cache.PrecomputedData, lastStats dsdata.LastStats, events health.ThreadsafeEvents) (dsdata.Stats, error) {
+func addAvailableData(dsStats *dsdata.Stats, crStates tc.CRStates, serverCachegroups map[tc.CacheName]tc.CacheGroupName, serverDs map[tc.CacheName][]tc.DeliveryServiceName, serverTypes map[tc.CacheName]tc.CacheType, precomputed map[tc.CacheName]cache.PrecomputedData, lastStats *dsdata.LastStats, events health.ThreadsafeEvents) {
 	for cache, available := range crStates.Caches {
 		cacheGroup, ok := serverCachegroups[cache]
 		if !ok {
@@ -78,10 +76,17 @@ func addAvailableData(dsStats dsdata.Stats, crStates tc.CRStates, serverCachegro
 				stat.CommonStats.IsHealthy.Value = true
 				stat.CommonStats.CachesAvailableNum.Value++
 				cacheGroupStats := stat.CacheGroups[cacheGroup]
+				if cacheGroupStats == nil {
+					cacheGroupStats = &dsdata.StatCacheStats{} // TODO sync.Pool?
+					stat.CacheGroups[cacheGroup] = cacheGroupStats
+				}
 				cacheGroupStats.IsAvailable.Value = true
-				stat.CacheGroups[cacheGroup] = cacheGroupStats
 				stat.TotalStats.IsAvailable.Value = true
 				typeStats := stat.Types[cacheType]
+				if typeStats == nil {
+					typeStats = &dsdata.StatCacheStats{} // TODO sync.Pool?
+					stat.Types[cacheType] = typeStats
+				}
 				typeStats.IsAvailable.Value = true
 				stat.Types[cacheType] = typeStats
 			}
@@ -96,8 +101,6 @@ func addAvailableData(dsStats dsdata.Stats, crStates tc.CRStates, serverCachegro
 			} else {
 				log.Debugf("no result for %v %v\n", cache, deliveryService)
 			}
-
-			dsStats.DeliveryService[deliveryService] = stat // TODO Necessary? Remove?
 		}
 	}
 
@@ -108,6 +111,7 @@ func addAvailableData(dsStats dsdata.Stats, crStates tc.CRStates, serverCachegro
 		}
 
 		getEvent := func(desc string) health.Event {
+			// TODO sync.Pool?
 			return health.Event{
 				Time:        health.Time(time.Now()),
 				Description: desc,
@@ -137,26 +141,26 @@ func addAvailableData(dsStats dsdata.Stats, crStates tc.CRStates, serverCachegro
 		for i, v := range ds.DisabledLocations {
 			stat.CommonStats.CachesDisabled[i] = string(v)
 		}
-		dsStats.DeliveryService[dsName] = stat // TODO Necessary? Remove?
 	}
-
-	return dsStats, nil
 }
 
-func newLastDSStat() dsdata.LastDSStat {
-	return dsdata.LastDSStat{
-		CacheGroups: map[tc.CacheGroupName]dsdata.LastStatsData{},
-		Type:        map[tc.CacheType]dsdata.LastStatsData{},
-		Caches:      map[tc.CacheName]dsdata.LastStatsData{},
+func newLastDSStat() *dsdata.LastDSStat {
+	return &dsdata.LastDSStat{
+		CacheGroups: map[tc.CacheGroupName]*dsdata.LastStatsData{},
+		Type:        map[tc.CacheType]*dsdata.LastStatsData{},
+		Caches:      map[tc.CacheName]*dsdata.LastStatsData{},
 	}
 }
 
 // BytesPerKilobit is the number of bytes in a kilobit.
 const BytesPerKilobit = 125
 
-func addLastStat(lastData dsdata.LastStatData, newStat int64, newStatTime time.Time) (dsdata.LastStatData, error) {
+// Adds the new stat to lastData.
+// Note this mutates lastData, adding the new stat to it.
+// Also note that lastData may be mutated, even if an error occurs. Specifically, if the new stat is less than the last stat, it will still be set, so that the per-second stats will be properly computed on the next poll.
+func addLastStat(lastData *dsdata.LastStatData, newStat int64, newStatTime time.Time) error {
 	if lastData.Time == newStatTime {
-		return lastData, nil // TODO fix callers to not pass the same stat twice
+		return nil // TODO fix callers to not pass the same stat twice
 	}
 
 	if newStat < lastData.Stat {
@@ -164,11 +168,11 @@ func addLastStat(lastData dsdata.LastStatData, newStat int64, newStatTime time.T
 		err := fmt.Errorf("new stat '%d'@'%v' value less than last stat '%d'@'%v'", newStat, newStatTime, lastData.Stat, lastData.Time)
 		lastData.Stat = newStat
 		lastData.Time = newStatTime
-		return lastData, err
+		return err
 	}
 
 	if newStatTime.Before(lastData.Time) {
-		return lastData, fmt.Errorf("new stat '%d'@'%v' time less than last stat '%d'@'%v'", newStat, newStatTime, lastData.Stat, lastData.Time)
+		return fmt.Errorf("new stat '%d'@'%v' time less than last stat '%d'@'%v'", newStat, newStatTime, lastData.Stat, lastData.Time)
 	}
 
 	if lastData.Stat != 0 {
@@ -177,33 +181,37 @@ func addLastStat(lastData dsdata.LastStatData, newStat int64, newStatTime time.T
 
 	lastData.Stat = newStat
 	lastData.Time = newStatTime
-	return lastData, nil
+	return nil
 }
 
-func addLastStats(lastData dsdata.LastStatsData, newStats dsdata.StatCacheStats, newStatsTime time.Time) (dsdata.LastStatsData, error) {
-	errs := []error{nil, nil, nil, nil, nil}
-	lastData.Bytes, errs[0] = addLastStat(lastData.Bytes, newStats.OutBytes.Value, newStatsTime)
-	lastData.Status2xx, errs[1] = addLastStat(lastData.Status2xx, newStats.Status2xx.Value, newStatsTime)
-	lastData.Status3xx, errs[2] = addLastStat(lastData.Status3xx, newStats.Status3xx.Value, newStatsTime)
-	lastData.Status4xx, errs[3] = addLastStat(lastData.Status4xx, newStats.Status4xx.Value, newStatsTime)
-	lastData.Status5xx, errs[4] = addLastStat(lastData.Status5xx, newStats.Status5xx.Value, newStatsTime)
-	return lastData, util.JoinErrs(errs)
+// addLastStats adds the new stats to the lastData.
+// Note lastData is mutated, with the new stats added to it.
+func addLastStats(lastData *dsdata.LastStatsData, newStats *dsdata.StatCacheStats, newStatsTime time.Time) error {
+	return util.JoinErrs([]error{
+		addLastStat(&lastData.Bytes, newStats.OutBytes.Value, newStatsTime),
+		addLastStat(&lastData.Status2xx, newStats.Status2xx.Value, newStatsTime),
+		addLastStat(&lastData.Status3xx, newStats.Status3xx.Value, newStatsTime),
+		addLastStat(&lastData.Status4xx, newStats.Status4xx.Value, newStatsTime),
+		addLastStat(&lastData.Status5xx, newStats.Status5xx.Value, newStatsTime),
+	})
 }
 
-func addLastStatsToStatCacheStats(s dsdata.StatCacheStats, l dsdata.LastStatsData) dsdata.StatCacheStats {
+// addLastStatsToStatCacheStats adds the given LastStatsData to the given StatCacheStats.
+// Note s is mutated, with l being added to it.
+func addLastStatsToStatCacheStats(s *dsdata.StatCacheStats, l *dsdata.LastStatsData) {
 	s.Kbps.Value = l.Bytes.PerSec / BytesPerKilobit
 	s.Tps2xx.Value = l.Status2xx.PerSec
 	s.Tps3xx.Value = l.Status3xx.PerSec
 	s.Tps4xx.Value = l.Status4xx.PerSec
 	s.Tps5xx.Value = l.Status5xx.PerSec
 	s.TpsTotal.Value = s.Tps2xx.Value + s.Tps3xx.Value + s.Tps4xx.Value + s.Tps5xx.Value
-	return s
 }
 
 // addLastDSStatTotals takes a LastDSStat with only raw `Caches` data, and calculates and sets the `CacheGroups`, `Type`, and `Total` data, and returns the augmented structure.
-func addLastDSStatTotals(lastStat dsdata.LastDSStat, cachesReporting map[tc.CacheName]bool, serverCachegroups map[tc.CacheName]tc.CacheGroupName, serverTypes map[tc.CacheName]tc.CacheType) dsdata.LastDSStat {
-	cacheGroups := map[tc.CacheGroupName]dsdata.LastStatsData{}
-	cacheTypes := map[tc.CacheType]dsdata.LastStatsData{}
+// Note lastStat is mutated, with the calculated values being set in it.
+func addLastDSStatTotals(lastStat *dsdata.LastDSStat, cachesReporting map[tc.CacheName]bool, serverCachegroups map[tc.CacheName]tc.CacheGroupName, serverTypes map[tc.CacheName]tc.CacheType) {
+	cacheGroups := map[tc.CacheGroupName]*dsdata.LastStatsData{}
+	cacheTypes := map[tc.CacheType]*dsdata.LastStatsData{}
 	total := dsdata.LastStatsData{}
 	for cacheName, cacheStats := range lastStat.Caches {
 		if !cachesReporting[cacheName] {
@@ -211,67 +219,79 @@ func addLastDSStatTotals(lastStat dsdata.LastDSStat, cachesReporting map[tc.Cach
 		}
 
 		if cacheGroup, ok := serverCachegroups[cacheName]; ok {
-			cacheGroups[cacheGroup] = cacheGroups[cacheGroup].Sum(cacheStats)
+			cgStat := cacheGroups[cacheGroup]
+			if cgStat == nil {
+				cgStat = &dsdata.LastStatsData{}
+				cacheGroups[cacheGroup] = cgStat
+			}
+			cgStat.Sum(cacheStats)
 		} else {
 			log.Warnf("while computing delivery service data, cache %v not in cachegroups\n", cacheName)
 		}
 
 		if cacheType, ok := serverTypes[cacheName]; ok {
-			cacheTypes[cacheType] = cacheTypes[cacheType].Sum(cacheStats)
+			cacheTypeStat := cacheTypes[cacheType]
+			if cacheTypeStat == nil {
+				cacheTypeStat = &dsdata.LastStatsData{}
+				cacheTypes[cacheType] = cacheTypeStat
+			}
+			cacheTypeStat.Sum(cacheStats)
 		} else {
 			log.Warnf("while computing delivery service data, cache %v not in types\n", cacheName)
 		}
-		total = total.Sum(cacheStats)
+		total.Sum(cacheStats)
 	}
 	lastStat.CacheGroups = cacheGroups
 	lastStat.Type = cacheTypes
 	lastStat.Total = total
-	return lastStat
 }
 
-// addDSPerSecStats calculates and adds the per-second delivery service stats to both the Stats and LastStats structures, and returns the augmented structures.
-func addDSPerSecStats(dsName tc.DeliveryServiceName, stat dsdata.Stat, lastStats dsdata.LastStats, dsStats dsdata.Stats, serverCachegroups map[tc.CacheName]tc.CacheGroupName, serverTypes map[tc.CacheName]tc.CacheType, mc tc.TrafficMonitorConfigMap, events health.ThreadsafeEvents, precomputed map[tc.CacheName]cache.PrecomputedData, states peer.CRStatesThreadsafe) (dsdata.Stats, dsdata.LastStats) {
-	err := error(nil)
+// addDSPerSecStats calculates and adds the per-second delivery service stats to both the Stats and LastStats structures.
+// Note this mutates both dsStats and lastStats, adding the per-second stats to them.
+func addDSPerSecStats(lastStats *dsdata.LastStats, dsStats *dsdata.Stats, dsName tc.DeliveryServiceName, stat *dsdata.Stat, serverCachegroups map[tc.CacheName]tc.CacheGroupName, serverTypes map[tc.CacheName]tc.CacheType, mc tc.TrafficMonitorConfigMap, events health.ThreadsafeEvents, precomputed map[tc.CacheName]cache.PrecomputedData, states peer.CRStatesThreadsafe) {
 	lastStat, lastStatExists := lastStats.DeliveryServices[dsName]
 	if !lastStatExists {
-		lastStat = newLastDSStat()
+		lastStat = newLastDSStat() // TODO sync.Pool?
+		lastStats.DeliveryServices[dsName] = lastStat
 	}
-
 	for cacheName, cacheStats := range stat.Caches {
+		lastStatCache := lastStat.Caches[cacheName]
+		if lastStatCache == nil {
+			lastStatCache = &dsdata.LastStatsData{}
+			lastStat.Caches[cacheName] = lastStatCache
+		}
 		if _, ok := precomputed[cacheName]; ok {
-			lastStat.Caches[cacheName], err = addLastStats(lastStat.Caches[cacheName], cacheStats, precomputed[cacheName].Time)
-			if err != nil {
+			if err := addLastStats(lastStatCache, cacheStats, precomputed[cacheName].Time); err != nil {
 				log.Warnf("%v adding per-second stats for cache %v: %v", dsName, cacheName, err)
 				continue
 			}
 		}
 		cacheStats.Kbps.Value = lastStat.Caches[cacheName].Bytes.PerSec / BytesPerKilobit
-		stat.Caches[cacheName] = cacheStats
 	}
 
-	lastStat = addLastDSStatTotals(lastStat, stat.CommonStats.CachesReporting, serverCachegroups, serverTypes)
+	addLastDSStatTotals(lastStat, stat.CommonStats.CachesReporting, serverCachegroups, serverTypes)
 
 	for cacheGroup, cacheGroupStat := range lastStat.CacheGroups {
-		stat.CacheGroups[cacheGroup] = addLastStatsToStatCacheStats(stat.CacheGroups[cacheGroup], cacheGroupStat)
+		addLastStatsToStatCacheStats(stat.CacheGroups[cacheGroup], cacheGroupStat)
 	}
 	for cacheType, cacheTypeStat := range lastStat.Type {
-		stat.Types[cacheType] = addLastStatsToStatCacheStats(stat.Types[cacheType], cacheTypeStat)
+		addLastStatsToStatCacheStats(stat.Types[cacheType], cacheTypeStat)
 	}
-	stat.TotalStats = addLastStatsToStatCacheStats(stat.TotalStats, lastStat.Total)
+	addLastStatsToStatCacheStats(&stat.TotalStats, &lastStat.Total)
 
 	dsErr := getDSErr(dsName, stat.TotalStats, mc)
 	if dsErr != nil {
 		stat.CommonStats.IsAvailable.Value = false
 		stat.CommonStats.IsHealthy.Value = false
 		stat.CommonStats.ErrorStr.Value = dsErr.Error()
-
 	}
 	//it's ok to ignore the 'ok' return here.  If the DS doesn't exist, an empty struct will be returned and we can use it.
 	dsState, _ := states.GetDeliveryService(dsName)
 	dsState.IsAvailable = stat.CommonStats.IsAvailable.Value
-	states.SetDeliveryService(dsName, dsState)
+	states.SetDeliveryService(dsName, dsState) // TODO sync.Map? Determine if slow.
 
 	getEvent := func(desc string) health.Event {
+		// TODO sync.Pool?
 		return health.Event{
 			Time:        health.Time(time.Now()),
 			Description: desc,
@@ -282,16 +302,12 @@ func addDSPerSecStats(dsName tc.DeliveryServiceName, stat dsdata.Stat, lastStats
 		}
 	}
 	if stat.CommonStats.IsAvailable.Value == false && lastStat.Available == true && dsErr != nil {
-		events.Add(getEvent(dsErr.Error()))
+		events.Add(getEvent(dsErr.Error())) // TODO change events.Add to not allocate new memory, after the limit is reached.
 	} else if stat.CommonStats.IsAvailable.Value == true && lastStat.Available == false {
 		events.Add(getEvent("REPORTED - available"))
 	}
 
 	lastStat.Available = stat.CommonStats.IsAvailable.Value
-
-	lastStats.DeliveryServices[dsName] = lastStat
-	dsStats.DeliveryService[dsName] = stat
-	return dsStats, lastStats
 }
 
 // latestBytes returns the most recent OutBytes from the given cache results, and the time of that result. It assumes zero results are not valid, but nonzero results with errors are valid.
@@ -302,21 +318,21 @@ func latestBytes(p cache.PrecomputedData) (int64, time.Time, error) {
 	return p.OutBytes, p.Time, nil
 }
 
-// addCachePerSecStats calculates the cache per-second stats, adds them to LastStats, and returns the augmented object.
-func addCachePerSecStats(cacheName tc.CacheName, precomputed cache.PrecomputedData, lastStats dsdata.LastStats) dsdata.LastStats {
+// addCachePerSecStats calculates the cache per-second stats, adds them to LastStats.
+// Note this mutates lastStats, adding the calculated per-second stats to it.
+func addCachePerSecStats(lastStats *dsdata.LastStats, cacheName tc.CacheName, precomputed cache.PrecomputedData) {
 	outBytes, outBytesTime, err := latestBytes(precomputed) // it's ok if `latestBytes` returns 0s with an error, `addLastStat` will refrain from setting it (unless the previous calculation was nonzero, in which case it will error appropriately).
 	if err != nil {
 		log.Warnf("while computing delivery service data for cache %v: %v\n", cacheName, err)
 	}
-	lastStat := lastStats.Caches[cacheName] // if lastStats.Caches[cacheName] doesn't exist, it will be zero-constructed, and `addLastStat` will refrain from setting the PerSec for zero LastStats
-	lastStat.Bytes, err = addLastStat(lastStat.Bytes, outBytes, outBytesTime)
-	if err != nil {
+	lastStat, ok := lastStats.Caches[cacheName] // if lastStats.Caches[cacheName] doesn't exist, it will be zero-constructed, and `addLastStat` will refrain from setting the PerSec for zero LastStats
+	if !ok {
+		lastStat = &dsdata.LastStatsData{}
+		lastStats.Caches[cacheName] = lastStat
+	}
+	if err = addLastStat(&lastStat.Bytes, outBytes, outBytesTime); err != nil {
 		log.Warnf("while computing delivery service data for cache %v: %v\n", cacheName, err)
-		return lastStats
 	}
-	lastStats.Caches[cacheName] = lastStat
-
-	return lastStats
 }
 
 // addPerSecStats adds Kbps fields to the NewStats, based on the previous out_bytes in the oldStats, and the time difference.
@@ -326,34 +342,32 @@ func addCachePerSecStats(cacheName tc.CacheName, precomputed cache.PrecomputedDa
 // we set the (new - old) / lastChangedTime as the KBPS, and update the recorded LastChangedTime and LastChangedValue
 //
 // TODO handle ATS byte rolling (when the `out_bytes` overflows back to 0)
-func addPerSecStats(precomputed map[tc.CacheName]cache.PrecomputedData, dsStats dsdata.Stats, lastStats dsdata.LastStats, serverCachegroups map[tc.CacheName]tc.CacheGroupName, serverTypes map[tc.CacheName]tc.CacheType, mc tc.TrafficMonitorConfigMap, events health.ThreadsafeEvents, states peer.CRStatesThreadsafe) (dsdata.Stats, dsdata.LastStats) {
+//
+// Note this mutates both dsStats and lastStats, adding the per-second stats to them.
+//
+func addPerSecStats(precomputed map[tc.CacheName]cache.PrecomputedData, dsStats *dsdata.Stats, lastStats *dsdata.LastStats, serverCachegroups map[tc.CacheName]tc.CacheGroupName, serverTypes map[tc.CacheName]tc.CacheType, mc tc.TrafficMonitorConfigMap, events health.ThreadsafeEvents, states peer.CRStatesThreadsafe) {
 	for dsName, stat := range dsStats.DeliveryService {
-		dsStats, lastStats = addDSPerSecStats(dsName, stat, lastStats, dsStats, serverCachegroups, serverTypes, mc, events, precomputed, states)
+		addDSPerSecStats(lastStats, dsStats, dsName, stat, serverCachegroups, serverTypes, mc, events, precomputed, states)
 	}
 	for cacheName, precomputedData := range precomputed {
-		lastStats = addCachePerSecStats(cacheName, precomputedData, lastStats)
+		addCachePerSecStats(lastStats, cacheName, precomputedData)
 	}
-
-	return dsStats, lastStats
 }
 
 // CreateStats aggregates and creates statistics from given precomputed stat history. It returns the created stats, information about these stats necessary for the next calculation, and any error.
-func CreateStats(precomputed map[tc.CacheName]cache.PrecomputedData, toData todata.TOData, crStates tc.CRStates, lastStats dsdata.LastStats, now time.Time, mc tc.TrafficMonitorConfigMap, events health.ThreadsafeEvents, states peer.CRStatesThreadsafe) (dsdata.Stats, dsdata.LastStats, error) {
+// Note lastStats is mutated, being set with the new last stats.
+func CreateStats(precomputed map[tc.CacheName]cache.PrecomputedData, toData todata.TOData, crStates tc.CRStates, lastStats *dsdata.LastStats, now time.Time, mc tc.TrafficMonitorConfigMap, events health.ThreadsafeEvents, states peer.CRStatesThreadsafe) (*dsdata.Stats, error) {
 	start := time.Now()
-	dsStats := dsdata.NewStats()
+	dsStats := dsdata.NewStats(len(toData.DeliveryServiceServers)) // TODO sync.Pool?
 	for deliveryService := range toData.DeliveryServiceServers {
 		if deliveryService == "" {
 			log.Errorf("EMPTY CreateStats deliveryService")
 			continue
 		}
-		dsStats.DeliveryService[deliveryService] = *dsdata.NewStat()
-	}
-	dsStats = setStaticData(dsStats, toData.DeliveryServiceServers)
-	var err error
-	dsStats, err = addAvailableData(dsStats, crStates, toData.ServerCachegroups, toData.ServerDeliveryServices, toData.ServerTypes, precomputed, lastStats, events) // TODO move after stat summarisation
-	if err != nil {
-		return dsStats, lastStats, fmt.Errorf("Error getting Cache availability data: %v", err)
+		dsStats.DeliveryService[deliveryService] = dsdata.NewStat() // TODO sync.Pool?
 	}
+	setStaticData(dsStats, toData.DeliveryServiceServers)
+	addAvailableData(dsStats, crStates, toData.ServerCachegroups, toData.ServerDeliveryServices, toData.ServerTypes, precomputed, lastStats, events) // TODO move after stat summarisation
 
 	for server, precomputedData := range precomputed {
 		cachegroup, ok := toData.ServerCachegroups[server]
@@ -374,24 +388,42 @@ func CreateStats(precomputed map[tc.CacheName]cache.PrecomputedData, toData toda
 				continue
 			}
 
-			if _, ok := dsStats.DeliveryService[ds]; !ok {
-				// TODO use sync pool?
-				dsStats.DeliveryService[ds] = *(dsdata.NewStat())
+			httpDsStat, hadHttpDsStat := dsStats.DeliveryService[ds]
+			if !hadHttpDsStat {
+				httpDsStat = dsdata.NewStat() // TODO sync.Pool?
+				dsStats.DeliveryService[ds] = httpDsStat
+			}
+
+			httpDsStatCg := httpDsStat.CacheGroups[cachegroup]
+			if httpDsStatCg == nil {
+				httpDsStatCg = &dsdata.StatCacheStats{}
+				httpDsStat.CacheGroups[cachegroup] = httpDsStatCg
+			}
+
+			httpDsStatType := httpDsStat.Types[serverType]
+			if httpDsStatType == nil {
+				httpDsStatType = &dsdata.StatCacheStats{}
+				httpDsStat.Types[serverType] = httpDsStatType
 			}
-			httpDsStat := dsStats.DeliveryService[ds]
-			httpDsStat.TotalStats = SumDSAstats(httpDsStat.TotalStats, resultStat)
-			httpDsStat.CacheGroups[cachegroup] = SumDSAstats(httpDsStat.CacheGroups[cachegroup], resultStat)
-			httpDsStat.Types[serverType] = SumDSAstats(httpDsStat.Types[serverType], resultStat)
-			httpDsStat.Caches[server] = SumDSAstats(httpDsStat.Caches[server], resultStat)
-			httpDsStat.CommonStats = dsStats.DeliveryService[ds].CommonStats
-			dsStats.DeliveryService[ds] = httpDsStat // TODO determine if necessary? Change to pointers, so it isn't?
+
+			httpDsStatCache := httpDsStat.Caches[server]
+			if httpDsStatCache == nil {
+				httpDsStatCache = &dsdata.StatCacheStats{}
+				httpDsStat.Caches[server] = httpDsStatCache
+			}
+
+			SumDSAstats(&httpDsStat.TotalStats, resultStat)
+			SumDSAstats(httpDsStatCg, resultStat)
+			SumDSAstats(httpDsStatType, resultStat)
+			SumDSAstats(httpDsStatCache, resultStat)
+			httpDsStat.CommonStats = dsStats.DeliveryService[ds].CommonStats // TODO verify whether this should be a sum
 		}
 	}
 
-	perSecStats, lastStats := addPerSecStats(precomputed, dsStats, lastStats, toData.ServerCachegroups, toData.ServerTypes, mc, events, states)
+	addPerSecStats(precomputed, dsStats, lastStats, toData.ServerCachegroups, toData.ServerTypes, mc, events, states)
 	log.Infof("CreateStats took %v\n", time.Since(start))
-	perSecStats.Time = time.Now()
-	return perSecStats, lastStats, nil
+	dsStats.Time = time.Now()
+	return dsStats, nil
 }
 
 func getDSErr(dsName tc.DeliveryServiceName, dsStats dsdata.StatCacheStats, monitorConfig tc.TrafficMonitorConfigMap) error {
@@ -404,13 +436,11 @@ func getDSErr(dsName tc.DeliveryServiceName, dsStats dsdata.StatCacheStats, moni
 	return nil
 }
 
-func SumDSAstats(ds dsdata.StatCacheStats, cacheStat *cache.AStat) dsdata.StatCacheStats {
-	// TODO change to pointer? Change everything in dsdata to pointers?
+func SumDSAstats(ds *dsdata.StatCacheStats, cacheStat *cache.AStat) {
 	ds.OutBytes.Value += int64(cacheStat.OutBytes)
 	ds.InBytes.Value += float64(cacheStat.InBytes)
 	ds.Status2xx.Value += int64(cacheStat.Status2xx)
 	ds.Status3xx.Value += int64(cacheStat.Status3xx)
 	ds.Status4xx.Value += int64(cacheStat.Status4xx)
 	ds.Status5xx.Value += int64(cacheStat.Status5xx)
-	return ds
 }
diff --git a/traffic_monitor/ds/stat_test.go b/traffic_monitor/ds/stat_test.go
index ba78db1..269b91d 100644
--- a/traffic_monitor/ds/stat_test.go
+++ b/traffic_monitor/ds/stat_test.go
@@ -64,12 +64,16 @@ func TestCreateStats(t *testing.T) {
 
 	monitorConfig := getMockMonitorConfig(dses)
 
-	dsStats, lastStats, err := CreateStats(precomputeds, toData, combinedCRStates.Get(), lastStatsThs.Get().Copy(), now, monitorConfig, events, localCRStates)
+	lastStatsVal := lastStatsThs.Get()
+	lastStatsCopy := lastStatsVal.Copy()
+	dsStats, err := CreateStats(precomputeds, toData, combinedCRStates.Get(), lastStatsCopy, now, monitorConfig, events, localCRStates)
 
 	if err != nil {
 		t.Fatalf("CreateStats err expected: nil, actual: " + err.Error())
 	}
 
+	lastStatsThs.Set(*lastStatsCopy)
+
 	cgMap := map[tc.CacheGroupName]struct{}{}
 	for _, cg := range toData.ServerCachegroups {
 		cgMap[cg] = struct{}{}
@@ -107,7 +111,7 @@ func TestCreateStats(t *testing.T) {
 				}
 			}
 
-			if errStr := compareAStatToStatCacheStats(&cgExpected, &cgStat); errStr != "" {
+			if errStr := compareAStatToStatCacheStats(&cgExpected, cgStat); errStr != "" {
 				t.Fatalf("CreateStats cachegroup " + string(cgName) + ": " + errStr)
 			}
 
@@ -134,7 +138,7 @@ func TestCreateStats(t *testing.T) {
 				}
 			}
 
-			if errStr := compareAStatToStatCacheStats(&tpExpected, &tpStat); errStr != "" {
+			if errStr := compareAStatToStatCacheStats(&tpExpected, tpStat); errStr != "" {
 				t.Fatalf("CreateStats type " + string(tpName) + ": " + errStr)
 			}
 		}
@@ -160,7 +164,7 @@ func TestCreateStats(t *testing.T) {
 				}
 			}
 
-			if errStr := compareAStatToStatCacheStats(&caExpected, &caStat); errStr != "" {
+			if errStr := compareAStatToStatCacheStats(&caExpected, caStat); errStr != "" {
 				t.Fatalf("CreateStats cache " + string(caName) + ": " + errStr)
 			}
 		}
@@ -191,12 +195,12 @@ func TestCreateStats(t *testing.T) {
 		}
 	}
 
-	if len(lastStats.DeliveryServices) != len(toData.DeliveryServiceServers) {
-		t.Fatalf("CreateStats len(LastStats.DeliveryServices) expected: %+v actual: %+v", len(toData.DeliveryServiceServers), len(lastStats.DeliveryServices))
+	if len(lastStatsCopy.DeliveryServices) != len(toData.DeliveryServiceServers) {
+		t.Fatalf("CreateStats len(LastStats.DeliveryServices) expected: %+v actual: %+v", len(toData.DeliveryServiceServers), len(lastStatsCopy.DeliveryServices))
 	}
 
-	if len(lastStats.Caches) != len(toData.ServerDeliveryServices) {
-		t.Fatalf("CreateStats len(LastStats.Caches) expected: %+v actual: %+v", len(toData.ServerDeliveryServices), len(lastStats.Caches))
+	if len(lastStatsCopy.Caches) != len(toData.ServerDeliveryServices) {
+		t.Fatalf("CreateStats len(LastStats.Caches) expected: %+v actual: %+v", len(toData.ServerDeliveryServices), len(lastStatsCopy.Caches))
 	}
 
 }
diff --git a/traffic_monitor/dsdata/stat.go b/traffic_monitor/dsdata/stat.go
index 328eac5..32ad5e2 100644
--- a/traffic_monitor/dsdata/stat.go
+++ b/traffic_monitor/dsdata/stat.go
@@ -63,11 +63,11 @@ type StatsReadonly interface {
 
 // StatReadonly is a read-only interface for a delivery service Stat, designed to be passed to multiple goroutine readers.
 type StatReadonly interface {
-	Copy() Stat
+	Copy() *Stat
 	Common() StatCommonReadonly
-	CacheGroup(name tc.CacheGroupName) (StatCacheStats, bool)
-	Type(name tc.CacheType) (StatCacheStats, bool)
-	Total() StatCacheStats
+	CacheGroup(name tc.CacheGroupName) (*StatCacheStats, bool)
+	Type(name tc.CacheType) (*StatCacheStats, bool)
+	Total() *StatCacheStats
 }
 
 // StatCommonReadonly is a read-only interface for a delivery service's common Stat data, designed to be passed to multiple goroutine readers.
@@ -224,9 +224,9 @@ func (a StatCacheStats) Sum(b StatCacheStats) StatCacheStats {
 // Stat represents a complete delivery service stat, for a given poll, or at the time requested.
 type Stat struct {
 	CommonStats StatCommon
-	CacheGroups map[tc.CacheGroupName]StatCacheStats
-	Types       map[tc.CacheType]StatCacheStats
-	Caches      map[tc.CacheName]StatCacheStats
+	CacheGroups map[tc.CacheGroupName]*StatCacheStats
+	Types       map[tc.CacheType]*StatCacheStats
+	Caches      map[tc.CacheName]*StatCacheStats
 	TotalStats  StatCacheStats
 }
 
@@ -236,21 +236,22 @@ var ErrNotProcessedStat = errors.New("This stat is not used.")
 // NewStat returns a new delivery service Stat, initializing pointer members.
 func NewStat() *Stat {
 	return &Stat{
-		CacheGroups: map[tc.CacheGroupName]StatCacheStats{},
-		Types:       map[tc.CacheType]StatCacheStats{},
+		CacheGroups: map[tc.CacheGroupName]*StatCacheStats{},
+		Types:       map[tc.CacheType]*StatCacheStats{},
 		CommonStats: StatCommon{CachesReporting: map[tc.CacheName]bool{}},
-		Caches:      map[tc.CacheName]StatCacheStats{},
+		Caches:      map[tc.CacheName]*StatCacheStats{},
 	}
 }
 
 // Copy performs a deep copy of this Stat. It does not modify, and is thus safe for multiple goroutines.
-func (a Stat) Copy() Stat {
-	b := Stat{
+func (a Stat) Copy() *Stat {
+	// TODO sync.Pool. Better yet, remove copy usage
+	b := &Stat{
 		CommonStats: a.CommonStats.Copy(),
 		TotalStats:  a.TotalStats,
-		CacheGroups: map[tc.CacheGroupName]StatCacheStats{},
-		Types:       map[tc.CacheType]StatCacheStats{},
-		Caches:      map[tc.CacheName]StatCacheStats{},
+		CacheGroups: map[tc.CacheGroupName]*StatCacheStats{},
+		Types:       map[tc.CacheType]*StatCacheStats{},
+		Caches:      map[tc.CacheName]*StatCacheStats{},
 	}
 	for k, v := range a.CacheGroups {
 		b.CacheGroups[k] = v
@@ -265,36 +266,36 @@ func (a Stat) Copy() Stat {
 }
 
 // Common returns the common stat data for this stat. It is part of the StatCommonReadonly interface.
-func (a Stat) Common() StatCommonReadonly {
+func (a *Stat) Common() StatCommonReadonly {
 	return a.CommonStats
 }
 
 // CacheGroup returns the data for the given cachegroup in this stat. It is part of the StatCommonReadonly interface.
-func (a Stat) CacheGroup(name tc.CacheGroupName) (StatCacheStats, bool) {
+func (a *Stat) CacheGroup(name tc.CacheGroupName) (*StatCacheStats, bool) {
 	c, ok := a.CacheGroups[name]
 	return c, ok
 }
 
 // Type returns the aggregated data for the given cache type in this stat. It is part of the StatCommonReadonly interface.
-func (a Stat) Type(name tc.CacheType) (StatCacheStats, bool) {
+func (a *Stat) Type(name tc.CacheType) (*StatCacheStats, bool) {
 	t, ok := a.Types[name]
 	return t, ok
 }
 
 // Total returns the aggregated total data in this stat. It is part of the StatCommonReadonly interface.
-func (a Stat) Total() StatCacheStats {
-	return a.TotalStats
+func (a *Stat) Total() *StatCacheStats {
+	return &a.TotalStats
 }
 
 // Stats is the JSON-serialisable representation of delivery service Stats. It maps delivery service names to individual stat objects.
 type Stats struct {
-	DeliveryService map[tc.DeliveryServiceName]Stat `json:"deliveryService"`
-	Time            time.Time                       `json:"-"`
+	DeliveryService map[tc.DeliveryServiceName]*Stat `json:"deliveryService"`
+	Time            time.Time                        `json:"-"`
 }
 
 // Copy performs a deep copy of this Stats object.
-func (s Stats) Copy() Stats {
-	b := NewStats()
+func (s *Stats) Copy() *Stats {
+	b := NewStats(len(s.DeliveryService))
 	for k, v := range s.DeliveryService {
 		b.DeliveryService[k] = v.Copy()
 	}
@@ -329,31 +330,32 @@ func (s Stats) JSON(filter Filter, params url.Values) StatsOld {
 		for cacheType, typeStats := range stat.Types {
 			jsonObj = addStatCacheStats(jsonObj, typeStats, deliveryService, "type."+cacheType.String()+".", now, filter)
 		}
-		jsonObj = addStatCacheStats(jsonObj, stat.TotalStats, deliveryService, "total.", now, filter)
+		jsonObj = addStatCacheStats(jsonObj, &stat.TotalStats, deliveryService, "total.", now, filter)
 	}
 	return *jsonObj
 }
 
 // NewStats creates a new Stats object, initializing any pointer members.
 // TODO rename to just 'New'?
-func NewStats() Stats {
-	return Stats{DeliveryService: map[tc.DeliveryServiceName]Stat{}}
+func NewStats(size int) *Stats {
+	return &Stats{DeliveryService: make(map[tc.DeliveryServiceName]*Stat, size)}
 }
 
 // LastStats includes the previously received stats for DeliveryServices and Caches, the stat itself, when it was received, and the stat value per second.
 type LastStats struct {
-	DeliveryServices map[tc.DeliveryServiceName]LastDSStat
-	Caches           map[tc.CacheName]LastStatsData
+	DeliveryServices map[tc.DeliveryServiceName]*LastDSStat
+	Caches           map[tc.CacheName]*LastStatsData
 }
 
 // NewLastStats returns a new LastStats object, initializing internal pointer values.
-func NewLastStats() LastStats {
-	return LastStats{DeliveryServices: map[tc.DeliveryServiceName]LastDSStat{}, Caches: map[tc.CacheName]LastStatsData{}}
+func NewLastStats(dsLen, cacheLen int) *LastStats {
+	// TODO add map size params?
+	return &LastStats{DeliveryServices: map[tc.DeliveryServiceName]*LastDSStat{}, Caches: map[tc.CacheName]*LastStatsData{}}
 }
 
 // Copy performs a deep copy of this LastStats object.
-func (a LastStats) Copy() LastStats {
-	b := NewLastStats()
+func (a *LastStats) Copy() *LastStats {
+	b := NewLastStats(len(a.DeliveryServices), len(a.Caches))
 	for k, v := range a.DeliveryServices {
 		b.DeliveryServices[k] = v.Copy()
 	}
@@ -366,19 +368,19 @@ func (a LastStats) Copy() LastStats {
 // LastDSStat maps and aggregates the last stats received for the given delivery service to caches, cache groups, types, and total.
 // TODO figure a way to associate this type with StatHTTP, with which its members correspond.
 type LastDSStat struct {
-	Caches      map[tc.CacheName]LastStatsData
-	CacheGroups map[tc.CacheGroupName]LastStatsData
-	Type        map[tc.CacheType]LastStatsData
+	Caches      map[tc.CacheName]*LastStatsData
+	CacheGroups map[tc.CacheGroupName]*LastStatsData
+	Type        map[tc.CacheType]*LastStatsData
 	Total       LastStatsData
 	Available   bool
 }
 
 // Copy performs a deep copy of this LastDSStat object.
-func (a LastDSStat) Copy() LastDSStat {
-	b := LastDSStat{
-		CacheGroups: map[tc.CacheGroupName]LastStatsData{},
-		Type:        map[tc.CacheType]LastStatsData{},
-		Caches:      map[tc.CacheName]LastStatsData{},
+func (a LastDSStat) Copy() *LastDSStat {
+	b := &LastDSStat{
+		CacheGroups: map[tc.CacheGroupName]*LastStatsData{},
+		Type:        map[tc.CacheType]*LastStatsData{},
+		Caches:      map[tc.CacheName]*LastStatsData{},
 		Total:       a.Total,
 		Available:   a.Available,
 	}
@@ -394,15 +396,8 @@ func (a LastDSStat) Copy() LastDSStat {
 	return b
 }
 
-func newLastDSStat() LastDSStat {
-	return LastDSStat{
-		CacheGroups: map[tc.CacheGroupName]LastStatsData{},
-		Type:        map[tc.CacheType]LastStatsData{},
-		Caches:      map[tc.CacheName]LastStatsData{},
-	}
-}
-
 // LastStatsData contains the last stats and per-second calculations for bytes and status codes received from a cache.
+// TODO sync.Pool?
 type LastStatsData struct {
 	Bytes     LastStatData
 	Status2xx LastStatData
@@ -412,14 +407,17 @@ type LastStatsData struct {
 }
 
 // Sum returns the Sum() of each member data with the given LastStatsData corresponding members
-func (a LastStatsData) Sum(b LastStatsData) LastStatsData {
-	return LastStatsData{
-		Bytes:     a.Bytes.Sum(b.Bytes),
-		Status2xx: a.Status2xx.Sum(b.Status2xx),
-		Status3xx: a.Status3xx.Sum(b.Status3xx),
-		Status4xx: a.Status4xx.Sum(b.Status4xx),
-		Status5xx: a.Status5xx.Sum(b.Status5xx),
-	}
+func (a *LastStatsData) Sum(b *LastStatsData) {
+	a.Bytes.PerSec += b.Bytes.PerSec
+	a.Bytes.Stat += b.Bytes.Stat
+	a.Status2xx.PerSec += b.Status2xx.PerSec
+	a.Status2xx.Stat += b.Status2xx.Stat
+	a.Status3xx.PerSec += b.Status3xx.PerSec
+	a.Status3xx.Stat += b.Status3xx.Stat
+	a.Status4xx.PerSec += b.Status4xx.PerSec
+	a.Status4xx.Stat += b.Status4xx.Stat
+	a.Status5xx.PerSec += b.Status5xx.PerSec
+	a.Status5xx.Stat += b.Status5xx.Stat
 }
 
 // LastStatData contains the value, time it was received, and per-second calculation since the previous stat, for a stat from a cache.
@@ -429,14 +427,6 @@ type LastStatData struct {
 	Time   time.Time
 }
 
-// Sum adds the PerSec and Stat of the given data to this object. Time is meaningless for the summed object, and is thus set to 0.
-func (a LastStatData) Sum(b LastStatData) LastStatData {
-	return LastStatData{
-		PerSec: a.PerSec + b.PerSec,
-		Stat:   a.Stat + b.Stat,
-	}
-}
-
 func addCommonData(s *StatsOld, c *StatCommon, deliveryService tc.DeliveryServiceName, t int64, filter Filter) *StatsOld {
 	add := func(name string, val interface{}) {
 		if filter.UseStat(name) {
@@ -454,7 +444,7 @@ func addCommonData(s *StatsOld, c *StatCommon, deliveryService tc.DeliveryServic
 	return s
 }
 
-func addStatCacheStats(s *StatsOld, c StatCacheStats, deliveryService tc.DeliveryServiceName, prefix string, t int64, filter Filter) *StatsOld {
+func addStatCacheStats(s *StatsOld, c *StatCacheStats, deliveryService tc.DeliveryServiceName, prefix string, t int64, filter Filter) *StatsOld {
 	add := func(name, val string) {
 		if filter.UseStat(name) {
 			// This is for compatibility with the Traffic Monitor 1.0 API.
diff --git a/traffic_monitor/manager/stat.go b/traffic_monitor/manager/stat.go
index fd14f57..8c9d706 100644
--- a/traffic_monitor/manager/stat.go
+++ b/traffic_monitor/manager/stat.go
@@ -20,6 +20,8 @@ package manager
  */
 
 import (
+	"os"
+	"runtime"
 	"time"
 
 	"github.com/apache/trafficcontrol/lib/go-log"
@@ -100,6 +102,16 @@ func StartStatHistoryManager(
 	}
 
 	go func() {
+		defer func() {
+			if err := recover(); err != nil {
+				log.Errorf("StatHistoryManager panic: %v\n", err)
+			} else {
+				log.Errorln("StatHistoryManager failed without panic")
+			}
+			log.Errorf("%s\n", stacktrace())
+			os.Exit(1) // The monitor can't run without a stat processor
+		}()
+
 		flushTimer := time.NewTimer(cfg.StatFlushInterval)
 		// Note! bufferTimer MAY be uninitialized! If there is no cfg.StatBufferInterval, the timer WILL NOT be created with time.NewTimer(), and thus is NOT initialized, and MUST NOT have functions called, such as timer.Stop()! Those functions WILL panic.
 		bufferTimer := &time.Timer{}
@@ -208,6 +220,18 @@ func StartStatHistoryManager(
 	return statInfoHistory, statResultHistory, statMaxKbpses, lastStatDurations, lastStats, &dsStats, unpolledCaches, localCacheStatus
 }
 
+func stacktrace() []byte {
+	initialBufSize := 1024
+	buf := make([]byte, initialBufSize)
+	for {
+		n := runtime.Stack(buf, true)
+		if n < len(buf) {
+			return buf[:n]
+		}
+		buf = make([]byte, len(buf)*2)
+	}
+}
+
 // processStatResults processes the given results, creating and setting DSStats, LastStats, and other stats. Note this is NOT threadsafe, and MUST NOT be called from multiple threads.
 func processStatResults(
 	results []cache.Result,
@@ -282,13 +306,16 @@ func processStatResults(
 	statInfoHistoryThreadsafe.Set(statInfoHistory)
 	statMaxKbpsesThreadsafe.Set(statMaxKbpses)
 
-	newDsStats, newLastStats, err := ds.CreateStats(precomputedData, toData, combinedStates, lastStats.Get().Copy(), time.Now(), mc, events, localStates)
+	lastStatsVal := lastStats.Get()
+	lastStatsCopy := lastStatsVal.Copy()
+	newDsStats, err := ds.CreateStats(precomputedData, toData, combinedStates, lastStatsCopy, time.Now(), mc, events, localStates)
+
 	if err != nil {
 		errorCount.Inc()
 		log.Errorf("getting deliveryservice: %v\n", err)
 	} else {
-		dsStats.Set(newDsStats)
-		lastStats.Set(newLastStats)
+		dsStats.Set(*newDsStats)
+		lastStats.Set(*lastStatsCopy)
 	}
 
 	health.CalcAvailabilityWithStats(results, "stat", statResultHistoryThreadsafe, mc, toData, localCacheStatusThreadsafe, localStates, events)
diff --git a/traffic_monitor/threadsafe/dsstats.go b/traffic_monitor/threadsafe/dsstats.go
index 9206d91..f7612e8 100644
--- a/traffic_monitor/threadsafe/dsstats.go
+++ b/traffic_monitor/threadsafe/dsstats.go
@@ -38,8 +38,7 @@ type DSStatsReader interface {
 
 // NewDSStats returns a deliveryservice.Stats object wrapped to be safe for multiple readers and a single writer.
 func NewDSStats() DSStats {
-	s := dsdata.NewStats()
-	return DSStats{m: &sync.RWMutex{}, dsStats: &s}
+	return DSStats{m: &sync.RWMutex{}, dsStats: dsdata.NewStats(0)}
 }
 
 // Get returns a Stats object safe for reading by multiple goroutines
diff --git a/traffic_monitor/threadsafe/lastkbpsstats.go b/traffic_monitor/threadsafe/lastkbpsstats.go
index f4ac424..32781b9 100644
--- a/traffic_monitor/threadsafe/lastkbpsstats.go
+++ b/traffic_monitor/threadsafe/lastkbpsstats.go
@@ -33,19 +33,18 @@ type LastStats struct {
 
 // NewLastStats returns a wrapped a deliveryservice.LastStats object safe for multiple readers and one writer.
 func NewLastStats() LastStats {
-	s := dsdata.NewLastStats()
-	return LastStats{m: &sync.RWMutex{}, stats: &s}
+	return LastStats{m: &sync.RWMutex{}, stats: dsdata.NewLastStats(0, 0)}
 }
 
 // Get returns the last KBPS stats object. Callers MUST NOT modify the object. It is not threadsafe for writing. If the object must be modified, callers must call LastStats.Copy() and modify the copy.
-func (o *LastStats) Get() dsdata.LastStats {
+func (o LastStats) Get() dsdata.LastStats {
 	o.m.RLock()
 	defer o.m.RUnlock()
 	return *o.stats
 }
 
 // Set sets the internal LastStats object. This MUST NOT be called by multiple goroutines.
-func (o *LastStats) Set(s dsdata.LastStats) {
+func (o LastStats) Set(s dsdata.LastStats) {
 	o.m.Lock()
 	*o.stats = s
 	o.m.Unlock()
diff --git a/traffic_monitor/threadsafe/resultstathistory.go b/traffic_monitor/threadsafe/resultstathistory.go
index 94256e5..91cea01 100644
--- a/traffic_monitor/threadsafe/resultstathistory.go
+++ b/traffic_monitor/threadsafe/resultstathistory.go
@@ -31,6 +31,7 @@ import (
 	"github.com/apache/trafficcontrol/lib/go-tc"
 	"github.com/apache/trafficcontrol/traffic_monitor/cache"
 	"github.com/apache/trafficcontrol/traffic_monitor/srvhttp"
+
 	"github.com/json-iterator/go"
 )