You are viewing a plain text version of this content. The canonical link (hyperlink lost in plain-text conversion) is available in the original mailing-list archive.
Posted to commits@cassandra.apache.org by br...@apache.org on 2014/01/14 22:18:11 UTC
[2/3] git commit: Remove time penalty from DES. Patch by Tyler Hobbs,
reviewed by brandonwilliams for CASSANDRA-6465
Remove time penalty from DES.
Patch by Tyler Hobbs, reviewed by brandonwilliams for CASSANDRA-6465
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/200e494e
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/200e494e
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/200e494e
Branch: refs/heads/trunk
Commit: 200e494e7fd305cacb638e13a98b18356d124def
Parents: 97c6bbe
Author: Brandon Williams <br...@apache.org>
Authored: Tue Jan 14 15:15:41 2014 -0600
Committer: Brandon Williams <br...@apache.org>
Committed: Tue Jan 14 15:15:41 2014 -0600
----------------------------------------------------------------------
.../cassandra/locator/DynamicEndpointSnitch.java | 18 ++----------------
1 file changed, 2 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/200e494e/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java b/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
index ff8c70a..535dbb3 100644
--- a/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
+++ b/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
@@ -228,32 +228,18 @@ public class DynamicEndpointSnitch extends AbstractEndpointSnitch implements ILa
}
double maxLatency = 1;
- long maxPenalty = 1;
- HashMap<InetAddress, Long> penalties = new HashMap<InetAddress, Long>(samples.size());
- // We're going to weight the latency and time since last reply for each host against the worst one we see, to arrive at sort of a 'badness percentage' for both of them.
- // first, find the worst for each.
+ // We're going to weight the latency for each host against the worst one we see, to
+ // arrive at sort of a 'badness percentage' for them. First, find the worst for each:
for (Map.Entry<InetAddress, ExponentiallyDecayingSample> entry : samples.entrySet())
{
double mean = entry.getValue().getSnapshot().getMedian();
if (mean > maxLatency)
maxLatency = mean;
- long timePenalty = lastReceived.containsKey(entry.getKey()) ? lastReceived.get(entry.getKey()) : System.nanoTime();
- timePenalty = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - timePenalty);
- timePenalty = timePenalty > UPDATE_INTERVAL_IN_MS ? UPDATE_INTERVAL_IN_MS : timePenalty;
- // a convenient place to remember this since we've already calculated it and need it later
- penalties.put(entry.getKey(), timePenalty);
- if (timePenalty > maxPenalty)
- maxPenalty = timePenalty;
}
// now make another pass to do the weighting based on the maximums we found before
for (Map.Entry<InetAddress, ExponentiallyDecayingSample> entry: samples.entrySet())
{
double score = entry.getValue().getSnapshot().getMedian() / maxLatency;
- if (penalties.containsKey(entry.getKey()))
- score += penalties.get(entry.getKey()) / ((double) maxPenalty);
- else
- // there's a chance a host was added to the samples after our previous loop to get the time penalties. Add 1.0 to it, or '100% bad' for the time penalty.
- score += 1; // maxPenalty / maxPenalty
// finally, add the severity without any weighting, since hosts scale this relative to their own load and the size of the task causing the severity.
// "Severity" is basically a measure of compaction activity (CASSANDRA-3722).
score += StorageService.instance.getSeverity(entry.getKey());