You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by rn...@apache.org on 2014/08/01 11:11:22 UTC
[36/48] mem3 commit: updated refs/heads/windsor-merge to ff02b9a
Allow targets to exceed floor, add another check
Sometimes we want to transfer a shard to a target even though it's
already at the floor. We add another check to make sure we're not
wasting effort -- the difference in shard counts between the source and
the target must be 2 or greater.
We also refactor the global shard count code to avoid future atom /
binary problems.
BugzID: 24466
Project: http://git-wip-us.apache.org/repos/asf/couchdb-mem3/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-mem3/commit/a6ca5c6f
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-mem3/tree/a6ca5c6f
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-mem3/diff/a6ca5c6f
Branch: refs/heads/windsor-merge
Commit: a6ca5c6fa36f8b49bf7785a932b30aef8693b286
Parents: 5c3c9c9
Author: Adam Kocoloski <ad...@cloudant.com>
Authored: Wed Oct 30 14:17:03 2013 -0400
Committer: Robert Newson <rn...@apache.org>
Committed: Wed Jul 23 18:46:27 2014 +0100
----------------------------------------------------------------------
src/mem3_rebalance.erl | 23 ++++++++++++++++-------
1 file changed, 16 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/couchdb-mem3/blob/a6ca5c6f/src/mem3_rebalance.erl
----------------------------------------------------------------------
diff --git a/src/mem3_rebalance.erl b/src/mem3_rebalance.erl
index b7d161c..5589e45 100644
--- a/src/mem3_rebalance.erl
+++ b/src/mem3_rebalance.erl
@@ -135,7 +135,7 @@ global_expand(TargetNodes0, LocalOps, Limit) ->
lists:member(Node, TargetNodes)
end, shard_count_by_node(LocalOps)),
TotalCount = lists:foldl(fun({_, C}, Sum) -> Sum + C end, 0, CountByNode),
- TargetLevel = (TotalCount div length(TargetNodes)) + 1,
+ TargetLevel = TotalCount div length(TargetNodes),
FoldFun = fun
(_, Acc) when length(Acc) >= Limit ->
% We've already accumulated the max number of shard ops.
@@ -176,11 +176,12 @@ donate_fold(#shard{node = Node} = Shard, #gacc{node = Node} = Acc0) ->
InZone = filter_map_by_zone(shards_by_node(Shards, Nodes), Zone),
SortedByCount = lists:sort(smallest_first(Moves), InZone),
SourceCount = get_shard_count(Node, SortedByCount),
+ GlobalShardCounts = shard_count_by_node(Moves),
+ TotalSource = get_global_shard_count(Node, GlobalShardCounts),
Fun = fun({CandidateNode, OwnShards}) ->
HasRange = lists:keymember(Shard#shard.range, #shard.range, OwnShards),
TargetCount = get_shard_count(CandidateNode, SortedByCount),
- NodeKey = couch_util:to_binary(CandidateNode),
- Total = couch_util:get_value(NodeKey, shard_count_by_node(Moves)),
+ TotalTarget = get_global_shard_count(CandidateNode, GlobalShardCounts),
if
CandidateNode =:= Node ->
% Can't move a shard to ourselves
@@ -191,8 +192,11 @@ donate_fold(#shard{node = Node} = Shard, #gacc{node = Node} = Acc0) ->
TargetCount >= SourceCount ->
% Executing this move would create a local imbalance in the DB
true;
- Total >= TargetLevel ->
- % The candidate has already achieved the target level
+ TotalTarget > TargetLevel ->
+ % The candidate has already exceeded the target level
+ true;
+ (TotalSource - TotalTarget) < 2 ->
+ % Donating here is wasted work
true;
true ->
false
@@ -216,6 +220,11 @@ donate_fold(_Shard, Acc) ->
get_shard_count(AtomKey, ShardsByNode) when is_atom(AtomKey) ->
length(couch_util:get_value(AtomKey, ShardsByNode, [])).
+get_global_shard_count(Node, Counts) when is_atom(Node) ->
+ get_global_shard_count(couch_util:to_binary(Node), Counts);
+get_global_shard_count(Node, Counts) when is_binary(Node) ->
+ couch_util:get_value(Node, Counts, 0).
+
compute_moves(IdealZoning, IdealZoning, _Copies, OtherMoves) ->
OtherMoves;
compute_moves(IdealZoning, ActualZoning, Copies, OtherMoves) ->
@@ -330,8 +339,8 @@ smallest_first(PrevMoves) ->
fun(A, B) -> sort_by_count(A, B, Global) =< 0 end.
sort_by_count({NodeA, SA}, {NodeB, SB}, Global) when length(SA) =:= length(SB) ->
- CountA = couch_util:get_value(couch_util:to_binary(NodeA), Global, 0),
- CountB = couch_util:get_value(couch_util:to_binary(NodeB), Global, 0),
+ CountA = get_global_shard_count(NodeA, Global),
+ CountB = get_global_shard_count(NodeB, Global),
cmp(CountA, CountB);
sort_by_count({_, A}, {_, B}, _) ->
cmp(length(A), length(B)).