You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ignite.apache.org by sb...@apache.org on 2017/12/28 14:18:50 UTC
ignite git commit: Added cache name to GridDhtPartitionTopologyImpl
logging.
Repository: ignite
Updated Branches:
refs/heads/master 429f9544a -> c7b4201e8
Added cache name to GridDhtPartitionTopologyImpl logging.
Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/c7b4201e
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/c7b4201e
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/c7b4201e
Branch: refs/heads/master
Commit: c7b4201e845a69a829f2e81ba2bcd4c33e9785eb
Parents: 429f954
Author: sboikov <sb...@gridgain.com>
Authored: Thu Dec 28 17:16:10 2017 +0300
Committer: sboikov <sb...@gridgain.com>
Committed: Thu Dec 28 17:18:43 2017 +0300
----------------------------------------------------------------------
.../dht/GridDhtPartitionTopologyImpl.java | 168 ++++++++++++-------
1 file changed, 104 insertions(+), 64 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ignite/blob/c7b4201e/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
index abe276f..e09d611 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
@@ -362,8 +362,10 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
assert owned : "Failed to own partition for oldest node [grp=" + grp.cacheOrGroupName() +
", part=" + locPart + ']';
- if (log.isDebugEnabled())
- log.debug("Owned partition for oldest node: " + locPart);
+ if (log.isDebugEnabled()) {
+ log.debug("Owned partition for oldest node [grp=" + grp.cacheOrGroupName() +
+ ", part=" + locPart + ']');
+ }
updateSeq = updateLocal(p, locPart.state(), updateSeq, affVer);
}
@@ -389,9 +391,10 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
updateSeq = updateLocal(p, locPart.state(), updateSeq, affVer);
- if (log.isDebugEnabled())
- log.debug("Evicting partition with rebalancing disabled " +
- "(it does not belong to affinity): " + locPart);
+ if (log.isDebugEnabled()) {
+ log.debug("Evicting partition with rebalancing disabled (it does not belong to " +
+ "affinity) [grp=" + grp.cacheOrGroupName() + ", part=" + locPart + ']');
+ }
}
}
else
@@ -476,8 +479,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
ClusterNode oldest = discoCache.oldestAliveServerNode();
if (log.isDebugEnabled()) {
- log.debug("Partition map beforeExchange [exchId=" + exchFut.exchangeId() +
- ", fullMap=" + fullMapString() + ']');
+ log.debug("Partition map beforeExchange [grp=" + grp.cacheOrGroupName() +
+ ", exchId=" + exchFut.exchangeId() + ", fullMap=" + fullMapString() + ']');
}
long updateSeq = this.updateSeq.incrementAndGet();
@@ -491,9 +494,11 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (node2part == null) {
node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq);
- if (log.isDebugEnabled())
- log.debug("Created brand new full topology map on oldest node [exchId=" +
- exchFut.exchangeId() + ", fullMap=" + fullMapString() + ']');
+ if (log.isDebugEnabled()) {
+ log.debug("Created brand new full topology map on oldest node [" +
+ "grp=" + grp.cacheOrGroupName() + ", exchId=" + exchFut.exchangeId() +
+ ", fullMap=" + fullMapString() + ']');
+ }
}
else if (!node2part.valid()) {
node2part = new GridDhtPartitionFullMap(oldest.id(),
@@ -503,7 +508,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
false);
if (log.isDebugEnabled()) {
- log.debug("Created new full topology map on oldest node [exchId=" + exchFut.exchangeId() +
+ log.debug("Created new full topology map on oldest node [" +
+ "grp=" + grp.cacheOrGroupName() + ", exchId=" + exchFut.exchangeId() +
", fullMap=" + node2part + ']');
}
}
@@ -516,7 +522,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (log.isDebugEnabled()) {
log.debug("Copied old map into new map on oldest node (previous oldest node left) [" +
- "exchId=" + exchFut.exchangeId() + ", fullMap=" + fullMapString() + ']');
+ "grp=" + grp.cacheOrGroupName() + ", exchId=" + exchFut.exchangeId() +
+ ", fullMap=" + fullMapString() + ']');
}
}
}
@@ -560,8 +567,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
}
if (log.isDebugEnabled()) {
- log.debug("Partition map after beforeExchange [exchId=" + exchFut.exchangeId() +
- ", fullMap=" + fullMapString() + ']');
+ log.debug("Partition map after beforeExchange [grp=" + grp.cacheOrGroupName() + ", " +
+ "exchId=" + exchFut.exchangeId() + ", fullMap=" + fullMapString() + ']');
}
}
finally {
@@ -584,7 +591,7 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
}
/** {@inheritDoc} */
- @Override public boolean afterExchange(GridDhtPartitionsExchangeFuture exchFut) throws IgniteCheckedException {
+ @Override public boolean afterExchange(GridDhtPartitionsExchangeFuture exchFut) {
boolean changed = false;
int num = grp.affinity().partitions();
@@ -592,7 +599,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
AffinityTopologyVersion topVer = exchFut.context().events().topologyVersion();
assert grp.affinity().lastVersion().equals(topVer) : "Affinity is not initialized " +
- "[topVer=" + topVer +
+ "[grp=" + grp.cacheOrGroupName() +
+ ", topVer=" + topVer +
", affVer=" + grp.affinity().lastVersion() +
", fut=" + exchFut + ']';
@@ -609,9 +617,11 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
assert readyTopVer.initialized() : readyTopVer;
assert lastTopChangeVer.equals(readyTopVer);
- if (log.isDebugEnabled())
- log.debug("Partition map before afterExchange [exchId=" + exchFut.exchangeId() + ", fullMap=" +
- fullMapString() + ']');
+ if (log.isDebugEnabled()) {
+ log.debug("Partition map before afterExchange [grp=" + grp.cacheOrGroupName() +
+ ", exchId=" + exchFut.exchangeId() +
+ ", fullMap=" + fullMapString() + ']');
+ }
long updateSeq = this.updateSeq.incrementAndGet();
@@ -622,8 +632,10 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
// This partition will be created during next topology event,
// which obviously has not happened at this point.
if (locPart == null) {
- if (log.isDebugEnabled())
- log.debug("Skipping local partition afterExchange (will not create): " + p);
+ if (log.isDebugEnabled()) {
+ log.debug("Skipping local partition afterExchange (will not create) [" +
+ "grp=" + grp.cacheOrGroupName() + ", p=" + p + ']');
+ }
continue;
}
@@ -638,8 +650,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (F.isEmpty(owners)) {
boolean owned = locPart.own();
- assert owned : "Failed to own partition [grp=" + grp.cacheOrGroupName() + ", locPart=" +
- locPart + ']';
+ assert owned : "Failed to own partition [grp=" + grp.cacheOrGroupName() +
+ ", locPart=" + locPart + ']';
updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer);
@@ -655,12 +667,14 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
discoEvt.timestamp());
}
- if (log.isDebugEnabled())
- log.debug("Owned partition: " + locPart);
+ if (log.isDebugEnabled()) {
+ log.debug("Owned partition [grp=" + grp.cacheOrGroupName() +
+ ", part=" + locPart + ']');
+ }
}
else if (log.isDebugEnabled())
- log.debug("Will not own partition (there are owners to rebalance from) [locPart=" +
- locPart + ", owners = " + owners + ']');
+ log.debug("Will not own partition (there are owners to rebalance from) [grp=" + grp.cacheOrGroupName() +
+ ", locPart=" + locPart + ", owners = " + owners + ']');
}
else
updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer);
@@ -677,8 +691,10 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
changed = true;
- if (log.isDebugEnabled())
- log.debug("Evicting moving partition (it does not belong to affinity): " + locPart);
+ if (log.isDebugEnabled()) {
+ log.debug("Evicting moving partition (it does not belong to affinity) [" +
+ "grp=" + grp.cacheOrGroupName() + ", part=" + locPart + ']');
+ }
}
}
}
@@ -834,20 +850,22 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (!belongs) {
throw new GridDhtInvalidPartitionException(p, "Adding entry to evicted partition " +
"(often may be caused by inconsistent 'key.hashCode()' implementation) " +
- "[part=" + p + ", topVer=" + topVer + ", this.topVer=" + this.readyTopVer + ']');
+ "[grp=" + grp.cacheOrGroupName() + ", part=" + p + ", topVer=" + topVer +
+ ", this.topVer=" + this.readyTopVer + ']');
}
}
else if (loc != null && state == RENTING && !showRenting) {
throw new GridDhtInvalidPartitionException(p, "Adding entry to partition that is concurrently " +
- "evicted [part=" + p + ", shouldBeMoving=" + loc.reload() + ", belongs=" + belongs +
- ", topVer=" + topVer + ", curTopVer=" + this.readyTopVer + "]");
+ "evicted [grp=" + grp.cacheOrGroupName() + ", part=" + p + ", shouldBeMoving=" +
+ loc.reload() + ", belongs=" + belongs + ", topVer=" + topVer + ", curTopVer=" + this.readyTopVer + "]");
}
if (loc == null) {
if (!belongs)
throw new GridDhtInvalidPartitionException(p, "Creating partition which does not belong to " +
"local node (often may be caused by inconsistent 'key.hashCode()' implementation) " +
- "[part=" + p + ", topVer=" + topVer + ", this.topVer=" + this.readyTopVer + ']');
+ "[grp=" + grp.cacheOrGroupName() + ", part=" + p + ", topVer=" + topVer +
+ ", this.topVer=" + this.readyTopVer + ']');
locParts.set(p, loc = new GridDhtLocalPartition(ctx, grp, p));
@@ -857,7 +875,7 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
created = true;
if (log.isDebugEnabled())
- log.debug("Created local partition: " + loc);
+ log.debug("Created local partition [grp=" + grp.cacheOrGroupName() + ", part=" + loc + ']');
}
}
finally {
@@ -1026,7 +1044,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (!topVer.equals(diffFromAffinityVer)) {
LT.warn(log, "Requested topology version does not match calculated diff, will require full iteration to" +
- "calculate mapping [topVer=" + topVer + ", diffVer=" + diffFromAffinityVer + "]");
+ "calculate mapping [grp=" + grp.cacheOrGroupName() + ", topVer=" + topVer +
+ ", diffVer=" + diffFromAffinityVer + "]");
nodes = new ArrayList<>();
@@ -1054,7 +1073,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
for (UUID nodeId : diffIds) {
if (affIds.contains(nodeId)) {
- U.warn(log, "Node from diff " + nodeId + " is affinity node. Skipping it.");
+ U.warn(log, "Node from diff is affinity node, skipping it [grp=" + grp.cacheOrGroupName() +
+ ", node=" + nodeId + ']');
continue;
}
@@ -1202,8 +1222,10 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
@Nullable CachePartitionFullCountersMap incomeCntrMap,
Set<Integer> partsToReload,
@Nullable AffinityTopologyVersion msgTopVer) {
- if (log.isDebugEnabled())
- log.debug("Updating full partition map [exchVer=" + exchangeVer + ", parts=" + fullMapString() + ']');
+ if (log.isDebugEnabled()) {
+ log.debug("Updating full partition map [grp=" + grp.cacheOrGroupName() + ", exchVer=" + exchangeVer +
+ ", fullMap=" + fullMapString() + ']');
+ }
assert partMap != null;
@@ -1239,7 +1261,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
// Ignore if exchange already finished or new exchange started.
if (readyTopVer.compareTo(exchangeVer) > 0 || lastTopChangeVer.compareTo(exchangeVer) > 0) {
U.warn(log, "Stale exchange id for full partition map update (will ignore) [" +
- "lastTopChange=" + lastTopChangeVer +
+ "grp=" + grp.cacheOrGroupName() +
+ ", lastTopChange=" + lastTopChangeVer +
", readTopVer=" + readyTopVer +
", exchVer=" + exchangeVer + ']');
@@ -1249,7 +1272,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (msgTopVer != null && lastTopChangeVer.compareTo(msgTopVer) > 0) {
U.warn(log, "Stale version for full partition map update message (will ignore) [" +
- "lastTopChange=" + lastTopChangeVer +
+ "grp=" + grp.cacheOrGroupName() +
+ ", lastTopChange=" + lastTopChangeVer +
", readTopVer=" + readyTopVer +
", msgVer=" + msgTopVer + ']');
@@ -1266,7 +1290,9 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
fullMapUpdated = true;
if (log.isDebugEnabled()) {
- log.debug("Overriding partition map in full update map [exchVer=" + exchangeVer +
+ log.debug("Overriding partition map in full update map [" +
+ "grp=" + grp.cacheOrGroupName() +
+ ", exchVer=" + exchangeVer +
", curPart=" + mapString(part) +
", newPart=" + mapString(newPart) + ']');
}
@@ -1295,8 +1321,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (!ctx.discovery().alive(nodeId)) {
if (log.isDebugEnabled())
- log.debug("Removing left node from full map update [nodeId=" + nodeId + ", partMap=" +
- partMap + ']');
+ log.debug("Removing left node from full map update [grp=" + grp.cacheOrGroupName() +
+ ", nodeId=" + nodeId + ", partMap=" + partMap + ']');
it.remove();
}
@@ -1311,7 +1337,9 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (!fullMapUpdated) {
if (log.isDebugEnabled()) {
- log.debug("No updates for full partition map (will ignore) [lastExch=" + lastTopChangeVer +
+ log.debug("No updates for full partition map (will ignore) [" +
+ "grp=" + grp.cacheOrGroupName() +
+ ", lastExch=" + lastTopChangeVer +
", exchVer=" + exchangeVer +
", curMap=" + node2part +
", newMap=" + partMap + ']');
@@ -1426,8 +1454,10 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
consistencyCheck();
- if (log.isDebugEnabled())
- log.debug("Partition map after full update: " + fullMapString());
+ if (log.isDebugEnabled()) {
+ log.debug("Partition map after full update [grp=" + grp.cacheOrGroupName() +
+ ", map=" + fullMapString() + ']');
+ }
if (changed)
ctx.exchange().scheduleResendPartitions();
@@ -1457,7 +1487,7 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (acquired - now >= 100) {
if (timeLog.isInfoEnabled())
timeLog.info("Waited too long to acquire topology write lock " +
- "[cache=" + grp.groupId() + ", waitTime=" + (acquired - now) + ']');
+ "[grp=" + grp.cacheOrGroupName() + ", waitTime=" + (acquired - now) + ']');
}
if (stopping)
@@ -1492,7 +1522,7 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (acquired - now >= 100) {
if (timeLog.isInfoEnabled())
timeLog.info("Waited too long to acquire topology write lock " +
- "[cache=" + grp.groupId() + ", waitTime=" + (acquired - now) + ']');
+ "[grp=" + grp.cacheOrGroupName() + ", waitTime=" + (acquired - now) + ']');
}
if (stopping)
@@ -1540,13 +1570,16 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
GridDhtPartitionMap parts,
boolean force
) {
- if (log.isDebugEnabled())
- log.debug("Updating single partition map [exchId=" + exchId + ", parts=" + mapString(parts) + ']');
+ if (log.isDebugEnabled()) {
+ log.debug("Updating single partition map [grp=" + grp.cacheOrGroupName() + ", exchId=" + exchId +
+ ", parts=" + mapString(parts) + ']');
+ }
if (!ctx.discovery().alive(parts.nodeId())) {
- if (log.isDebugEnabled())
- log.debug("Received partition update for non-existing node (will ignore) [exchId=" + exchId +
- ", parts=" + parts + ']');
+ if (log.isDebugEnabled()) {
+ log.debug("Received partition update for non-existing node (will ignore) [grp=" + grp.cacheOrGroupName() +
+ ", exchId=" + exchId + ", parts=" + parts + ']');
+ }
return false;
}
@@ -1563,7 +1596,8 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
if (!force) {
if (lastTopChangeVer.initialized() && exchId != null && lastTopChangeVer.compareTo(exchId.topologyVersion()) > 0) {
U.warn(log, "Stale exchange id for single partition map update (will ignore) [" +
- "lastTopChange=" + lastTopChangeVer +
+ "grp=" + grp.cacheOrGroupName() +
+ ", lastTopChange=" + lastTopChangeVer +
", readTopVer=" + readyTopVer +
", exch=" + exchId.topologyVersion() + ']');
@@ -1582,7 +1616,9 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
parts.updateSequence(cur.updateSequence(), cur.topologyVersion());
}
else if (isStaleUpdate(cur, parts)) {
- U.warn(log, "Stale update for single partition map update (will ignore) [exchId=" + exchId +
+ U.warn(log, "Stale update for single partition map update (will ignore) [" +
+ "grp=" + grp.cacheOrGroupName() +
+ ", exchId=" + exchId +
", curMap=" + cur +
", newMap=" + parts + ']');
@@ -1659,7 +1695,7 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
consistencyCheck();
if (log.isDebugEnabled())
- log.debug("Partition map after single update: " + fullMapString());
+ log.debug("Partition map after single update [grp=" + grp.cacheOrGroupName() + ", map=" + fullMapString() + ']');
if (changed && exchId == null)
ctx.exchange().scheduleResendPartitions();
@@ -1979,7 +2015,7 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
}
U.warn(log, "Partition has been scheduled for rebalancing due to outdated update counter " +
- "[nodeId=" + ctx.localNodeId() + ", cacheOrGroupName=" + grp.cacheOrGroupName() +
+ "[nodeId=" + ctx.localNodeId() + ", grp=" + grp.cacheOrGroupName() +
", partId=" + locPart.id() + ", haveHistory=" + haveHistory + "]");
}
@@ -2006,7 +2042,7 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
this.updateSeq.setIfGreater(partMap.updateSequence());
U.warn(log, "Partition has been scheduled for rebalancing due to outdated update counter " +
- "[nodeId=" + e.getKey() + ", cacheOrGroupName=" + grp.cacheOrGroupName() +
+ "[nodeId=" + e.getKey() + ", grp=" + grp.cacheOrGroupName() +
", partId=" + p + ", haveHistory=" + haveHistory + "]");
}
}
@@ -2060,8 +2096,10 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
changed = true;
- if (log.isDebugEnabled())
- log.debug("Evicted local partition (all affinity nodes are owners): " + part);
+ if (log.isDebugEnabled()) {
+ log.debug("Evicted local partition (all affinity nodes are owners) [grp=" + grp.cacheOrGroupName() +
+ ", part=" + part + ']');
+ }
}
else {
int ownerCnt = nodeIds.size();
@@ -2088,9 +2126,10 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
changed = true;
- if (log.isDebugEnabled())
- log.debug("Evicted local partition (this node is oldest non-affinity node): " +
- part);
+ if (log.isDebugEnabled()) {
+ log.debug("Evicted local partition (this node is oldest non-affinity node) [" +
+ "grp=" + grp.cacheOrGroupName() + ", part=" + part + ']');
+ }
break;
}
@@ -2134,6 +2173,7 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
boolean b = this.updateSeq.compareAndSet(seq0, seq + 1);
assert b : "Invalid update sequence [updateSeq=" + updateSeq +
+ ", grp=" + grp.cacheOrGroupName() +
", seq=" + seq +
", curUpdateSeq=" + this.updateSeq.get() +
", node2part=" + node2part.toFullString() + ']';
@@ -2468,7 +2508,7 @@ public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
rebalancedTopVer = readyTopVer;
if (log.isDebugEnabled())
- log.debug("Updated rebalanced version [cache=" + grp.cacheOrGroupName() + ", ver=" + rebalancedTopVer + ']');
+ log.debug("Updated rebalanced version [grp=" + grp.cacheOrGroupName() + ", ver=" + rebalancedTopVer + ']');
}
}