Posted to issues@flink.apache.org by GitBox <gi...@apache.org> on 2018/11/02 08:08:39 UTC

[GitHub] yanghua commented on a change in pull request #6850: [FLINK-10252] Handle oversized metric messages

yanghua commented on a change in pull request #6850: [FLINK-10252] Handle oversized metric messages
URL: https://github.com/apache/flink/pull/6850#discussion_r230295056
 
 

 ##########
 File path: flink-runtime/src/main/java/org/apache/flink/runtime/metrics/dump/MetricDumpSerialization.java
 ##########
 @@ -124,55 +160,135 @@ public MetricSerializationResult serialize(
 			Map<Counter, Tuple2<QueryScopeInfo, String>> counters,
 			Map<Gauge<?>, Tuple2<QueryScopeInfo, String>> gauges,
 			Map<Histogram, Tuple2<QueryScopeInfo, String>> histograms,
-			Map<Meter, Tuple2<QueryScopeInfo, String>> meters) {
+			Map<Meter, Tuple2<QueryScopeInfo, String>> meters,
+			long maximumFramesize) {
+
+			boolean markUnserializedMetrics = false;
 
-			buffer.clear();
+			Map<Counter, Tuple2<QueryScopeInfo, String>> unserializedCounters = new HashMap<>();
+			Map<Gauge<?>, Tuple2<QueryScopeInfo, String>> unserializedGauges = new HashMap<>();
+			Map<Histogram, Tuple2<QueryScopeInfo, String>> unserializedHistograms = new HashMap<>();
+			Map<Meter, Tuple2<QueryScopeInfo, String>> unserializedMeters = new HashMap<>();
 
+			countersBuffer.clear();
 			int numCounters = 0;
 			for (Map.Entry<Counter, Tuple2<QueryScopeInfo, String>> entry : counters.entrySet()) {
+				if (markUnserializedMetrics) {
+					unserializedCounters.put(entry.getKey(), entry.getValue());
+					continue;
+				}
+
 				try {
-					serializeCounter(buffer, entry.getValue().f0, entry.getValue().f1, entry.getKey());
+					serializeCounter(countersBuffer, entry.getValue().f0, entry.getValue().f1, entry.getKey());
 					numCounters++;
+					if (countersBuffer.length() > maximumFramesize) {
+						LOG.warn("The serialized counter metrics are larger than the maximum frame size, " +
+							"so not all metrics may be reported.");
+						markUnserializedMetrics = true;
+						// Clear the buffer entirely, because we cannot revoke the latest metric, which caused the overflow.
+						unserializedCounters.put(entry.getKey(), entry.getValue());
+						countersBuffer.clear();
+						numCounters = 0;
+					}
 				} catch (Exception e) {
 					LOG.debug("Failed to serialize counter.", e);
 				}
 			}
 
+			gaugesBuffer.clear();
 			int numGauges = 0;
 			for (Map.Entry<Gauge<?>, Tuple2<QueryScopeInfo, String>> entry : gauges.entrySet()) {
+				if (markUnserializedMetrics) {
+					unserializedGauges.put(entry.getKey(), entry.getValue());
+					continue;
+				}
+
 				try {
-					serializeGauge(buffer, entry.getValue().f0, entry.getValue().f1, entry.getKey());
+					serializeGauge(gaugesBuffer, entry.getValue().f0, entry.getValue().f1, entry.getKey());
 					numGauges++;
+					if (gaugesBuffer.length() + countersBuffer.length() > maximumFramesize) {
+						LOG.warn("The serialized gauge metrics are larger than the maximum frame size, " +
+							"so not all metrics may be reported.");
+						markUnserializedMetrics = true;
+						unserializedGauges.put(entry.getKey(), entry.getValue());
+						gaugesBuffer.clear();
+						numGauges = 0;
+					}
 				} catch (Exception e) {
 					LOG.debug("Failed to serialize gauge.", e);
 				}
 			}
 
+			histogramsBuffer.clear();
 			int numHistograms = 0;
 			for (Map.Entry<Histogram, Tuple2<QueryScopeInfo, String>> entry : histograms.entrySet()) {
+				if (markUnserializedMetrics) {
+					unserializedHistograms.put(entry.getKey(), entry.getValue());
+					continue;
+				}
+
 				try {
-					serializeHistogram(buffer, entry.getValue().f0, entry.getValue().f1, entry.getKey());
+					serializeHistogram(histogramsBuffer, entry.getValue().f0, entry.getValue().f1, entry.getKey());
 					numHistograms++;
+					if (histogramsBuffer.length() + gaugesBuffer.length() + countersBuffer.length() > maximumFramesize) {
+						LOG.warn("The serialized histogram metrics are larger than the maximum frame size, " +
+							"so not all metrics may be reported.");
+						markUnserializedMetrics = true;
+						unserializedHistograms.put(entry.getKey(), entry.getValue());
+						histogramsBuffer.clear();
+						numHistograms = 0;
+					}
 				} catch (Exception e) {
 					LOG.debug("Failed to serialize histogram.", e);
 				}
 			}
 
+			metersBuffer.clear();
 			int numMeters = 0;
 			for (Map.Entry<Meter, Tuple2<QueryScopeInfo, String>> entry : meters.entrySet()) {
+				if (markUnserializedMetrics) {
+					unserializedMeters.put(entry.getKey(), entry.getValue());
+					continue;
+				}
+
 				try {
-					serializeMeter(buffer, entry.getValue().f0, entry.getValue().f1, entry.getKey());
+					serializeMeter(metersBuffer, entry.getValue().f0, entry.getValue().f1, entry.getKey());
 					numMeters++;
+					if (metersBuffer.length() + histogramsBuffer.length() + gaugesBuffer.length() +
 
 Review comment:
  Hi @zentol, I don't quite understand what this means. Could you elaborate?
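
  For context, the diff above follows a common size-capping pattern: serialize entries into a buffer, and once the configured maximum frame size is exceeded, stop serializing and collect the remainder as "unserialized" so the caller can report or retry them later. Below is a minimal, self-contained sketch of that pattern; the class and method names are illustrative only and are not Flink's actual MetricDumpSerialization API. Unlike the diff, the sketch checks an entry's encoded size before appending it, so the buffer never has to be cleared after an overflow.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Illustrative sketch only (not Flink code): serialize name/value pairs into a
 * frame, but never let the frame grow past a configured maximum size. Entries
 * that do not fit are returned so the caller can report or retry them later.
 */
public class SizeCappedSerializer {

	/** Holds the serialized frame plus everything that did not fit. */
	public static class Result {
		public final byte[] frame;
		public final int numSerialized;
		public final Map<String, Long> unserialized;

		Result(byte[] frame, int numSerialized, Map<String, Long> unserialized) {
			this.frame = frame;
			this.numSerialized = numSerialized;
			this.unserialized = unserialized;
		}
	}

	public static Result serialize(Map<String, Long> counters, long maxFrameSize) throws IOException {
		ByteArrayOutputStream buffer = new ByteArrayOutputStream();
		Map<String, Long> unserialized = new LinkedHashMap<>();
		int numSerialized = 0;
		boolean overflowed = false;

		for (Map.Entry<String, Long> entry : counters.entrySet()) {
			if (overflowed) {
				// The budget is already exhausted; just collect the remainder.
				unserialized.put(entry.getKey(), entry.getValue());
				continue;
			}

			byte[] encoded = encode(entry.getKey(), entry.getValue());
			if (buffer.size() + encoded.length > maxFrameSize) {
				// This entry would push the frame over the limit, so it is not
				// appended; it and every following entry are reported instead.
				overflowed = true;
				unserialized.put(entry.getKey(), entry.getValue());
				continue;
			}

			buffer.write(encoded);
			numSerialized++;
		}

		return new Result(buffer.toByteArray(), numSerialized, unserialized);
	}

	/** Encodes a single entry as [nameLength][nameBytes][value]. */
	private static byte[] encode(String name, long value) throws IOException {
		ByteArrayOutputStream bytes = new ByteArrayOutputStream();
		DataOutputStream out = new DataOutputStream(bytes);
		byte[] nameBytes = name.getBytes(StandardCharsets.UTF_8);
		out.writeInt(nameBytes.length);
		out.write(nameBytes);
		out.writeLong(value);
		out.flush();
		return bytes.toByteArray();
	}
}

  In the diff above, the overflow check runs per metric category (counters, gauges, histograms, meters) and compares the cumulative length of all buffers filled so far against maximumFramesize; the sketch collapses that into a single map purely for brevity.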

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services