Posted to dev@tomcat.apache.org by ma...@apache.org on 2023/05/10 15:16:58 UTC

[tomcat] branch 8.5.x updated: Switch HTTP/2 to use RFC 9218 priorities rather than RFC 7540

This is an automated email from the ASF dual-hosted git repository.

markt pushed a commit to branch 8.5.x
in repository https://gitbox.apache.org/repos/asf/tomcat.git


The following commit(s) were added to refs/heads/8.5.x by this push:
     new 3c4572b75e Switch HTTP/2 to use RFC 9218 priorities rather than RFC 7540
3c4572b75e is described below

commit 3c4572b75e57375fadbb85a5a2e133b67db912c3
Author: Mark Thomas <ma...@apache.org>
AuthorDate: Tue May 9 20:01:46 2023 +0100

    Switch HTTP/2 to use RFC 9218 priorities rather than RFC 7540
---
 .../apache/coyote/http2/AbstractNonZeroStream.java |  88 ---
 java/org/apache/coyote/http2/AbstractStream.java   |  49 --
 .../coyote/http2/ConnectionSettingsBase.java       |  16 +
 java/org/apache/coyote/http2/Constants.java        |   7 +-
 java/org/apache/coyote/http2/FrameType.java        |  25 +-
 java/org/apache/coyote/http2/Http2Parser.java      |  74 ++-
 .../apache/coyote/http2/Http2UpgradeHandler.java   | 383 +++++--------
 .../apache/coyote/http2/LocalStrings.properties    |   9 +-
 .../apache/coyote/http2/LocalStrings_es.properties |   3 -
 .../apache/coyote/http2/LocalStrings_fr.properties |   9 +-
 .../apache/coyote/http2/LocalStrings_ja.properties |   9 +-
 .../apache/coyote/http2/LocalStrings_ko.properties |   5 -
 .../coyote/http2/LocalStrings_zh_CN.properties     |   5 -
 java/org/apache/coyote/http2/Setting.java          |   4 +
 java/org/apache/coyote/http2/Stream.java           |  36 +-
 .../util/http/parser/LocalStrings.properties       |  14 +
 .../util/http/parser/LocalStrings_fr.properties    |  14 +
 .../util/http/parser/LocalStrings_ja.properties    |  14 +
 .../util/http/parser/LocalStrings_ko.properties    |  14 +
 .../util/http/parser/LocalStrings_zh_CN.properties |   8 +
 .../apache/tomcat/util/http/parser/Priority.java   |  92 ++++
 .../tomcat/util/http/parser/StructuredField.java   | 598 +++++++++++++++++++++
 test/org/apache/coyote/http2/Http2TestBase.java    |  48 +-
 .../apache/coyote/http2/TestAbstractStream.java    | 286 ----------
 .../apache/coyote/http2/TestHttp2Section_5_3.java  | 303 -----------
 test/org/apache/coyote/http2/TestRfc9218.java      | 174 ++++++
 .../tomcat/util/http/parser/TestPriority.java      |  32 +-
 .../http/parser/TesterHttpWgStructuredField.java   | 130 +++++
 webapps/docs/changelog.xml                         |   9 +
 29 files changed, 1406 insertions(+), 1052 deletions(-)
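
For context, RFC 9218 replaces the RFC 7540 dependency/weight tree with two
per-stream signals: an urgency value (0 = highest priority, 7 = lowest) and an
incremental flag, carried in the Priority header field and in the new
PRIORITY_UPDATE frame. A minimal sketch of how the
org.apache.tomcat.util.http.parser.Priority parser added by this commit might be
exercised is shown below; the sample value "u=2, i", the StringReader and the
example class are illustrative assumptions, while parsePriority(Reader),
getUrgency() and getIncremental() are the calls used in Http2Parser further down.

    import java.io.Reader;
    import java.io.StringReader;

    import org.apache.tomcat.util.http.parser.Priority;

    public class Rfc9218PriorityExample {
        public static void main(String[] args) throws Exception {
            // RFC 9218 priority signal: urgency 2, incremental delivery requested
            Reader r = new StringReader("u=2, i");
            Priority p = Priority.parsePriority(r);
            System.out.println("urgency=" + p.getUrgency());         // 2
            System.out.println("incremental=" + p.getIncremental()); // true
        }
    }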

diff --git a/java/org/apache/coyote/http2/AbstractNonZeroStream.java b/java/org/apache/coyote/http2/AbstractNonZeroStream.java
index 9eea0b1ccb..aae614f42c 100644
--- a/java/org/apache/coyote/http2/AbstractNonZeroStream.java
+++ b/java/org/apache/coyote/http2/AbstractNonZeroStream.java
@@ -17,11 +17,6 @@
 package org.apache.coyote.http2;
 
 import java.nio.ByteBuffer;
-import java.util.Iterator;
-
-import org.apache.juli.logging.Log;
-import org.apache.juli.logging.LogFactory;
-import org.apache.tomcat.util.res.StringManager;
 
 /**
  * Base class for all streams other than stream 0, the connection. Primarily provides functionality shared between full
@@ -29,15 +24,10 @@ import org.apache.tomcat.util.res.StringManager;
  */
 abstract class AbstractNonZeroStream extends AbstractStream {
 
-    private static final Log log = LogFactory.getLog(AbstractNonZeroStream.class);
-    private static final StringManager sm = StringManager.getManager(AbstractNonZeroStream.class);
-
     protected static final ByteBuffer ZERO_LENGTH_BYTEBUFFER = ByteBuffer.allocate(0);
 
     protected final StreamStateMachine state;
 
-    private volatile int weight = Constants.DEFAULT_WEIGHT;
-
 
     AbstractNonZeroStream(String connectionId, Integer identifier) {
         super(identifier);
@@ -51,84 +41,6 @@ abstract class AbstractNonZeroStream extends AbstractStream {
     }
 
 
-    @Override
-    final int getWeight() {
-        return weight;
-    }
-
-
-    /*
-     * General method used when reprioritising a stream and care needs to be taken not to create circular references.
-     *
-     * Changes to the priority tree need to be synchronized at the connection level. This is the caller's
-     * responsibility.
-     */
-    final void rePrioritise(AbstractStream parent, boolean exclusive, int weight) {
-        if (log.isDebugEnabled()) {
-            log.debug(sm.getString("stream.reprioritisation.debug", getConnectionId(), getIdAsString(),
-                    Boolean.toString(exclusive), parent.getIdAsString(), Integer.toString(weight)));
-        }
-
-        // Check if new parent is a descendant of this stream
-        if (isDescendant(parent)) {
-            parent.detachFromParent();
-            // Cast is always safe since any descendant of this stream must be
-            // an instance of AbstractNonZeroStream
-            getParentStream().addChild((AbstractNonZeroStream) parent);
-        }
-
-        if (exclusive) {
-            // Need to move children of the new parent to be children of this
-            // stream. Slightly convoluted to avoid concurrent modification.
-            Iterator<AbstractNonZeroStream> parentsChildren = parent.getChildStreams().iterator();
-            while (parentsChildren.hasNext()) {
-                AbstractNonZeroStream parentsChild = parentsChildren.next();
-                parentsChildren.remove();
-                this.addChild(parentsChild);
-            }
-        }
-        detachFromParent();
-        parent.addChild(this);
-        this.weight = weight;
-    }
-
-
-    /*
-     * Used when removing closed streams from the tree and we know there is no need to check for circular references.
-     *
-     * Changes to the priority tree need to be synchronized at the connection level. This is the caller's
-     * responsibility.
-     */
-    final void rePrioritise(AbstractStream parent, int weight) {
-        if (log.isDebugEnabled()) {
-            log.debug(sm.getString("stream.reprioritisation.debug", getConnectionId(), getIdAsString(), Boolean.FALSE,
-                    parent.getIdAsString(), Integer.toString(weight)));
-        }
-
-        parent.addChild(this);
-        this.weight = weight;
-    }
-
-
-    /*
-     * Used when "recycling" a stream and replacing a Stream instance with a RecycledStream instance.
-     *
-     * Replace this stream with the provided stream in the parent/child hierarchy.
-     *
-     * Changes to the priority tree need to be synchronized at the connection level. This is the caller's
-     * responsibility.
-     */
-    void replaceStream(AbstractNonZeroStream replacement) {
-        getParentStream().addChild(replacement);
-        detachFromParent();
-        for (AbstractNonZeroStream child : getChildStreams()) {
-            replacement.addChild(child);
-        }
-        getChildStreams().clear();
-        replacement.weight = weight;
-    }
-
-
     final boolean isClosedFinal() {
         return state.isClosedFinal();
     }
diff --git a/java/org/apache/coyote/http2/AbstractStream.java b/java/org/apache/coyote/http2/AbstractStream.java
index 0cc5dc3e2a..f332b8c593 100644
--- a/java/org/apache/coyote/http2/AbstractStream.java
+++ b/java/org/apache/coyote/http2/AbstractStream.java
@@ -16,10 +16,6 @@
  */
 package org.apache.coyote.http2;
 
-import java.util.Collections;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
 import org.apache.juli.logging.Log;
 import org.apache.juli.logging.LogFactory;
 import org.apache.tomcat.util.res.StringManager;
@@ -36,9 +32,6 @@ abstract class AbstractStream {
     private final Integer identifier;
     private final String idAsString;
 
-    private volatile AbstractStream parentStream = null;
-    private final Set<AbstractNonZeroStream> childStreams =
-            Collections.newSetFromMap(new ConcurrentHashMap<AbstractNonZeroStream,Boolean>());
     private long windowSize = ConnectionSettingsBase.DEFAULT_INITIAL_WINDOW_SIZE;
 
     private volatile int connectionAllocationRequested = 0;
@@ -66,46 +59,6 @@ abstract class AbstractStream {
     }
 
 
-    final void detachFromParent() {
-        if (parentStream != null) {
-            parentStream.getChildStreams().remove(this);
-            parentStream = null;
-        }
-    }
-
-
-    final void addChild(AbstractNonZeroStream child) {
-        child.setParentStream(this);
-        childStreams.add(child);
-    }
-
-
-    final boolean isDescendant(AbstractStream stream) {
-        // Is the passed in Stream a descendant of this Stream?
-        // Start at the passed in Stream and work up
-        AbstractStream parent = stream.getParentStream();
-        while (parent != null && parent != this) {
-            parent = parent.getParentStream();
-        }
-        return parent != null;
-    }
-
-
-    final AbstractStream getParentStream() {
-        return parentStream;
-    }
-
-
-    final void setParentStream(AbstractStream parentStream) {
-        this.parentStream = parentStream;
-    }
-
-
-    final Set<AbstractNonZeroStream> getChildStreams() {
-        return childStreams;
-    }
-
-
     final synchronized void setWindowSize(long windowSize) {
         this.windowSize = windowSize;
     }
@@ -183,6 +136,4 @@ abstract class AbstractStream {
 
 
     abstract String getConnectionId();
-
-    abstract int getWeight();
 }
diff --git a/java/org/apache/coyote/http2/ConnectionSettingsBase.java b/java/org/apache/coyote/http2/ConnectionSettingsBase.java
index af1128be2b..26af41656b 100644
--- a/java/org/apache/coyote/http2/ConnectionSettingsBase.java
+++ b/java/org/apache/coyote/http2/ConnectionSettingsBase.java
@@ -45,6 +45,9 @@ abstract class ConnectionSettingsBase<T extends Throwable> {
     static final int DEFAULT_MAX_FRAME_SIZE = MIN_MAX_FRAME_SIZE;
     static final long DEFAULT_MAX_HEADER_LIST_SIZE = 1 << 15;
 
+    // Defaults (defined by Tomcat)
+    static final long DEFAULT_NO_RFC7540_PRIORITIES = 1;
+
     Map<Setting,Long> current = new ConcurrentHashMap<>();
     Map<Setting,Long> pending = new ConcurrentHashMap<>();
 
@@ -58,6 +61,7 @@ abstract class ConnectionSettingsBase<T extends Throwable> {
         current.put(Setting.INITIAL_WINDOW_SIZE, Long.valueOf(DEFAULT_INITIAL_WINDOW_SIZE));
         current.put(Setting.MAX_FRAME_SIZE, Long.valueOf(DEFAULT_MAX_FRAME_SIZE));
         current.put(Setting.MAX_HEADER_LIST_SIZE, Long.valueOf(DEFAULT_MAX_HEADER_LIST_SIZE));
+        current.put(Setting.NO_RFC7540_PRIORITIES, Long.valueOf(DEFAULT_NO_RFC7540_PRIORITIES));
     }
 
 
@@ -86,6 +90,9 @@ abstract class ConnectionSettingsBase<T extends Throwable> {
             case MAX_HEADER_LIST_SIZE:
                 // No further validation required
                 break;
+            case NO_RFC7540_PRIORITIES:
+                validateNoRfc7540Priorities(value);
+                break;
             case UNKNOWN:
                 // Unrecognised. Ignore it.
                 return;
@@ -211,6 +218,15 @@ abstract class ConnectionSettingsBase<T extends Throwable> {
     }
 
 
+    private void validateNoRfc7540Priorities(long noRfc7540Priorities) throws T {
+        if (noRfc7540Priorities < 0 || noRfc7540Priorities > 1) {
+            String msg = sm.getString("connectionSettings.noRfc7540PrioritiesInvalid", connectionId,
+                    Long.toString(noRfc7540Priorities));
+            throwException(msg, Http2Error.PROTOCOL_ERROR);
+        }
+    }
+
+
     abstract void throwException(String msg, Http2Error error) throws T;
 
     abstract String getEndpointName();
diff --git a/java/org/apache/coyote/http2/Constants.java b/java/org/apache/coyote/http2/Constants.java
index 739ae7eb12..5575c763c7 100644
--- a/java/org/apache/coyote/http2/Constants.java
+++ b/java/org/apache/coyote/http2/Constants.java
@@ -18,7 +18,12 @@ package org.apache.coyote.http2;
 
 public class Constants {
 
-    // Prioritisation
+    /**
+     * RFC 7540 prioritisation default weight.
+     *
+     * @deprecated Unused. Will be removed in Tomcat 11 onwards.
+     */
+    @Deprecated
     public static final int DEFAULT_WEIGHT = 16;
 
     // Parsing
diff --git a/java/org/apache/coyote/http2/FrameType.java b/java/org/apache/coyote/http2/FrameType.java
index c757b5b21f..4f7b792c46 100644
--- a/java/org/apache/coyote/http2/FrameType.java
+++ b/java/org/apache/coyote/http2/FrameType.java
@@ -20,17 +20,18 @@ import org.apache.tomcat.util.res.StringManager;
 
 enum FrameType {
     // @formatter:off
-    DATA          (0,   false,  true, null,               false),
-    HEADERS       (1,   false,  true, null,                true),
-    PRIORITY      (2,   false,  true, equals(5),          false),
-    RST           (3,   false,  true, equals(4),          false),
-    SETTINGS      (4,    true, false, dividableBy(6),      true),
-    PUSH_PROMISE  (5,   false,  true, greaterOrEquals(4),  true),
-    PING          (6,    true, false, equals(8),          false),
-    GOAWAY        (7,    true, false, greaterOrEquals(8), false),
-    WINDOW_UPDATE (8,    true,  true, equals(4),           true),
-    CONTINUATION  (9,   false,  true, null,                true),
-    UNKNOWN       (256,  true,  true, null,               false);
+    DATA            (  0, false,  true, null,               false),
+    HEADERS         (  1, false,  true, null,                true),
+    PRIORITY        (  2, false,  true, equals(5),          false),
+    RST             (  3, false,  true, equals(4),          false),
+    SETTINGS        (  4,  true, false, dividableBy(6),      true),
+    PUSH_PROMISE    (  5, false,  true, greaterOrEquals(4),  true),
+    PING            (  6,  true, false, equals(8),          false),
+    GOAWAY          (  7,  true, false, greaterOrEquals(8), false),
+    WINDOW_UPDATE   (  8,  true,  true, equals(4),           true),
+    CONTINUATION    (  9, false,  true, null,                true),
+    PRIORITY_UPDATE ( 16,  true, false, greaterOrEquals(4),  true),
+    UNKNOWN         (256,  true,  true, null,               false);
     // @formatter:on
 
     private static final StringManager sm = StringManager.getManager(FrameType.class);
@@ -105,6 +106,8 @@ enum FrameType {
                 return WINDOW_UPDATE;
             case 9:
                 return CONTINUATION;
+            case 16:
+                return PRIORITY_UPDATE;
             default:
                 return UNKNOWN;
         }
diff --git a/java/org/apache/coyote/http2/Http2Parser.java b/java/org/apache/coyote/http2/Http2Parser.java
index 9d52a43452..2962c0699e 100644
--- a/java/org/apache/coyote/http2/Http2Parser.java
+++ b/java/org/apache/coyote/http2/Http2Parser.java
@@ -16,7 +16,11 @@
  */
 package org.apache.coyote.http2;
 
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
 import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 
@@ -25,6 +29,7 @@ import org.apache.coyote.http2.HpackDecoder.HeaderEmitter;
 import org.apache.juli.logging.Log;
 import org.apache.juli.logging.LogFactory;
 import org.apache.tomcat.util.buf.ByteBufferUtils;
+import org.apache.tomcat.util.http.parser.Priority;
 import org.apache.tomcat.util.res.StringManager;
 
 class Http2Parser {
@@ -131,6 +136,9 @@ class Http2Parser {
             case CONTINUATION:
                 readContinuationFrame(streamId, flags, payloadSize);
                 break;
+            case PRIORITY_UPDATE:
+                readPriorityUpdateFrame(payloadSize);
+                break;
             case UNKNOWN:
                 readUnknownFrame(streamId, frameTypeId, flags, payloadSize);
         }
@@ -238,21 +246,16 @@ class Http2Parser {
         if (optionalLen > 0) {
             byte[] optional = new byte[optionalLen];
             input.fill(true, optional);
-            int optionalPos = 0;
             if (padding) {
-                padLength = ByteUtil.getOneByte(optional, optionalPos++);
+                padLength = ByteUtil.getOneByte(optional, 0);
                 if (padLength >= payloadSize) {
                     throw new ConnectionException(sm.getString("http2Parser.processFrame.tooMuchPadding", connectionId,
                             Integer.toString(streamId), Integer.toString(padLength), Integer.toString(payloadSize)),
                             Http2Error.PROTOCOL_ERROR);
                 }
             }
-            if (priority) {
-                boolean exclusive = ByteUtil.isBit7Set(optional[optionalPos]);
-                int parentStreamId = ByteUtil.get31Bits(optional, optionalPos);
-                int weight = ByteUtil.getOneByte(optional, optionalPos + 4) + 1;
-                output.reprioritise(streamId, parentStreamId, exclusive, weight);
-            }
+
+            // Ignore RFC 7540 priority data if present
 
             payloadSize -= optionalLen;
             payloadSize -= padLength;
@@ -270,20 +273,15 @@ class Http2Parser {
     }
 
 
-    private void readPriorityFrame(int streamId) throws Http2Exception, IOException {
-        byte[] payload = new byte[5];
-        input.fill(true, payload);
-
-        boolean exclusive = ByteUtil.isBit7Set(payload[0]);
-        int parentStreamId = ByteUtil.get31Bits(payload, 0);
-        int weight = ByteUtil.getOneByte(payload, 4) + 1;
-
-        if (streamId == parentStreamId) {
-            throw new StreamException(sm.getString("http2Parser.processFramePriority.invalidParent", connectionId,
-                    Integer.valueOf(streamId)), Http2Error.PROTOCOL_ERROR, streamId);
+    protected void readPriorityFrame(int streamId) throws IOException {
+        // RFC 7540 priority frames are ignored. Still need to treat as overhead.
+        try {
+            swallowPayload(streamId, FrameType.PRIORITY.getId(), 5, false);
+        } catch (ConnectionException e) {
+            // Will never happen because swallowPayload() is called with isPadding set
+            // to false
         }
-
-        output.reprioritise(streamId, parentStreamId, exclusive, weight);
+        output.increaseOverheadCount(FrameType.PRIORITY);
     }
 
 
@@ -414,6 +412,32 @@ class Http2Parser {
     }
 
 
+    protected void readPriorityUpdateFrame(int payloadSize) throws Http2Exception, IOException {
+        // Identify prioritized stream ID
+        byte[] payload = new byte[payloadSize];
+        input.fill(true, payload);
+
+        int prioritizedStreamID = ByteUtil.get31Bits(payload, 0);
+
+        if (prioritizedStreamID == 0) {
+            throw new ConnectionException(sm.getString("http2Parser.processFramePriorityUpdate.streamZero"),
+                    Http2Error.PROTOCOL_ERROR);
+        }
+
+        ByteArrayInputStream bais = new ByteArrayInputStream(payload, 4, payloadSize - 4);
+        Reader r = new BufferedReader(new InputStreamReader(bais, StandardCharsets.US_ASCII));
+        Priority p = Priority.parsePriority(r);
+
+        if (log.isDebugEnabled()) {
+            log.debug(sm.getString("http2Parser.processFramePriorityUpdate.debug", connectionId,
+                    Integer.toString(prioritizedStreamID), Integer.toString(p.getUrgency()),
+                    Boolean.valueOf(p.getIncremental())));
+        }
+
+        output.priorityUpdate(prioritizedStreamID, p);
+    }
+
+
     protected void readHeaderPayload(int streamId, int payloadSize) throws Http2Exception, IOException {
 
         if (log.isDebugEnabled()) {
@@ -698,9 +722,6 @@ class Http2Parser {
 
         void headersEnd(int streamId) throws Http2Exception;
 
-        // Priority frames (also headers)
-        void reprioritise(int streamId, int parentStreamId, boolean exclusive, int weight) throws Http2Exception;
-
         // Reset frames
         void reset(int streamId, long errorCode) throws Http2Exception;
 
@@ -718,6 +739,9 @@ class Http2Parser {
         // Window size
         void incrementWindowSize(int streamId, int increment) throws Http2Exception;
 
+        // Priority update
+        void priorityUpdate(int prioritizedStreamID, Priority p) throws Http2Exception;
+
         /**
          * Notification triggered when the parser swallows the payload of an unknown frame.
          *
@@ -729,5 +753,7 @@ class Http2Parser {
          * @throws IOException If an I/O error occurred while swallowing the unknown frame
          */
         void onSwallowedUnknownFrame(int streamId, int frameTypeId, int flags, int size) throws IOException;
+
+        void increaseOverheadCount(FrameType frameType);
     }
 }
diff --git a/java/org/apache/coyote/http2/Http2UpgradeHandler.java b/java/org/apache/coyote/http2/Http2UpgradeHandler.java
index 061f5607bf..c5aeaf87ea 100644
--- a/java/org/apache/coyote/http2/Http2UpgradeHandler.java
+++ b/java/org/apache/coyote/http2/Http2UpgradeHandler.java
@@ -21,15 +21,15 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Queue;
 import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
@@ -49,6 +49,7 @@ import org.apache.juli.logging.Log;
 import org.apache.juli.logging.LogFactory;
 import org.apache.tomcat.util.codec.binary.Base64;
 import org.apache.tomcat.util.http.MimeHeaders;
+import org.apache.tomcat.util.http.parser.Priority;
 import org.apache.tomcat.util.log.UserDataHelper;
 import org.apache.tomcat.util.net.AbstractEndpoint.Handler.SocketState;
 import org.apache.tomcat.util.net.SSLSupport;
@@ -88,9 +89,7 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
 
     private static final HeaderSink HEADER_SINK = new HeaderSink();
 
-    private final Object priorityTreeLock = new Object();
-
-    private final String connectionId;
+    protected final String connectionId;
 
     private final Http2Protocol protocol;
     private final Adapter adapter;
@@ -129,8 +128,7 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
     private final AtomicInteger nextLocalStreamId = new AtomicInteger(2);
     private final PingManager pingManager = new PingManager();
     private volatile int newStreamsSinceLastPrune = 0;
-    private final Set<AbstractStream> backLogStreams =
-            Collections.newSetFromMap(new ConcurrentHashMap<AbstractStream,Boolean>());
+    private final Set<Stream> backLogStreams = new HashSet<>();
     private long backLogSize = 0;
     // The time at which the connection will timeout unless data arrives before
     // then. -1 means no timeout.
@@ -938,11 +936,6 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
                         stream.setConnectionAllocationRequested(reservation);
                         backLogSize += reservation;
                         backLogStreams.add(stream);
-                        // Add the parents as well
-                        AbstractStream parent = stream.getParentStream();
-                        while (parent != null && backLogStreams.add(parent)) {
-                            parent = parent.getParentStream();
-                        }
                     }
                 } else if (windowSize < reservation) {
                     allocation = (int) windowSize;
@@ -1038,8 +1031,7 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
 
     private synchronized Set<AbstractStream> releaseBackLog(int increment) throws Http2Exception {
         Set<AbstractStream> result = new HashSet<>();
-        int remaining = increment;
-        if (backLogSize < remaining) {
+        if (backLogSize < increment) {
             // Can clear the whole backlog
             for (AbstractStream stream : backLogStreams) {
                 if (stream.getConnectionAllocationRequested() > 0) {
@@ -1048,22 +1040,102 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
                     result.add(stream);
                 }
             }
-            remaining -= backLogSize;
+            // Cast is safe due to test above
+            int remaining = increment - (int) backLogSize;
             backLogSize = 0;
             super.incrementWindowSize(remaining);
 
             backLogStreams.clear();
         } else {
-            allocate(this, remaining);
-            Iterator<AbstractStream> streamIter = backLogStreams.iterator();
-            while (streamIter.hasNext()) {
-                AbstractStream stream = streamIter.next();
-                if (stream.getConnectionAllocationMade() > 0) {
-                    backLogSize -= stream.getConnectionAllocationMade();
-                    backLogSize -= stream.getConnectionAllocationRequested();
-                    stream.setConnectionAllocationRequested(0);
-                    result.add(stream);
-                    streamIter.remove();
+            // Can't clear the whole backlog.
+            // Need streams in priority order
+            Set<Stream> orderedStreams = new ConcurrentSkipListSet<>(new Comparator<Stream>() {
+
+                @Override
+                public int compare(Stream s1, Stream s2) {
+                    int result = Integer.compare(s1.getUrgency(), s2.getUrgency());
+                    if (result == 0) {
+                       result = Boolean.compare(s1.getIncremental(), s2.getIncremental());
+                       if (result == 0) {
+                           result = Integer.compare(s1.getIdAsInt(), s2.getIdAsInt());
+                       }
+                    }
+                    return result;
+                }
+            });
+            orderedStreams.addAll(backLogStreams);
+
+            // Iteration 1. Need to work out how much we can clear.
+            long urgencyWhereAllocationIsExhausted = 0;
+            long requestedAllocationForIncrementalStreams = 0;
+            int remaining = increment;
+            Iterator<Stream> orderedStreamsIterator = orderedStreams.iterator();
+            while (orderedStreamsIterator.hasNext()) {
+                Stream s = orderedStreamsIterator.next();
+                if (urgencyWhereAllocationIsExhausted < s.getUrgency()) {
+                    if (remaining < 1) {
+                        break;
+                    }
+                    requestedAllocationForIncrementalStreams = 0;
+                }
+                urgencyWhereAllocationIsExhausted = s.getUrgency();
+                if (s.getIncremental()) {
+                    requestedAllocationForIncrementalStreams += s.getConnectionAllocationRequested();
+                    remaining -= s.getConnectionAllocationRequested();
+                } else {
+                    remaining -= s.getConnectionAllocationRequested();
+                    if (remaining < 1) {
+                        break;
+                    }
+                }
+            }
+
+            // Iteration 2. Allocate.
+            // Reset for second iteration
+            remaining = increment;
+            orderedStreamsIterator = orderedStreams.iterator();
+            while (orderedStreamsIterator.hasNext()) {
+                Stream s = orderedStreamsIterator.next();
+                if (s.getUrgency() < urgencyWhereAllocationIsExhausted) {
+                    // Can fully allocate
+                    remaining = allocate(s, remaining);
+                    result.add(s);
+                    orderedStreamsIterator.remove();
+                    backLogStreams.remove(s);
+                } else if (requestedAllocationForIncrementalStreams == 0) {
+                    // Allocation ran out in non-incremental streams so fully
+                    // allocate in iterator order until allocation is exhausted
+                    remaining = allocate(s, remaining);
+                    result.add(s);
+                    if (s.getConnectionAllocationRequested() == 0) {
+                        // Fully allocated
+                        orderedStreamsIterator.remove();
+                        backLogStreams.remove(s);
+                    }
+                    if (remaining < 1) {
+                        break;
+                    }
+                } else {
+                    // Allocation ran out in incremental streams. Distribute
+                    // remaining allocation between the incremental streams at
+                    // this urgency level.
+                    if (s.getUrgency() != urgencyWhereAllocationIsExhausted) {
+                        break;
+                    }
+
+                    int share = (int) (s.getConnectionAllocationRequested() * remaining /
+                            requestedAllocationForIncrementalStreams);
+                    if (share == 0) {
+                        share = 1;
+                    }
+                    allocate(s, share);
+                    result.add(s);
+                    if (s.getConnectionAllocationRequested() == 0) {
+                        // Fully allocated (unlikely but possible due to
+                        // rounding if only a few bytes required).
+                        orderedStreamsIterator.remove();
+                        backLogStreams.remove(s);
+                    }
                 }
             }
         }
@@ -1091,68 +1163,12 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
             leftToAllocate = leftToAllocate - allocatedThisTime;
         }
 
-        if (leftToAllocate == 0) {
-            return 0;
-        }
-
         if (log.isDebugEnabled()) {
             log.debug(sm.getString("upgradeHandler.allocate.left", getConnectionId(), stream.getIdAsString(),
                     Integer.toString(leftToAllocate)));
         }
 
-        // Recipients are children of the current stream that are in the
-        // backlog.
-        Set<AbstractStream> recipients = new HashSet<>();
-        recipients.addAll(stream.getChildStreams());
-        recipients.retainAll(backLogStreams);
-
-        // Loop until we run out of allocation or recipients
-        while (leftToAllocate > 0) {
-            if (recipients.size() == 0) {
-                if (stream.getConnectionAllocationMade() == 0) {
-                    backLogStreams.remove(stream);
-                }
-                if (stream.getIdAsInt() == 0) {
-                    throw new IllegalStateException();
-                }
-                return leftToAllocate;
-            }
-
-            int totalWeight = 0;
-            for (AbstractStream recipient : recipients) {
-                if (log.isDebugEnabled()) {
-                    log.debug(
-                            sm.getString("upgradeHandler.allocate.recipient", getConnectionId(), stream.getIdAsString(),
-                                    recipient.getIdAsString(), Integer.toString(recipient.getWeight())));
-                }
-                totalWeight += recipient.getWeight();
-            }
-
-            // Use an Iterator so fully allocated children/recipients can be
-            // removed.
-            Iterator<AbstractStream> iter = recipients.iterator();
-            int allocated = 0;
-            while (iter.hasNext()) {
-                AbstractStream recipient = iter.next();
-                int share = leftToAllocate * recipient.getWeight() / totalWeight;
-                if (share == 0) {
-                    // This is to avoid rounding issues triggering an infinite
-                    // loop. It will cause a very slight over allocation but
-                    // HTTP/2 should cope with that.
-                    share = 1;
-                }
-                int remainder = allocate(recipient, share);
-                // Remove recipients that receive their full allocation so that
-                // they are excluded from the next allocation round.
-                if (remainder > 0) {
-                    iter.remove();
-                }
-                allocated += (share - remainder);
-            }
-            leftToAllocate -= allocated;
-        }
-
-        return 0;
+        return leftToAllocate;
     }
 
 
@@ -1257,7 +1273,6 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
         // maximum number of concurrent streams.
         long max = localSettings.getMaxConcurrentStreams();
 
-        // Only need ~+10% for streams that are in the priority tree,
         // Ideally need to retain information for a "significant" amount of time
         // after sending END_STREAM (RFC 7540, page 20) so we detect potential
         // connection error. 5x seems reasonable. The client will have had
@@ -1275,100 +1290,23 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
         }
 
         int toClose = size - (int) max;
-        if (toClose < 1) {
-            return;
-        }
-
-        // Need to try and close some streams.
-        // Try to close streams in this order
-        // 1. Completed streams used for a request with no children
-        // 2. Completed streams used for a request with children
-        // 3. Closed final streams
-        //
-        // The pruning halts as soon as enough streams have been pruned.
-
-        // Use these sets to track the different classes of streams
-        TreeSet<Integer> candidatesStepTwo = new TreeSet<>();
-        TreeSet<Integer> candidatesStepThree = new TreeSet<>();
-
-        // Step 1
-        // Iterator is in key order so we automatically have the oldest streams
-        // first
-        // Tests depend on parent/child relationship between streams so need to
-        // lock on priorityTreeLock to ensure a consistent view.
-        synchronized (priorityTreeLock) {
-            for (AbstractNonZeroStream stream : streams.values()) {
-                // Never remove active streams
-                if (stream instanceof Stream && ((Stream) stream).isActive()) {
-                    continue;
-                }
-
-                if (stream.isClosedFinal()) {
-                    // This stream went from IDLE to CLOSED and is likely to have
-                    // been created by the client as part of the priority tree.
-                    // Candidate for step 3.
-                    candidatesStepThree.add(stream.getIdentifier());
-                } else if (stream.getChildStreams().size() == 0) {
-                    // Prune it
-                    AbstractStream parent = stream.getParentStream();
-                    streams.remove(stream.getIdentifier());
-                    stream.detachFromParent();
-                    if (log.isDebugEnabled()) {
-                        log.debug(sm.getString("upgradeHandler.pruned", connectionId, stream.getIdAsString()));
-                    }
-                    if (--toClose < 1) {
-                        return;
-                    }
 
-                    // If removing this child made the parent childless then see if
-                    // the parent can be removed.
-                    // Don't try and remove Stream 0 as that is the connection
-                    // Don't try and remove 'newer' streams. We'll get to them as we
-                    // work through the ordered list of streams.
-                    while (toClose > 0 && parent.getIdAsInt() > 0 && parent.getIdAsInt() < stream.getIdAsInt() &&
-                            parent.getChildStreams().isEmpty()) {
-                        // This cast is safe since we know parent ID > 0 therefore
-                        // this isn't the connection
-                        stream = (AbstractNonZeroStream) parent;
-                        parent = stream.getParentStream();
-                        streams.remove(stream.getIdentifier());
-                        stream.detachFromParent();
-                        if (log.isDebugEnabled()) {
-                            log.debug(sm.getString("upgradeHandler.pruned", connectionId, stream.getIdAsString()));
-                        }
-                        if (--toClose < 1) {
-                            return;
-                        }
-                        // Also need to remove this stream from the step 2 list
-                        candidatesStepTwo.remove(stream.getIdentifier());
-                    }
-                } else {
-                    // Closed, with children. Candidate for step 2.
-                    candidatesStepTwo.add(stream.getIdentifier());
-                }
-            }
-        }
-
-        // Process the P2 list
-        for (Integer streamIdToRemove : candidatesStepTwo) {
-            removeStreamFromPriorityTree(streamIdToRemove);
-            if (log.isDebugEnabled()) {
-                log.debug(sm.getString("upgradeHandler.pruned", connectionId, streamIdToRemove));
-            }
-            if (--toClose < 1) {
+        // Need to try and prune some streams. Prune streams starting with the
+        // oldest. Pruning stops as soon as enough streams have been pruned.
+        // Iterator is in key order.
+        for (AbstractNonZeroStream stream : streams.values()) {
+            if (toClose < 1) {
                 return;
             }
-        }
-
-        while (toClose > 0 && candidatesStepThree.size() > 0) {
-            Integer streamIdToRemove = candidatesStepThree.pollLast();
-            removeStreamFromPriorityTree(streamIdToRemove);
-            if (log.isDebugEnabled()) {
-                log.debug(sm.getString("upgradeHandler.prunedPriority", connectionId, streamIdToRemove));
+            if (stream instanceof Stream && ((Stream) stream).isActive()) {
+                continue;
             }
-            if (--toClose < 1) {
-                return;
+            streams.remove(stream.getIdentifier());
+            toClose--;
+            if (log.isDebugEnabled()) {
+                log.debug(sm.getString("upgradeHandler.pruned", connectionId, stream.getIdAsString()));
             }
+
         }
 
         if (toClose > 0) {
@@ -1378,31 +1316,6 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
     }
 
 
-    private void removeStreamFromPriorityTree(Integer streamIdToRemove) {
-        synchronized (priorityTreeLock) {
-            AbstractNonZeroStream streamToRemove = streams.remove(streamIdToRemove);
-            // Move the removed Stream's children to the removed Stream's
-            // parent.
-            Set<AbstractNonZeroStream> children = streamToRemove.getChildStreams();
-            if (children.size() == 1) {
-                // Shortcut
-                children.iterator().next().rePrioritise(streamToRemove.getParentStream(), streamToRemove.getWeight());
-            } else {
-                int totalWeight = 0;
-                for (AbstractNonZeroStream child : children) {
-                    totalWeight += child.getWeight();
-                }
-                for (AbstractNonZeroStream child : children) {
-                    children.iterator().next().rePrioritise(streamToRemove.getParentStream(),
-                            streamToRemove.getWeight() * child.getWeight() / totalWeight);
-                }
-            }
-            streamToRemove.detachFromParent();
-            children.clear();
-        }
-    }
-
-
     void push(Request request, Stream associatedStream) throws IOException {
         if (localSettings.getMaxConcurrentStreams() < activeRemoteStreamCount.incrementAndGet()) {
             // If there are too many open streams, simply ignore the push
@@ -1437,12 +1350,6 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
     }
 
 
-    @Override
-    protected final int getWeight() {
-        return 0;
-    }
-
-
     boolean isTrailerHeaderAllowed(String headerName) {
         return allowedTrailerHeaders.contains(headerName);
     }
@@ -1460,7 +1367,8 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
     }
 
 
-    private void increaseOverheadCount(FrameType frameType) {
+    @Override
+    public void increaseOverheadCount(FrameType frameType) {
         // An overhead frame increases the overhead count by
         // overheadCountFactor. By default, this means an overhead frame
         // increases the overhead count by 10. A simple browser request is
@@ -1786,31 +1694,20 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
     }
 
 
-    @Override
+    /**
+     * Unused - NO-OP.
+     *
+     * @param streamId Unused
+     * @param parentStreamId Unused
+     * @param exclusive Unused
+     * @param weight Unused
+     * @throws Http2Exception Never thrown
+     *
+     * @deprecated Unused. Will be removed in Tomcat 11 onwards.
+     */
+    @Deprecated
     public void reprioritise(int streamId, int parentStreamId, boolean exclusive, int weight) throws Http2Exception {
-        if (streamId == parentStreamId) {
-            throw new ConnectionException(
-                    sm.getString("upgradeHandler.dependency.invalid", getConnectionId(), Integer.valueOf(streamId)),
-                    Http2Error.PROTOCOL_ERROR);
-        }
-
-        increaseOverheadCount(FrameType.PRIORITY);
-
-        synchronized (priorityTreeLock) {
-            // Need to look up stream and parent stream inside the lock else it
-            // is possible for a stream to be recycled before it is
-            // reprioritised. This can result in incorrect references to the
-            // non-recycled stream being retained after reprioritisation.
-            AbstractNonZeroStream abstractNonZeroStream = getAbstractNonZeroStream(streamId);
-            if (abstractNonZeroStream == null) {
-                abstractNonZeroStream = createRemoteStream(streamId);
-            }
-            AbstractStream parentStream = getAbstractNonZeroStream(parentStreamId);
-            if (parentStream == null) {
-                parentStream = this;
-            }
-            abstractNonZeroStream.rePrioritise(parentStream, exclusive, weight);
-        }
+        // NO-OP
     }
 
 
@@ -1913,6 +1810,12 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
                             h2e.getError(), stream.getIdAsInt()));
                 }
             }
+        } else if (setting == Setting.NO_RFC7540_PRIORITIES) {
+            // This should not be changed after the initial setting
+            if (value != ConnectionSettingsBase.DEFAULT_NO_RFC7540_PRIORITIES) {
+                throw new ConnectionException(sm.getString("upgradeHandler.enableRfc7450Priorities", connectionId),
+                        Http2Error.PROTOCOL_ERROR);
+            }
         } else {
             remoteSettings.set(setting, value);
         }
@@ -2000,6 +1903,17 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
     }
 
 
+    @Override
+    public void priorityUpdate(int prioritizedStreamID, Priority p) throws Http2Exception {
+        AbstractNonZeroStream abstractNonZeroStream = getAbstractNonZeroStream(prioritizedStreamID, true);
+        if (abstractNonZeroStream instanceof Stream) {
+            Stream stream = (Stream) abstractNonZeroStream;
+            stream.setUrgency(p.getUrgency());
+            stream.setIncremental(p.getIncremental());
+        }
+    }
+
+
     @Override
     public void onSwallowedUnknownFrame(int streamId, int frameTypeId, int flags, int size) throws IOException {
         // NO-OP.
@@ -2007,15 +1921,10 @@ class Http2UpgradeHandler extends AbstractStream implements InternalHttpUpgradeH
 
 
     void replaceStream(AbstractNonZeroStream original, AbstractNonZeroStream replacement) {
-        synchronized (priorityTreeLock) {
-            AbstractNonZeroStream current = streams.get(original.getIdentifier());
-            // Might already have been recycled or removed from the priority
-            // tree entirely. Only replace it if the full stream is still in the
-            // priority tree.
-            if (current instanceof Stream) {
-                streams.put(original.getIdentifier(), replacement);
-                original.replaceStream(replacement);
-            }
+        AbstractNonZeroStream current = streams.get(original.getIdentifier());
+        // Only replace the stream if it currently uses the full implementation.
+        if (current instanceof Stream) {
+            streams.put(original.getIdentifier(), replacement);
         }
     }
 
diff --git a/java/org/apache/coyote/http2/LocalStrings.properties b/java/org/apache/coyote/http2/LocalStrings.properties
index 563d624a34..09faba909b 100644
--- a/java/org/apache/coyote/http2/LocalStrings.properties
+++ b/java/org/apache/coyote/http2/LocalStrings.properties
@@ -27,6 +27,7 @@ connectionSettings.debug=Connection [{0}], Endpoint [{1}], Parameter type [{2}]
 connectionSettings.enablePushInvalid=Connection [{0}], The requested value for enable push [{1}] is not one of the permitted values (zero or one)
 connectionSettings.headerTableSizeLimit=Connection [{0}], Attempted to set a header table size of [{1}] but the limit is 16k
 connectionSettings.maxFrameSizeInvalid=Connection [{0}], The requested maximum frame size of [{1}] is outside the permitted range of [{2}] to [{3}]
+connectionSettings.noRfc7540PrioritiesInvalid=Connection [{0}], The requested no RFC 7540 priorities setting [{1}] was not zero or one
 connectionSettings.unknown=Connection [{0}], An unknown setting with identifier [{1}] and value [{2}] was ignored
 connectionSettings.windowSizeTooBig=Connection [{0}], The requested window size of [{1}] is bigger than the maximum permitted value of [{2}]
 
@@ -71,7 +72,8 @@ http2Parser.processFrameData.window=Connection [{0}], Client sent more data than
 http2Parser.processFrameHeaders.decodingDataLeft=Data left over after HPACK decoding - it should have been consumed
 http2Parser.processFrameHeaders.decodingFailed=There was an error during the HPACK decoding of HTTP headers
 http2Parser.processFrameHeaders.payload=Connection [{0}], Stream [{1}], Processing headers payload of size [{2}]
-http2Parser.processFramePriority.invalidParent=Connection [{0}], Stream [{1}], A stream may not depend on itself
+http2Parser.processFramePriorityUpdate.debug=Connection [{0}], Stream [{1}], Urgency [{2}], Incremental [{3}]
+http2Parser.processFramePriorityUpdate.streamZero=Connection [{0}], Priority update frame received to prioritize stream zero
 http2Parser.processFramePushPromise=Connection [{0}], Stream [{1}], Push promise frames should not be sent by the client
 http2Parser.processFrameSettings.ackWithNonZeroPayload=Settings frame received with the ACK flag set and payload present
 http2Parser.processFrameWindowUpdate.debug=Connection [{0}], Stream [{1}], Window size increment [{2}]
@@ -107,7 +109,6 @@ stream.inputBuffer.swallowUnread=Swallowing [{0}] bytes previously read into inp
 stream.notWritable=Connection [{0}], Stream [{1}], This stream is not writable
 stream.outputBuffer.flush.debug=Connection [{0}], Stream [{1}], flushing output with buffer at position [{2}], writeInProgress [{3}] and closed [{4}]
 stream.recycle=Connection [{0}], Stream [{1}] has been recycled
-stream.reprioritisation.debug=Connection [{0}], Stream [{1}], Exclusive [{2}], Parent [{3}], Weight [{4}]
 stream.reset.fail=Connection [{0}], Stream [{1}], Failed to reset stream
 stream.reset.receive=Connection [{0}], Stream [{1}], Reset received due to [{2}]
 stream.reset.send=Connection [{0}], Stream [{1}], Reset sent due to [{2}]
@@ -125,9 +126,8 @@ streamStateMachine.invalidFrame=Connection [{0}], Stream [{1}], State [{2}], Fra
 
 upgradeHandler.allocate.debug=Connection [{0}], Stream [{1}], allocated [{2}] bytes
 upgradeHandler.allocate.left=Connection [{0}], Stream [{1}], [{2}] bytes unallocated - trying to allocate to children
-upgradeHandler.allocate.recipient=Connection [{0}], Stream [{1}], potential recipient [{2}] with weight [{3}]
 upgradeHandler.connectionError=Connection error
-upgradeHandler.dependency.invalid=Connection [{0}], Stream [{1}], Streams may not depend on themselves
+upgradeHandler.enableRfc7450Priorities=Connection [{0}], RFC 7450 priorities may not be enabled after being disabled in the initial connection settings frame (see RFC 9218)
 upgradeHandler.fallToDebug=\n\
 \ Note: further occurrences of HTTP/2 stream errors will be logged at DEBUG level.
 upgradeHandler.goaway.debug=Connection [{0}], Goaway, Last stream [{1}], Error code [{2}], Debug data [{3}]
@@ -144,7 +144,6 @@ upgradeHandler.prefaceReceived=Connection [{0}], Connection preface received fro
 upgradeHandler.pruneIncomplete=Connection [{0}], Stream [{1}], Failed to fully prune the connection because there are [{2}] too many active streams
 upgradeHandler.pruneStart=Connection [{0}] Starting pruning of old streams. Limit is [{1}] and there are currently [{2}] streams.
 upgradeHandler.pruned=Connection [{0}] Pruned completed stream [{1}]
-upgradeHandler.prunedPriority=Connection [{0}] Pruned unused stream [{1}] that may have been part of the priority tree
 upgradeHandler.releaseBacklog=Connection [{0}], Stream [{1}] released from backlog
 upgradeHandler.rst.debug=Connection [{0}], Stream [{1}], Error [{2}], Message [{3}],  RST (closing stream)
 upgradeHandler.sendPrefaceFail=Connection [{0}], Failed to send preface to client
diff --git a/java/org/apache/coyote/http2/LocalStrings_es.properties b/java/org/apache/coyote/http2/LocalStrings_es.properties
index acd5243110..513511c9ed 100644
--- a/java/org/apache/coyote/http2/LocalStrings_es.properties
+++ b/java/org/apache/coyote/http2/LocalStrings_es.properties
@@ -37,19 +37,16 @@ stream.header.noPath=Conexión [{0}], Flujo [{1}], El [:path] de la seudo cabece
 stream.header.unknownPseudoHeader=Conexión [{0}], Flujo [{1}], Se recibió una Pseudo cabecera desconocida [{2}]
 stream.inputBuffer.reset=Reinicio de flujo
 stream.inputBuffer.signal=Se adicionaron datos al inBuffer cuando el hilo esta esperando. Señalizando al hilo que a continuar
-stream.reprioritisation.debug=Conexión [{0}], Flujo [{1}], Exclusivo [{2}], Padre [{3}], Peso [{4}]
 
 streamProcessor.error.connection=Conexión [{0}], Flujo [{1}], Ha ocurrido un error el procesamiento que fue fatal para la conexión
 
 streamStateMachine.debug.change=Conexión [{0}], Flujo [{1}], Estado cambió de [{2}] a [{3}]
 
 upgradeHandler.allocate.left=Conexión [{0}], Flujo [{1}], [{2}] bytes no asignados -  tratando de asignar en el hijo
-upgradeHandler.allocate.recipient=Conexión [{0}], Flujo [{1}], recipiente potencial [{2}] con peso [{3}]
 upgradeHandler.ioerror=Conexión [{0}]
 upgradeHandler.pingFailed=Conexión [{0}] falló al hacer ping al cliente
 upgradeHandler.prefaceReceived=Conexión [{0}], Pre face de conexión recibida del cliente\n
 upgradeHandler.pruneIncomplete=La conexión [{0}] Falló al podar completamente la conexión porque existen flujos activos / usados en el árbol de priorida. Existen [{2}] muchos flujos
-upgradeHandler.prunedPriority=La conexión [{0}] ha cortado el flujo en desuso [{1}] el cual podía ser parte del árbol prioritario
 upgradeHandler.rst.debug=Conexión [{0}], Flujo [{1}], Error [{2}], Mensaje [{3}],  RST (cerrando flujo)
 upgradeHandler.sendPrefaceFail=La conexión [{0}], Falló al enviar el prefacio al cliente\n
 upgradeHandler.socketCloseFailed=Error cerrando el socket
diff --git a/java/org/apache/coyote/http2/LocalStrings_fr.properties b/java/org/apache/coyote/http2/LocalStrings_fr.properties
index e02dd2ede0..d6066a76ea 100644
--- a/java/org/apache/coyote/http2/LocalStrings_fr.properties
+++ b/java/org/apache/coyote/http2/LocalStrings_fr.properties
@@ -27,6 +27,7 @@ connectionSettings.debug=Connection [{0}], Paramètre type [{1}] mis à [{2}]
 connectionSettings.enablePushInvalid=Connection [{0}], La valeur demandée pour activer le push [{1}] n''est pas une de celles permises (zéro ou un)
 connectionSettings.headerTableSizeLimit=La Connection [{0}] a essayé de configurer une taille de [{1}] pour la table des en-têtes (headers), mais la limite est 16k
 connectionSettings.maxFrameSizeInvalid=Connection [{0}], la taille maximum de trame demandée  [{1}] est en-dehors des limites permises [{2}] - [{3}]
+connectionSettings.noRfc7540PrioritiesInvalid=Connection [{0}], Le paramètre "no RFC 7540 priorities" [{1}] n''était pas zéro ou un
 connectionSettings.unknown=Connection [{0}], Un paramètre inconnu avec l''identifiant [{1}] et la valeur [{2}] a été ignoré
 connectionSettings.windowSizeTooBig=Connection [{0}], La taille de fenêtre demandée [{1}] est plus grande que la valeur maximale autorisée [{2}]
 
@@ -71,7 +72,8 @@ http2Parser.processFrameData.window=Connection [{0}], le client a envoyé plus d
 http2Parser.processFrameHeaders.decodingDataLeft=Des données restent après le décodage de HPACK, elles auraient dû être consommées
 http2Parser.processFrameHeaders.decodingFailed=Une erreur de décodage HPACK des en-têtes HTTP s'est produite
 http2Parser.processFrameHeaders.payload=Connection [{0}], Flux [{1}], Traitement des en-têtes avec une taille de données de [{2}]
-http2Parser.processFramePriority.invalidParent=Connection [{0}], Flux [{1}], Un flux ne peut pas dépendre de lui-même
+http2Parser.processFramePriorityUpdate.debug=Connection [{0}], Stream [{1}], Urgency [{2}], Incremental [{3}]
+http2Parser.processFramePriorityUpdate.streamZero=Connection [{0}], La trame de mise à jour de priorité a été recue pour le flux zéro
 http2Parser.processFramePushPromise=Connexion [{0}], Flux (Stream) [{1}], les trames de promesse d''envoi ("Push promise frames") ne doivent pas être envoyées par le client.
 http2Parser.processFrameSettings.ackWithNonZeroPayload=La trame de paramètres a été reçue avec un indicateur ACK activé et des données présentes
 http2Parser.processFrameWindowUpdate.debug=Connection [{0}], Flux [{1}], Incrémentation de [{2}] de la taille de fenêtre
@@ -107,7 +109,6 @@ stream.inputBuffer.swallowUnread=[{0}] bytes qui ont été auparavant lu dans le
 stream.notWritable=Connection [{0}], Flux [{1}], Impossible d''écrire sur ce flux
 stream.outputBuffer.flush.debug=Connection [{0}], Flux [{1}], envoi des données mises en tampon depuis la position [{2}], writeInProgress [{3}] et closed [{4}]
 stream.recycle=Connection [{0}], Stream [{1}] a été recyclée
-stream.reprioritisation.debug=Connection [{0}], Flux [{1}], Exclusive [{2}], Parent [{3}], Poids [{4}]
 stream.reset.fail=Connection [{0}], Flux [{1}], Echec de réinitialisation du flux
 stream.reset.receive=Connection [{0}], Flux [{1}], Réinitialisation reçue à cause de [{2}]
 stream.reset.send=Connection [{0}], Flux [{1}], Réinitialisation envoyée à cause de [{2}]
@@ -125,9 +126,8 @@ streamStateMachine.invalidFrame=Connection [{0}], Flux [{1}], Etat [{2}], Type d
 
 upgradeHandler.allocate.debug=Connection [{0}], Flux [{1}], [{2}] octets alloués
 upgradeHandler.allocate.left=Connection [{0}], Flux [{1}], [{2}] octets désalloués, essai d''allocation aux enfants
-upgradeHandler.allocate.recipient=Connection [{0}], Flux [{1}], receveur potentiel [{2}] avec poids [{3}]
 upgradeHandler.connectionError=Erreur de la connection
-upgradeHandler.dependency.invalid=Connection [{0}], Flux [{1}], Un flux ne peut dépendre de lui-même
+upgradeHandler.enableRfc7450Priorities=Connection [{0}], les priorités RFC 7450 ne doivent pas être activées après avoir été désactivées dans la trame initiale des paramètres de connection (voir la RFC 9218)
 upgradeHandler.fallToDebug=\n\
 \ Note: les occurrences suivantes d'erreurs de stream HTTP/2 seront enregistrées au niveau DEBUG.
 upgradeHandler.goaway.debug=Connection [{0}], Goaway, Dernier flux [{1}], Code d''erreur [{2}], Données de débogage [{3}]
@@ -144,7 +144,6 @@ upgradeHandler.prefaceReceived=Connection [{0}], préface de la connection recue
 upgradeHandler.pruneIncomplete=Connexion [{0}], Flux [{1}], Erreur lors de l''élimination complète de la connexion parce que des flux sont encore actifs / utilisés dans l''arbre de priorité, il y a [{2}] flux en trop
 upgradeHandler.pruneStart=Connection [{0}] Début de l''élimination des anciens flux, la limite est de [{1}] et il y a actuellement [{2}] flux
 upgradeHandler.pruned=Connection [{0}] Elimination du flux terminé [{1}]
-upgradeHandler.prunedPriority=La connexion [{0}] a élagué le flux inutilisé [{1}] qui faisait peut-être partie de l''arbre de priorité
 upgradeHandler.releaseBacklog=Connection [{0}], Flux [{1}] enlevée de la file d''attente
 upgradeHandler.rst.debug=Connexion [{0}], Flux [{1}], Erreur [{2}], Message [{3}], RST (fermeture du flux)
 upgradeHandler.sendPrefaceFail=Connexion [{0}], échec d''envoi de la préface au client
diff --git a/java/org/apache/coyote/http2/LocalStrings_ja.properties b/java/org/apache/coyote/http2/LocalStrings_ja.properties
index 8f840c32c6..af3a3e2553 100644
--- a/java/org/apache/coyote/http2/LocalStrings_ja.properties
+++ b/java/org/apache/coyote/http2/LocalStrings_ja.properties
@@ -26,6 +26,7 @@ connectionSettings.debug=コネクション [{0}]、パラメータ [{1}] に [{
 connectionSettings.enablePushInvalid=コネクション[{0}]、有効プッシュ[{1}]にリクエストされた値が許容値(0または1)のいずれでもありません。
 connectionSettings.headerTableSizeLimit=コネクション [{0}]、ヘッダーテーブルサイズに [{1}] を指定されましたが上限は 16k です。
 connectionSettings.maxFrameSizeInvalid=コネクション [{0}]、要求された最大フレームサイズ [{1}] は可能な範囲の [{2}] から [{3}] を超えています。
+connectionSettings.noRfc7540PrioritiesInvalid=接続 [{0}] では、要求された RFC 7540 優先度設定 [{1}] が 0 でも 1 でもありませんでした
 connectionSettings.unknown=コネクション [{0}]、未知の設定名 [{1}] の値 [{2}] を無視しました。
 connectionSettings.windowSizeTooBig=コネクション [{0}]、要求されたウインドウサイズ [{1}] は上限値 [{2}] を越えています。
 
@@ -70,7 +71,8 @@ http2Parser.processFrameData.window=コネクション [{0}]、クライアン
 http2Parser.processFrameHeaders.decodingDataLeft=HPAC をデコードしたのにデータが残っています。すべて使用するべきです
 http2Parser.processFrameHeaders.decodingFailed=HTTP ヘッダーの HPACK 復号化中にエラーが発生しました。
 http2Parser.processFrameHeaders.payload=コネクション [{0}]、ストリーム [{1}]、サイズ [{2}] のヘッダーペイロードを処理中
-http2Parser.processFramePriority.invalidParent=コネクション [{0}]、ストリーム [{1}]、ストリーム自体に依存しない可能性があります
+http2Parser.processFramePriorityUpdate.debug=接続 [{0}]、ストリーム [{1}]、緊急度 [{2}]、増分 [{3}]
+http2Parser.processFramePriorityUpdate.streamZero=接続 [{0}] は、ストリーム 0 を優先するための優先更新フレームを受信しました
 http2Parser.processFramePushPromise=コネクション [{0}]、ストリーム [{1}]、クライアントから PUSH_PROMISE フレームを送信するべきではありません
 http2Parser.processFrameSettings.ackWithNonZeroPayload=ACKフラグがセットされ、ペイロードが存在する状態で受信されたSettingsフレーム
 http2Parser.processFrameWindowUpdate.debug=コネクション [{0}]、ストリーム [{1}]、ウインドウサイズを [{2}] に拡大します。
@@ -106,7 +108,6 @@ stream.inputBuffer.swallowUnread=以前に入力ストリームバッファに
 stream.notWritable=コネクション [{0}]、ストリーム [{1}]、このストリームには書き込みできません。
 stream.outputBuffer.flush.debug=コネクション [{0}]、ストリーム [{1}]、バッファポジション [{2}]で出力をフラッシュ、writeInProgress [{3}]、クローズ [{4}]
 stream.recycle=Connection[{0}]、Stream[{1}]はリサイクルされました
-stream.reprioritisation.debug=コネクション [{0}]、ストリーム [{1}]、排他 [{2}]、Parent [{3}]、重み [{4}]
 stream.reset.fail=コネクション [{0}]、ストリーム [{1}]、ストリームをリセットできません。
 stream.reset.receive=コネクション [{0}]、ストリーム [{1}]、[{2}] のために受信されたリセット
 stream.reset.send=コネクション [{0}]、ストリーム [{1}]、[{2}] が原因で RESET を送信しました。
@@ -124,9 +125,8 @@ streamStateMachine.invalidFrame=コネクション [{0}]、ストリーム [{1}]
 
 upgradeHandler.allocate.debug=コネクション [{0}]、ストリーム [{1}]、割り当てられた [{2}] バイト
 upgradeHandler.allocate.left=コネクション [{0}]、ストリーム [{1}]、[{2}] バイトが未割り当て - 子への割り当てを試みています
-upgradeHandler.allocate.recipient=コネクション [{0}]、ストリーム [{1}]、重み [{3}] の潜在的な受信者 [{2}]
 upgradeHandler.connectionError=接続エラー
-upgradeHandler.dependency.invalid=コネクション [{0}]、ストリーム [{1}]、ストリームは自分自身に依存するべきではありません。
+upgradeHandler.enableRfc7450Priorities=接続 [{0}] は、RFC 7540 優先順位が初期接続設定フレームで無効にされた後に有効にならない場合があります (RFC 9218 を参照)
 upgradeHandler.fallToDebug=注: HTTP/2 ストリームのエラーがさらに発生すると、DEBUG レベルでログに記録されます。
 upgradeHandler.goaway.debug=コネクション [{0}]、Goaway、最終ストリーム [{1}]、エラーコード [{2}]、デバッグデータ [{3}]
 upgradeHandler.init=コネクション[{0}]、状態[{1}]
@@ -142,7 +142,6 @@ upgradeHandler.prefaceReceived=コネクション [{0}]、クライアントか
 upgradeHandler.pruneIncomplete=コネクション [{0}]、ストリーム [{1}]、コネクションを削除できませんでした。アクティブなストリーム数 [{2}] は多すぎます。
 upgradeHandler.pruneStart=コネクション [{0}] 古いストリームのプルーニングを開始します。 上限は [{1}]  で、現在 [{2}] ストリームがあります。
 upgradeHandler.pruned=コネクション [{0}]、完了したストリーム [{1}] は削除します。
-upgradeHandler.prunedPriority=コネクション [{0}]、優先度木に登録されていた可能性のある未使用のストリーム [{1}] を取り除きました。
 upgradeHandler.releaseBacklog=コネクション [{0}]、ストリーム [{1}] はバックログから解放されました
 upgradeHandler.rst.debug=コネクション [{0}]、ストリーム [{1}]、エラー [{2}]、メッセージ [{3}]、RST (ストリームを切断します)
 upgradeHandler.sendPrefaceFail=コネクション [{0}]、クライアントにプリフェイスを送信できませんでした。
diff --git a/java/org/apache/coyote/http2/LocalStrings_ko.properties b/java/org/apache/coyote/http2/LocalStrings_ko.properties
index 20878d70d0..669271e1c3 100644
--- a/java/org/apache/coyote/http2/LocalStrings_ko.properties
+++ b/java/org/apache/coyote/http2/LocalStrings_ko.properties
@@ -71,7 +71,6 @@ http2Parser.processFrameData.window=연결 [{0}]: 클라이언트가 스트림 
 http2Parser.processFrameHeaders.decodingDataLeft=HPACK 디코딩 후 남아있는 데이터 - 반드시 소비되었어야 합니다.
 http2Parser.processFrameHeaders.decodingFailed=HTTP 헤더들의 HPACK 디코딩 과정에서 오류가 있었습니다.
 http2Parser.processFrameHeaders.payload=연결 [{0}], 스트림 [{1}], 크기가 [{2}]인 헤더들의 payload를 처리합니다.
-http2Parser.processFramePriority.invalidParent=연결 [{0}], 스트림 [{1}], 스트림이 그 자신을 의존할 수는 없습니다.
 http2Parser.processFramePushPromise=연결 [{0}], 스트림 [{1}], Push promise 프레임들이 클라이언트에 의해 전송되어서는 안됩니다.
 http2Parser.processFrameSettings.ackWithNonZeroPayload=ACK 플래그가 설정되고 payload가 존재하는, Settings 프레임을 받았습니다.
 http2Parser.processFrameWindowUpdate.debug=연결 [{0}], 스트림 [{1}], 윈도우 크기를 [{2}] 만큼 증가 시킵니다.
@@ -107,7 +106,6 @@ stream.inputBuffer.swallowUnread=이전에 읽어 입력 스트림 버퍼에 넣
 stream.notWritable=연결 [{0}], 스트림 [{1}], 이 스트림은 쓰기 가능하지 않습니다.
 stream.outputBuffer.flush.debug=연결 [{0}], 스트림 [{1}], 위치 [{2}]의 버퍼를 출력으로 배출합니다. 쓰기 진행 중 여부: [{3}],닫힘 여부: [{4}]
 stream.recycle=연결 [{0}], 스트림 [{1}]이(가) 참조 해제되었습니다.
-stream.reprioritisation.debug=연결 [{0}], 스트림 [{1}], 배타성 [{2}], 부모 [{3}], 가중치 [{4}]
 stream.reset.fail=연결 [{0}], 스트림 [{1}], 스트림을 재설정(reset)하지 못했습니다.
 stream.reset.receive=연결 [{0}], 스트림 [{1}], [{2}](으)로 인해 재설정(reset)을 받았습니다.
 stream.reset.send=연결 [{0}], 스트림 [{1}], [{2}](으)로 인하여 재설정(reset)이 전송되었음.
@@ -125,9 +123,7 @@ streamStateMachine.invalidFrame=연결 [{0}], 스트림 [{1}], 상태 [{2}], 프
 
 upgradeHandler.allocate.debug=연결 [{0}], 스트림 [{1}], [{2}] 바이트를 할당함.
 upgradeHandler.allocate.left=연결 [{0}], 스트림 [{1}], [{2}] 바이트들이 할당 해제되었습니다 - 자식들에 할당하려 시도합니다.
-upgradeHandler.allocate.recipient=연결 [{0}], 스트림 [{1}], 가중치 [{3}]의 잠재적 수신자 [{2}]
 upgradeHandler.connectionError=연결 오류
-upgradeHandler.dependency.invalid=연결 [{0}], 스트림 [{1}], 스트림들은 자기 자신들에 의존해서는 안됩니다.
 upgradeHandler.fallToDebug=\n\
 \ 주의: 추가로 발생하는 HTTP/2 스트림 오류들은 디버그 수준의 로그로 기록될 것입니다.
 upgradeHandler.goaway.debug=연결 [{0}], Goaway, 마지막 스트림 [{1}], 오류 코드 [{2}], 디버그 데이터 [{3}]
@@ -144,7 +140,6 @@ upgradeHandler.prefaceReceived=연결 [{0}]: 연결 preface를 클라이언트
 upgradeHandler.pruneIncomplete=연결 [{0}]: 스트림들이 Priority tree에서 활성화되어 있거나 사용되고 있기 때문에, 해당 연결을 완전히 제거하지 못했습니다. 너무 많은 스트림들이 존재합니다: [{2}].
 upgradeHandler.pruneStart=연결 [{0}]: 이전 스트림들에 대한 가지치기를 시작합니다. 한계값은 [{1}] 이고, 현재 [{2}]개의 스트림들이 존재합니다.
 upgradeHandler.pruned=연결 [{0}]이(가) 완료된 스트림 [{1}]을(를) 제거했습니다.
-upgradeHandler.prunedPriority=연결 [{0}]이(가) 사용되지 않는 스트림 [{1}]을(를) 제거합니다. 해당 스트림은 priority tree의 일부였을 수 있습니다.
 upgradeHandler.releaseBacklog=연결 [{0}], 스트림 [{1}]이(가) 백로그로부터 해제되었습니다.
 upgradeHandler.rst.debug=연결 [{0}], 스트림 [{1}], 오류 [{2}], 메시지 [{3}],  RST (스트림을 닫습니다)
 upgradeHandler.sendPrefaceFail=연결 [{0}]: 클라이언트에 preface를 전송하지 못했습니다.
diff --git a/java/org/apache/coyote/http2/LocalStrings_zh_CN.properties b/java/org/apache/coyote/http2/LocalStrings_zh_CN.properties
index 0675da3058..16abac0016 100644
--- a/java/org/apache/coyote/http2/LocalStrings_zh_CN.properties
+++ b/java/org/apache/coyote/http2/LocalStrings_zh_CN.properties
@@ -70,7 +70,6 @@ http2Parser.processFrameData.window=连接[{0}],客户端发送的数据比流
 http2Parser.processFrameHeaders.decodingDataLeft=数据在HPACK解码后依然保留 - 它本应该被消费掉
 http2Parser.processFrameHeaders.decodingFailed=对HTTP头进行HPACK解码时出错
 http2Parser.processFrameHeaders.payload=连接:[{0}],流:[{1}],正在处理[{1}]大小的头文件负载
-http2Parser.processFramePriority.invalidParent=连接[{0}],流[{1}],流可能不依赖于自身
 http2Parser.processFramePushPromise=请求了新的远程流ID[{0}],但所有远程流都必须使用奇数标识符
 http2Parser.processFrameSettings.ackWithNonZeroPayload=接收到带有ACK标志设置和有效负载的设置帧
 http2Parser.processFrameWindowUpdate.debug=连接[{0}],流[{1}],窗口大小增量[{2}]
@@ -106,7 +105,6 @@ stream.inputBuffer.swallowUnread=先前读取到输入流缓冲区吞入了[{0}]
 stream.notWritable=连接[{0}],流[{1}],此流不可写
 stream.outputBuffer.flush.debug=连接[{0}],流[{1}],用缓冲区在位置[{2}]刷新输出,writeInProgress[{3}]并关闭了[{4}]
 stream.recycle=连接[{0}],流[{1}]将被回收
-stream.reprioritisation.debug=连接[{0}],流[{1}],独占[{2}],父[{3}],权重[{4}]
 stream.reset.fail=连接[{0}],流[{1}],重置流失败
 stream.reset.receive=连接{0},流{1},由于{2}而收到重置
 stream.reset.send=连接[{0}],流[{1}],由于[{2}]将重置发送
@@ -124,9 +122,7 @@ streamStateMachine.invalidFrame=连接{0}、流{1}、状态{2}、帧类型{3}
 
 upgradeHandler.allocate.debug=连接[{0}],流[{1}],已分配[{2}]字节
 upgradeHandler.allocate.left=连接[{0}],流[{1}],[{2}]字节未分配 - 尝试分配给子项
-upgradeHandler.allocate.recipient=(:连接[{0}],流[{1}],潜在接收者[{2}],权重为[{3}]
 upgradeHandler.connectionError=连接错误
-upgradeHandler.dependency.invalid=连接{0},流{1},流可能不依赖于自身
 upgradeHandler.fallToDebug=注意:往后出现 HTTP/2 流的错误将以 DEBUG 日志级别输出。
 upgradeHandler.goaway.debug=连接[{0}],离开,最后的流[{1}],错误码[{2}],调试数据[{3}]
 upgradeHandler.init=连接[{0}],状态[{1}]
@@ -142,7 +138,6 @@ upgradeHandler.prefaceReceived=连接[{0}],从客户端收到连接准备。
 upgradeHandler.pruneIncomplete=连接[{0}],流[{1}],无法完全修剪连接,因为有[{2}]个活动流太多
 upgradeHandler.pruneStart=连接[{0}]正在开始修剪旧流。限制为[{1}],当前有[{2}]个流。
 upgradeHandler.pruned=连接[{0}]已修剪完成的流[{1}]
-upgradeHandler.prunedPriority=连接[{0}]已经成为了属于优先级树中未使用的流[{1}]
 upgradeHandler.releaseBacklog=连接[{0}],流[{1}]已从待办事项列表中释放
 upgradeHandler.rst.debug=连接[{0}],流[{1}],错误[{2}],消息[{3}],RST(关闭流)
 upgradeHandler.sendPrefaceFail=连接[{0}],给客户端发送前言失败
diff --git a/java/org/apache/coyote/http2/Setting.java b/java/org/apache/coyote/http2/Setting.java
index 723600e861..f0cd6c2533 100644
--- a/java/org/apache/coyote/http2/Setting.java
+++ b/java/org/apache/coyote/http2/Setting.java
@@ -23,6 +23,7 @@ enum Setting {
     INITIAL_WINDOW_SIZE(4),
     MAX_FRAME_SIZE(5),
     MAX_HEADER_LIST_SIZE(6),
+    NO_RFC7540_PRIORITIES(9),
     UNKNOWN(Integer.MAX_VALUE);
 
     private final int id;
@@ -60,6 +61,9 @@ enum Setting {
             case 6: {
                 return MAX_HEADER_LIST_SIZE;
             }
+            case 9: {
+                return NO_RFC7540_PRIORITIES;
+            }
             default: {
                 return Setting.UNKNOWN;
             }
diff --git a/java/org/apache/coyote/http2/Stream.java b/java/org/apache/coyote/http2/Stream.java
index 97bf8656af..40ec7319ad 100644
--- a/java/org/apache/coyote/http2/Stream.java
+++ b/java/org/apache/coyote/http2/Stream.java
@@ -17,6 +17,7 @@
 package org.apache.coyote.http2;
 
 import java.io.IOException;
+import java.io.StringReader;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.security.AccessController;
@@ -43,6 +44,7 @@ import org.apache.tomcat.util.buf.ByteChunk;
 import org.apache.tomcat.util.buf.MessageBytes;
 import org.apache.tomcat.util.http.MimeHeaders;
 import org.apache.tomcat.util.http.parser.Host;
+import org.apache.tomcat.util.http.parser.Priority;
 import org.apache.tomcat.util.net.ApplicationBufferHandler;
 import org.apache.tomcat.util.net.WriteBuffer;
 import org.apache.tomcat.util.res.StringManager;
@@ -96,6 +98,9 @@ class Stream extends AbstractNonZeroStream implements HeaderEmitter {
     private Object pendingWindowUpdateForStreamLock = new Object();
     private int pendingWindowUpdateForStream = 0;
 
+    private volatile int urgency = Priority.DEFAULT_URGENCY;
+    private volatile boolean incremental = Priority.DEFAULT_INCREMENTAL;
+
 
     Stream(Integer identifier, Http2UpgradeHandler handler) {
         this(identifier, handler, null);
@@ -105,7 +110,6 @@ class Stream extends AbstractNonZeroStream implements HeaderEmitter {
     Stream(Integer identifier, Http2UpgradeHandler handler, Request coyoteRequest) {
         super(handler.getConnectionId(), identifier);
         this.handler = handler;
-        handler.addChild(this);
         setWindowSize(handler.getRemoteSettings().getInitialWindowSize());
         if (coyoteRequest == null) {
             // HTTP/2 new request
@@ -415,6 +419,16 @@ class Stream extends AbstractNonZeroStream implements HeaderEmitter {
                 }
                 break;
             }
+            case "priority": {
+                try {
+                    Priority p = Priority.parsePriority(new StringReader(value));
+                    setUrgency(p.getUrgency());
+                    setIncremental(p.getIncremental());
+                } catch (IOException ioe) {
+                    // Not possible with StringReader
+                }
+                break;
+            }
             default: {
                 if (headerState == HEADER_STATE_TRAILER && !handler.isTrailerHeaderAllowed(name)) {
                     break;
@@ -792,6 +806,26 @@ class Stream extends AbstractNonZeroStream implements HeaderEmitter {
     }
 
 
+    public int getUrgency() {
+        return urgency;
+    }
+
+
+    public void setUrgency(int urgency) {
+        this.urgency = urgency;
+    }
+
+
+    public boolean getIncremental() {
+        return incremental;
+    }
+
+
+    public void setIncremental(boolean incremental) {
+        this.incremental = incremental;
+    }
+
+
     private static void push(final Http2UpgradeHandler handler, final Request request, final Stream stream)
             throws IOException {
         if (org.apache.coyote.Constants.IS_SECURITY_ENABLED) {
diff --git a/java/org/apache/tomcat/util/http/parser/LocalStrings.properties b/java/org/apache/tomcat/util/http/parser/LocalStrings.properties
index 4e6f7b9cb5..3921cb2646 100644
--- a/java/org/apache/tomcat/util/http/parser/LocalStrings.properties
+++ b/java/org/apache/tomcat/util/http/parser/LocalStrings.properties
@@ -45,3 +45,17 @@ http.tooFewHextets=An IPv6 address must consist of 8 hextets but this address co
 http.tooManyColons=An IPv6 address may not contain more than 2 sequential colon characters.
 http.tooManyDoubleColons=An IPv6 address may only contain a single '::' sequence.
 http.tooManyHextets=The IPv6 address contains [{0}] hextets but a valid IPv6 address may not have more than 8.
+
+sf.bareitem.invalidCharacter=The invalid character [{0}] was found when parsing the start of a bare item
+sf.base64.invalidCharacter=The [{0}] character is not valid inside a base64 sequence
+sf.boolean.invalidCharacter=The [{0}] character is not a valid boolean value
+sf.invalidCharacter=The [{0}] character is not valid here
+sf.key.invalidFirstCharacter=The invalid character [{0}] was found when parsing the start of a key
+sf.numeric.decimalInvalidFinal=The final character of a decimal value must be a digit
+sf.numeric.decimalPartTooLong=More than 3 digits after the decimal point
+sf.numeric.decimalTooLong=More than 16 characters found in a decimal
+sf.numeric.integerTooLong=More than 15 digits found in an integer
+sf.numeric.integralPartTooLong=More than 12 digits found in the integral part of a decimal
+sf.numeric.invalidCharacter=The invalid character [{0}] was found parsing a numeric value where a digit was expected
+sf.string.invalidCharacter=The [{0}] character is not valid inside a string
+sf.string.invalidEscape=The [{0}] character must not be escaped
diff --git a/java/org/apache/tomcat/util/http/parser/LocalStrings_fr.properties b/java/org/apache/tomcat/util/http/parser/LocalStrings_fr.properties
index 88eac1e59b..a954343891 100644
--- a/java/org/apache/tomcat/util/http/parser/LocalStrings_fr.properties
+++ b/java/org/apache/tomcat/util/http/parser/LocalStrings_fr.properties
@@ -44,3 +44,17 @@ http.tooFewHextets=Une adresse IPv6 doit être constitué de 8 groupes de 4 octe
 http.tooManyColons=Une adresse IPv6 ne peut pas contenir plus de deux caractères deux-points à la suite
 http.tooManyDoubleColons=Une adresse IPv6 ne peut contenir qu'une seule séquence "::"
 http.tooManyHextets=L''adresse IPv6 contient [{0}] groupes de 4 octets mais une adresse IPv6 valide ne doit pas en avoir plus de 8
+
+sf.bareitem.invalidCharacter=Le caractère [{0}] invalide a été rencontré en début d''un objet
+sf.base64.invalidCharacter=Le caractère [{0}] est invalide dans une séquence base64
+sf.boolean.invalidCharacter=Le caractère [{0}] n''est pas une valeur booléene valide
+sf.invalidCharacter=Le caractère [{0}] n''est pas valide à cet endroit
+sf.key.invalidFirstCharacter=Le caractère [{0}] invalide a été rencontré au début d''une clé
+sf.numeric.decimalInvalidFinal=Le caractère final d'une valeur décimale doit être un chiffre
+sf.numeric.decimalPartTooLong=Plus de 3 chiffres après le point des décimales
+sf.numeric.decimalTooLong=Plus de 16 caractères trouvés dans une décimale
+sf.numeric.integerTooLong=Plus de 15 chiffres dans un entier
+sf.numeric.integralPartTooLong=Plus de 12 chiffres trouvés dans la partie entière d'une décimale
+sf.numeric.invalidCharacter=Le caractère [{0}] invalide a été trouvé en traitant une valeur numérique alors qu''un chiffre était attendu
+sf.string.invalidCharacter=Le caractère [{0}] n''est pas valide dans une chaîne de caractères
+sf.string.invalidEscape=Le caractère [{0}] ne doit pas être échappé
diff --git a/java/org/apache/tomcat/util/http/parser/LocalStrings_ja.properties b/java/org/apache/tomcat/util/http/parser/LocalStrings_ja.properties
index 5b61d86fe6..b9bf412a57 100644
--- a/java/org/apache/tomcat/util/http/parser/LocalStrings_ja.properties
+++ b/java/org/apache/tomcat/util/http/parser/LocalStrings_ja.properties
@@ -44,3 +44,17 @@ http.tooFewHextets=IPv6 アドレスは 8 個のヘクステットで構成し
 http.tooManyColons=IPv6 アドレスでは文字 : を 2 つ以上連続することはできません。
 http.tooManyDoubleColons=IPv6アドレスは単一の '::'シーケンスのみを含むことができます。
 http.tooManyHextets=IPv6 アドレスは [{0}] ヘクステットで構成されていますが、正常な IPv6 アドレスなら 8 ヘクステット以上になりません。
+
+sf.bareitem.invalidCharacter=ベアアイテムの開始を解析中に無効な文字 [{0}] が見つかりました
+sf.base64.invalidCharacter=文字 [{0}] は base64 シーケンス内では無効です
+sf.boolean.invalidCharacter=文字 [{0}] は有効なブール値ではありません
+sf.invalidCharacter=文字 [{0}] はここでは無効です
+sf.key.invalidFirstCharacter=キーの開始を解析中に無効な文字 [{0}] が見つかりました
+sf.numeric.decimalInvalidFinal=10 進数値の最後の文字は数字でなければなりません
+sf.numeric.decimalPartTooLong=小数点以下が 3 桁以上
+sf.numeric.decimalTooLong=16 桁以上の 10 進数が見つかりました
+sf.numeric.integerTooLong=整数に 15 桁を超える数字が含まれています
+sf.numeric.integralPartTooLong=10 進数の整数部分に 12 桁を超える数字が含まれています
+sf.numeric.invalidCharacter=数値の解析中に数値ではない無効な文字 [{0}] が検出されました
+sf.string.invalidCharacter=文字 [{0}] は文字列内では無効です
+sf.string.invalidEscape=文字 [{0}] はエスケープできません
diff --git a/java/org/apache/tomcat/util/http/parser/LocalStrings_ko.properties b/java/org/apache/tomcat/util/http/parser/LocalStrings_ko.properties
index 2970d1c60e..3ee426b142 100644
--- a/java/org/apache/tomcat/util/http/parser/LocalStrings_ko.properties
+++ b/java/org/apache/tomcat/util/http/parser/LocalStrings_ko.properties
@@ -44,3 +44,17 @@ http.tooFewHextets=IPv6 주소는 반드시 8개의 헥스텟(hextet)들로 이
 http.tooManyColons=IPv6 주소는 연속으로 두 개를 초과한 콜론 문자('':'')들을 포함할 수 없습니다.
 http.tooManyDoubleColons=IPv6 주소는 단일한 '::' 시퀀스만을 포함해야 합니다.
 http.tooManyHextets=IPv6 주소가 [{0}]개의 헥스텟(hextet)들을 포함하고 있지만, 유효한 IPv6 주소는 8개를 초과할 수 없습니다.
+
+sf.bareitem.invalidCharacter=단순 항목 값을 파싱하는 중 시작 문자로 유효하지 않은 문자 [{0}](이)가 발견되었습니다.
+sf.base64.invalidCharacter=Base64 문자열 내에 유효하지 않은 문자 [{0}].
+sf.boolean.invalidCharacter=불리언 값으로 유효하지 않은 문자 [{0}].
+sf.invalidCharacter=문자 [{0}](은)는 여기서 유효하지 않습니다.
+sf.key.invalidFirstCharacter=키의 시작 문자로 유효하지 않은 문자 [{0}](이)가 발견되었습니다.
+sf.numeric.decimalInvalidFinal=실수 값 지정 시 맨 마지막 문자는 숫자여야 합니다.
+sf.numeric.decimalPartTooLong=십진 소수점 이후로 숫자 3개를 초과했습니다.
+sf.numeric.decimalTooLong=실수 값을 지정하는 문자열 값에 문자 개수 16개를 초과했습니다.
+sf.numeric.integerTooLong=정수 내에 숫자가 15개를 초과했습니다.
+sf.numeric.integralPartTooLong=실수의 정수 부분 내에 숫자가 12개를 초과했습니다.
+sf.numeric.invalidCharacter=숫자 값을 파싱하는 중, 숫자가 기대되는 곳에서 유효하지 않은 문자 [{0}](이)가 발견되었습니다.
+sf.string.invalidCharacter=문자열 내에 유효하지 않은 문자 [{0}].
+sf.string.invalidEscape=문자 [{0}](은)는 부호화되어서는 안됩니다.
diff --git a/java/org/apache/tomcat/util/http/parser/LocalStrings_zh_CN.properties b/java/org/apache/tomcat/util/http/parser/LocalStrings_zh_CN.properties
index 8b44e84838..e9a4412b46 100644
--- a/java/org/apache/tomcat/util/http/parser/LocalStrings_zh_CN.properties
+++ b/java/org/apache/tomcat/util/http/parser/LocalStrings_zh_CN.properties
@@ -44,3 +44,11 @@ http.tooFewHextets=一个IPv6地址必须包含8个16进制数,但是这个IP
 http.tooManyColons=IPv6地址不能包含超过2个连续冒号字符。
 http.tooManyDoubleColons=一个IPv6地址只能包含一个 '::' 序列。
 http.tooManyHextets=IPv6地址包含[{0}]个十六进制数,但有效的IPv6地址不能超过8个。
+
+sf.base64.invalidCharacter=[{0}]字符在base64序列中无效
+sf.boolean.invalidCharacter=[{0}]字符不是有效布尔值
+sf.invalidCharacter=[{0}]字符在这里无效
+sf.numeric.decimalInvalidFinal=十进制最后一个字符必须是数字
+sf.numeric.decimalPartTooLong=小数点后超过3位
+sf.string.invalidCharacter=字符串中[{0}]字符无效
+sf.string.invalidEscape=[{0}]字符不能被转义
diff --git a/java/org/apache/tomcat/util/http/parser/Priority.java b/java/org/apache/tomcat/util/http/parser/Priority.java
new file mode 100644
index 0000000000..ce8ec3b8e0
--- /dev/null
+++ b/java/org/apache/tomcat/util/http/parser/Priority.java
@@ -0,0 +1,92 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.tomcat.util.http.parser;
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.apache.tomcat.util.http.parser.StructuredField.SfBoolean;
+import org.apache.tomcat.util.http.parser.StructuredField.SfDictionary;
+import org.apache.tomcat.util.http.parser.StructuredField.SfInteger;
+import org.apache.tomcat.util.http.parser.StructuredField.SfListMember;
+
+/**
+ * HTTP priority header parser as per RFC 9218.
+ */
+public class Priority {
+
+    public static final int DEFAULT_URGENCY = 3;
+    public static final boolean DEFAULT_INCREMENTAL = false;
+
+    // Explicitly set the defaults as per RFC 9218
+    private int urgency = DEFAULT_URGENCY;
+    private boolean incremental = DEFAULT_INCREMENTAL;
+
+    public Priority() {
+        // Default constructor is NO-OP.
+    }
+
+    public int getUrgency() {
+        return urgency;
+    }
+
+    public void setUrgency(int urgency) {
+        this.urgency = urgency;
+    }
+
+    public boolean getIncremental() {
+        return incremental;
+    }
+
+    public void setIncremental(boolean incremental) {
+        this.incremental = incremental;
+    }
+
+
+    /**
+     * Parses an HTTP header as a Priority header as defined by RFC 9218.
+     *
+     * @param input The header to parse
+     *
+     * @return The resulting priority
+     *
+     * @throws IOException If an I/O error occurs while reading the input
+     */
+    public static Priority parsePriority(Reader input) throws IOException {
+        Priority result = new Priority();
+
+        SfDictionary dictionary = StructuredField.parseSfDictionary(input);
+
+        SfListMember urgencyListMember = dictionary.getDictionaryMember("u");
+        // If not an integer, ignore it
+        if (urgencyListMember instanceof SfInteger) {
+            long urgency = ((SfInteger) urgencyListMember).getValue().longValue();
+            // If out of range, ignore it
+            if (urgency > -1 && urgency < 8) {
+                result.setUrgency((int) urgency);
+            }
+        }
+
+        SfListMember incrementalListMember = dictionary.getDictionaryMember("i");
+        // If not a boolean, ignore it
+        if (incrementalListMember instanceof SfBoolean) {
+            result.setIncremental(((SfBoolean) incrementalListMember).getValue().booleanValue());
+        }
+
+        return result;
+    }
+}
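
For illustration, here is a minimal usage sketch of the new parser (not part of the commit; the class name and header values are invented for the example). Dictionary members that are absent, of the wrong type, or outside the 0-7 urgency range fall back to the RFC 9218 defaults, as the comments in parsePriority() above indicate:

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.tomcat.util.http.parser.Priority;

    public class PriorityParseSketch {
        public static void main(String[] args) throws IOException {
            // "u=2, i" requests urgency 2 with incremental delivery
            Priority p = Priority.parsePriority(new StringReader("u=2, i"));
            System.out.println(p.getUrgency());     // 2
            System.out.println(p.getIncremental()); // true

            // Urgency 9 is outside 0-7, so it is ignored and the defaults apply
            Priority q = Priority.parsePriority(new StringReader("u=9"));
            System.out.println(q.getUrgency());     // 3 (Priority.DEFAULT_URGENCY)
            System.out.println(q.getIncremental()); // false (Priority.DEFAULT_INCREMENTAL)
        }
    }
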
diff --git a/java/org/apache/tomcat/util/http/parser/StructuredField.java b/java/org/apache/tomcat/util/http/parser/StructuredField.java
new file mode 100644
index 0000000000..ab31edfe99
--- /dev/null
+++ b/java/org/apache/tomcat/util/http/parser/StructuredField.java
@@ -0,0 +1,598 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.tomcat.util.http.parser;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.tomcat.util.codec.binary.Base64;
+import org.apache.tomcat.util.res.StringManager;
+
+/**
+ * Parsing of structured fields as per RFC 8941.
+ * <p>
+ * The parsing implementation is complete but not all elements are currently exposed via getters. Additional getters
+ * will be added as required as the use of structured fields expands.
+ * <p>
+ * The serialization of structured fields has not been implemented.
+ */
+public class StructuredField {
+
+    private static final StringManager sm = StringManager.getManager(StructuredField.class);
+
+    private static final int ARRAY_SIZE = 128;
+
+    private static final boolean[] IS_KEY_FIRST = new boolean[ARRAY_SIZE];
+    private static final boolean[] IS_KEY = new boolean[ARRAY_SIZE];
+    private static final boolean[] IS_OWS = new boolean[ARRAY_SIZE];
+    private static final boolean[] IS_BASE64 = new boolean[ARRAY_SIZE];
+    private static final boolean[] IS_TOKEN = new boolean[ARRAY_SIZE];
+
+    static {
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            if (i == '*' || i >= 'a' && i <= 'z') {
+                IS_KEY_FIRST[i] = true;
+                IS_KEY[i] = true;
+            } else if (i >= '0' && i <= '9' || i == '_' || i == '-' || i == '.') {
+                IS_KEY[i] = true;
+            }
+        }
+
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            if (i == 9 || i == ' ') {
+                IS_OWS[i] = true;
+            }
+        }
+
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            if (i == '+' || i == '/' || i >= '0' && i <= '9' || i == '=' || i >= 'A' && i <= 'Z' ||
+                    i >= 'a' && i <= 'z') {
+                IS_BASE64[i] = true;
+            }
+        }
+
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            if (HttpParser.isToken(i) || i == ':' || i == '/') {
+                IS_TOKEN[i] = true;
+            }
+        }
+    }
+
+
+    static SfList parseSfList(Reader input) throws IOException {
+        skipSP(input);
+
+        SfList result = new SfList();
+
+        if (peek(input) != -1) {
+            while (true) {
+                SfListMember listMember = parseSfListMember(input);
+                result.addListMember(listMember);
+                skipOWS(input);
+                if (peek(input) == -1) {
+                    break;
+                }
+                requireChar(input, ',');
+                skipOWS(input);
+                requireNotChar(input, -1);
+            }
+        }
+
+        skipSP(input);
+        requireChar(input, -1);
+        return result;
+    }
+
+
+    // Item or inner list
+    static SfListMember parseSfListMember(Reader input) throws IOException {
+        SfListMember listMember;
+        if (peek(input) == '(') {
+            listMember = parseSfInnerList(input);
+        } else {
+            listMember = parseSfBareItem(input);
+        }
+        parseSfParameters(input, listMember);
+        return listMember;
+    }
+
+
+    static SfInnerList parseSfInnerList(Reader input) throws IOException {
+        requireChar(input, '(');
+
+        SfInnerList innerList = new SfInnerList();
+
+        while (true) {
+            skipSP(input);
+            if (peek(input) == ')') {
+                break;
+            }
+            SfItem<?> item = parseSfBareItem(input);
+            parseSfParameters(input, item);
+            innerList.addListItem(item);
+            input.mark(1);
+            requireChar(input, ' ', ')');
+            input.reset();
+        }
+        requireChar(input, ')');
+
+        return innerList;
+    }
+
+
+    static SfDictionary parseSfDictionary(Reader input) throws IOException {
+        skipSP(input);
+
+        SfDictionary result = new SfDictionary();
+
+        if (peek(input) != -1) {
+            while (true) {
+                String key = parseSfKey(input);
+                SfListMember listMember;
+                input.mark(1);
+                int c = input.read();
+                if (c == '=') {
+                    listMember = parseSfListMember(input);
+                } else {
+                    listMember = new SfBoolean(true);
+                    input.reset();
+                }
+                parseSfParameters(input, listMember);
+                result.addDictionaryMember(key, listMember);
+                skipOWS(input);
+                if (peek(input) == -1) {
+                    break;
+                }
+                requireChar(input, ',');
+                skipOWS(input);
+                requireNotChar(input, -1);
+            }
+        }
+
+        skipSP(input);
+        requireChar(input, -1);
+        return result;
+    }
+
+
+    static SfItem<?> parseSfItem(Reader input) throws IOException {
+        skipSP(input);
+
+        SfItem<?> item = parseSfBareItem(input);
+        parseSfParameters(input, item);
+
+        skipSP(input);
+        requireChar(input, -1);
+        return item;
+    }
+
+
+    static SfItem<?> parseSfBareItem(Reader input) throws IOException {
+        int c = input.read();
+
+        SfItem<?> item;
+        if (c == '-' || HttpParser.isNumeric(c)) {
+            item = parseSfNumeric(input, c);
+        } else if (c == '\"') {
+            item = parseSfString(input);
+        } else if (c == '*' || HttpParser.isAlpha(c)) {
+            item = parseSfToken(input, c);
+        } else if (c == ':') {
+            item = parseSfByteSequence(input);
+        } else if (c == '?') {
+            item = parseSfBoolean(input);
+        } else {
+            throw new IllegalArgumentException(
+                    sm.getString("sf.bareitem.invalidCharacter", String.format("\\u%04X", Integer.valueOf(c))));
+        }
+
+        return item;
+    }
+
+
+    static void parseSfParameters(Reader input, SfListMember listMember) throws IOException {
+        while (true) {
+            if (peek(input) != ';') {
+                break;
+            }
+            requireChar(input, ';');
+            skipSP(input);
+            String key = parseSfKey(input);
+            SfItem<?> item;
+            input.mark(1);
+            int c = input.read();
+            if (c == '=') {
+                item = parseSfBareItem(input);
+            } else {
+                item = new SfBoolean(true);
+                input.reset();
+            }
+            listMember.addParameter(key, item);
+        }
+    }
+
+
+    static String parseSfKey(Reader input) throws IOException {
+        StringBuilder result = new StringBuilder();
+
+        input.mark(1);
+        int c = input.read();
+        if (!isKeyFirst(c)) {
+            throw new IllegalArgumentException(
+                    sm.getString("sf.key.invalidFirstCharacter", String.format("\\u%04X", Integer.valueOf(c))));
+        }
+
+        while (c != -1 && isKey(c)) {
+            result.append((char) c);
+            input.mark(1);
+            c = input.read();
+        }
+        input.reset();
+        return result.toString();
+    }
+
+
+    static SfItem<?> parseSfNumeric(Reader input, int first) throws IOException {
+        int sign = 1;
+        boolean integer = true;
+        int decimalPos = 0;
+
+        StringBuilder result = new StringBuilder();
+
+        int c;
+        if (first == '-') {
+            sign = -1;
+            c = input.read();
+        } else {
+            c = first;
+        }
+
+        if (!HttpParser.isNumeric(c)) {
+            throw new IllegalArgumentException(
+                    sm.getString("sf.numeric.invalidCharacter", String.format("\\u%04X", Integer.valueOf(c))));
+        }
+        result.append((char) c);
+        input.mark(1);
+        c = input.read();
+
+        while (c != -1) {
+            if (HttpParser.isNumeric(c)) {
+                result.append((char) c);
+            } else if (integer && c == '.') {
+                if (result.length() > 12) {
+                    throw new IllegalArgumentException(sm.getString("sf.numeric.integralPartTooLong"));
+                }
+                integer = false;
+                result.append((char) c);
+                decimalPos = result.length();
+            } else {
+                input.reset();
+                break;
+            }
+            if (integer && result.length() > 15) {
+                throw new IllegalArgumentException(sm.getString("sf.numeric.integerTooLong"));
+            }
+            if (!integer && result.length() > 16) {
+                throw new IllegalArgumentException(sm.getString("sf.numeric.decimalTooLong"));
+            }
+            input.mark(1);
+            c = input.read();
+        }
+
+        if (integer) {
+            return new SfInteger(Long.parseLong(result.toString()) * sign);
+        }
+
+        if (result.charAt(result.length() - 1) == '.') {
+            throw new IllegalArgumentException(sm.getString("sf.numeric.decimalInvalidFinal"));
+        }
+
+        if (result.length() - decimalPos > 3) {
+            throw new IllegalArgumentException(sm.getString("sf.numeric.decimalPartTooLong"));
+        }
+
+        return new SfDecimal(Double.parseDouble(result.toString()) * sign);
+    }
+
+
+    static SfString parseSfString(Reader input) throws IOException {
+        // It is known first character was '"'
+        StringBuilder result = new StringBuilder();
+
+        while (true) {
+            int c = input.read();
+            if (c == '\\') {
+                requireNotChar(input, -1);
+                c = input.read();
+                if (c != '\\' && c != '\"') {
+                    throw new IllegalArgumentException(
+                            sm.getString("sf.string.invalidEscape", String.format("\\u%04X", Integer.valueOf(c))));
+                }
+            } else {
+                if (c == '\"') {
+                    break;
+                }
+                // This test also covers unexpected EOF
+                if (c < 32 || c > 126) {
+                    throw new IllegalArgumentException(
+                            sm.getString("sf.string.invalidCharacter", String.format("\\u%04X", Integer.valueOf(c))));
+                }
+            }
+            result.append((char) c);
+        }
+
+        return new SfString(result.toString());
+    }
+
+
+    static SfToken parseSfToken(Reader input, int first) throws IOException {
+        // It is known first character is valid
+        StringBuilder result = new StringBuilder();
+
+        result.append((char) first);
+        while (true) {
+            input.mark(1);
+            int c = input.read();
+            if (!isToken(c)) {
+                input.reset();
+                break;
+            }
+            result.append((char) c);
+        }
+
+        return new SfToken(result.toString());
+    }
+
+
+    static SfByteSequence parseSfByteSequence(Reader input) throws IOException {
+        // It is known first character was ':'
+        StringBuilder base64 = new StringBuilder();
+
+        while (true) {
+            int c = input.read();
+
+            if (c == ':') {
+                break;
+            } else if (isBase64(c)) {
+                base64.append((char) c);
+            } else {
+                throw new IllegalArgumentException(
+                        sm.getString("sf.base64.invalidCharacter", String.format("\\u%04X", Integer.valueOf(c))));
+            }
+        }
+
+        return new SfByteSequence(Base64.decodeBase64(base64.toString()));
+    }
+
+
+    static SfBoolean parseSfBoolean(Reader input) throws IOException {
+        // It is known first character was '?'
+        int c = input.read();
+
+        if (c == '1') {
+            return new SfBoolean(true);
+        } else if (c == '0') {
+            return new SfBoolean(false);
+        } else {
+            throw new IllegalArgumentException(
+                    sm.getString("sf.boolean.invalidCharacter", String.format("\\u%04X", Integer.valueOf(c))));
+        }
+    }
+
+
+    static void skipSP(Reader input) throws IOException {
+        input.mark(1);
+        int c = input.read();
+        while (c == 32) {
+            input.mark(1);
+            c = input.read();
+        }
+        input.reset();
+    }
+
+
+    static void skipOWS(Reader input) throws IOException {
+        input.mark(1);
+        int c = input.read();
+        while (isOws(c)) {
+            input.mark(1);
+            c = input.read();
+        }
+        input.reset();
+    }
+
+
+    static void requireChar(Reader input, int... required) throws IOException {
+        int c = input.read();
+        for (int r : required) {
+            if (c == r) {
+                return;
+            }
+        }
+        throw new IllegalArgumentException(
+                sm.getString("sf.invalidCharacter", String.format("\\u%04X", Integer.valueOf(c))));
+    }
+
+
+    static void requireNotChar(Reader input, int required) throws IOException {
+        input.mark(1);
+        int c = input.read();
+        if (c == required) {
+            throw new IllegalArgumentException(
+                    sm.getString("sf.invalidCharacter", String.format("\\u%04X", Integer.valueOf(c))));
+        }
+        input.reset();
+    }
+
+
+    static int peek(Reader input) throws IOException {
+        input.mark(1);
+        int c = input.read();
+        input.reset();
+        return c;
+    }
+
+
+    static boolean isKeyFirst(int c) {
+        try {
+            return IS_KEY_FIRST[c];
+        } catch (ArrayIndexOutOfBoundsException ex) {
+            return false;
+        }
+    }
+
+
+    static boolean isKey(int c) {
+        try {
+            return IS_KEY[c];
+        } catch (ArrayIndexOutOfBoundsException ex) {
+            return false;
+        }
+    }
+
+
+    static boolean isOws(int c) {
+        try {
+            return IS_OWS[c];
+        } catch (ArrayIndexOutOfBoundsException ex) {
+            return false;
+        }
+    }
+
+
+    static boolean isBase64(int c) {
+        try {
+            return IS_BASE64[c];
+        } catch (ArrayIndexOutOfBoundsException ex) {
+            return false;
+        }
+    }
+
+
+    static boolean isToken(int c) {
+        try {
+            return IS_TOKEN[c];
+        } catch (ArrayIndexOutOfBoundsException ex) {
+            return false;
+        }
+    }
+
+
+    private StructuredField() {
+        // Utility class. Hide default constructor.
+    }
+
+
+    static class SfDictionary {
+        private Map<String,SfListMember> dictionary = new LinkedHashMap<>();
+
+        void addDictionaryMember(String key, SfListMember value) {
+            dictionary.put(key, value);
+        }
+
+        SfListMember getDictionaryMember(String key) {
+            return dictionary.get(key);
+        }
+    }
+
+    static class SfList {
+        private List<SfListMember> listMembers = new ArrayList<>();
+
+        void addListMember(SfListMember listMember) {
+            listMembers.add(listMember);
+        }
+    }
+
+    static class SfListMember {
+        private Map<String,SfItem<?>> parameters = null;
+
+        void addParameter(String key, SfItem<?> value) {
+            if (parameters == null) {
+                parameters = new LinkedHashMap<>();
+            }
+            parameters.put(key, value);
+        }
+    }
+
+    static class SfInnerList extends SfListMember {
+        List<SfItem<?>> listItems = new ArrayList<>();
+
+        SfInnerList() {
+            // Default constructor is NO-OP
+        }
+
+        void addListItem(SfItem<?> item) {
+            listItems.add(item);
+        }
+
+        List<SfItem<?>> getListItem() {
+            return listItems;
+        }
+    }
+
+    abstract static class SfItem<T> extends SfListMember {
+        private final T value;
+
+        SfItem(T value) {
+            this.value = value;
+        }
+
+        T getValue() {
+            return value;
+        }
+    }
+
+    static class SfInteger extends SfItem<Long> {
+        SfInteger(long value) {
+            super(Long.valueOf(value));
+        }
+    }
+
+    static class SfDecimal extends SfItem<Double> {
+        SfDecimal(double value) {
+            super(Double.valueOf(value));
+        }
+    }
+
+    static class SfString extends SfItem<String> {
+        SfString(String value) {
+            super(value);
+        }
+    }
+
+    static class SfToken extends SfItem<String> {
+        SfToken(String value) {
+            super(value);
+        }
+    }
+
+    static class SfByteSequence extends SfItem<byte[]> {
+        SfByteSequence(byte[] value) {
+            super(value);
+        }
+    }
+
+    static class SfBoolean extends SfItem<Boolean> {
+        SfBoolean(boolean value) {
+            super(Boolean.valueOf(value));
+        }
+    }
+}
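
As a rough sketch of how the RFC 8941 parser is used (not part of the commit), the snippet below assumes it is compiled into the same org.apache.tomcat.util.http.parser package, since the parse methods and nested item types are package-private; the input strings are examples only:

    package org.apache.tomcat.util.http.parser;

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.tomcat.util.http.parser.StructuredField.SfBoolean;
    import org.apache.tomcat.util.http.parser.StructuredField.SfDictionary;
    import org.apache.tomcat.util.http.parser.StructuredField.SfInteger;

    public class StructuredFieldSketch {
        public static void main(String[] args) throws IOException {
            // "u=2, i" is an RFC 8941 dictionary: "u" maps to an integer item and
            // the bare key "i" is recorded as a boolean true member.
            SfDictionary dict = StructuredField.parseSfDictionary(new StringReader("u=2, i"));

            SfInteger u = (SfInteger) dict.getDictionaryMember("u");
            SfBoolean i = (SfBoolean) dict.getDictionaryMember("i");
            System.out.println(u.getValue()); // 2
            System.out.println(i.getValue()); // true

            // Syntactically invalid fields are rejected, e.g. a key may not start with a digit
            try {
                StructuredField.parseSfDictionary(new StringReader("1=a"));
            } catch (IllegalArgumentException iae) {
                System.out.println("rejected: " + iae.getMessage());
            }
        }
    }
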
diff --git a/test/org/apache/coyote/http2/Http2TestBase.java b/test/org/apache/coyote/http2/Http2TestBase.java
index e02486d06d..d8183e3e0e 100644
--- a/test/org/apache/coyote/http2/Http2TestBase.java
+++ b/test/org/apache/coyote/http2/Http2TestBase.java
@@ -55,6 +55,7 @@ import org.apache.tomcat.util.codec.binary.Base64;
 import org.apache.tomcat.util.compat.JrePlatform;
 import org.apache.tomcat.util.http.FastHttpDateFormat;
 import org.apache.tomcat.util.http.MimeHeaders;
+import org.apache.tomcat.util.http.parser.Priority;
 import org.apache.tomcat.util.net.TesterSupport;
 
 /**
@@ -871,6 +872,33 @@ public abstract class Http2TestBase extends TomcatBaseTest {
     }
 
 
+    void sendPriorityUpdate(int streamId, int urgency, boolean incremental) throws IOException {
+        // Need to know the payload length first
+        StringBuilder sb = new StringBuilder("u=");
+        sb.append(urgency);
+        if (incremental) {
+            sb.append(", i");
+        }
+        byte[] payload = sb.toString().getBytes(StandardCharsets.US_ASCII);
+
+        byte[] priorityUpdateFrame = new byte[13 + payload.length];
+
+        // length
+        ByteUtil.setThreeBytes(priorityUpdateFrame, 0, 4 + payload.length);
+        // type
+        priorityUpdateFrame[3] = FrameType.PRIORITY_UPDATE.getIdByte();
+        // Stream ID
+        ByteUtil.set31Bits(priorityUpdateFrame, 5, 0);
+
+        // Payload
+        ByteUtil.set31Bits(priorityUpdateFrame, 9, streamId);
+        System.arraycopy(payload, 0, priorityUpdateFrame, 13, payload.length);
+
+        os.write(priorityUpdateFrame);
+        os.flush();
+    }
+
+
     void sendSettings(int streamId, boolean ack, SettingValue... settings) throws IOException {
         // length
         int settingsCount;
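
For reference, this is the byte sequence the sendPriorityUpdate() helper above would write for sendPriorityUpdate(3, 2, true), assuming PRIORITY_UPDATE keeps the RFC 9218 frame type code 0x10; the argument values are chosen purely for illustration:

    // Priority field value "u=2, i" is 6 bytes, so the frame length is 4 + 6 = 10
    //
    //   00 00 0a              length = 10
    //   10                    type   = PRIORITY_UPDATE (0x10)
    //   00                    flags  (none defined)
    //   00 00 00 00           stream 0 (the connection stream)
    //   00 00 00 03           prioritized stream ID = 3
    //   75 3d 32 2c 20 69     "u=2, i" in US-ASCII
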
@@ -1068,13 +1096,6 @@ public abstract class Http2TestBase extends TomcatBaseTest {
             return this;
         }
 
-        @Override
-        public void reprioritise(int streamId, int parentStreamId, boolean exclusive, int weight) {
-            lastStreamId = Integer.toString(streamId);
-            trace.append(
-                    lastStreamId + "-Reprioritise-[" + parentStreamId + "]-[" + exclusive + "]-[" + weight + "]\n");
-        }
-
 
         @Override
         public void emitHeader(String name, String value) {
@@ -1179,6 +1200,13 @@ public abstract class Http2TestBase extends TomcatBaseTest {
         }
 
 
+        @Override
+        public void priorityUpdate(int prioritizedStreamID, Priority p) throws Http2Exception {
+            trace.append(
+                    prioritizedStreamID + "-PriorityUpdate-[" + p.getUrgency() + "]-[" + p.getIncremental() + "]\n");
+        }
+
+
         @Override
         public void onSwallowedUnknownFrame(int streamId, int frameTypeId, int flags, int size) {
             trace.append(streamId);
@@ -1227,6 +1255,12 @@ public abstract class Http2TestBase extends TomcatBaseTest {
         public long getBytesRead() {
             return bytesRead;
         }
+
+
+        @Override
+        public void increaseOverheadCount(FrameType frameType) {
+            // NO-OP. Client doesn't track overhead.
+        }
     }
 
 
diff --git a/test/org/apache/coyote/http2/TestAbstractStream.java b/test/org/apache/coyote/http2/TestAbstractStream.java
deleted file mode 100644
index fcaf8b723f..0000000000
--- a/test/org/apache/coyote/http2/TestAbstractStream.java
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.coyote.http2;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/*
- * This tests use A=1, B=2, etc to map stream IDs to the names used in the
- * figures.
- */
-public class TestAbstractStream {
-
-    @Test
-    public void testDependenciesFig3() {
-        // Setup
-        Http2UpgradeHandler handler = new Http2UpgradeHandler(new Http2Protocol(), null, null);
-        Stream a = new Stream(Integer.valueOf(1), handler);
-        Stream b = new Stream(Integer.valueOf(2), handler);
-        Stream c = new Stream(Integer.valueOf(3), handler);
-        Stream d = new Stream(Integer.valueOf(4), handler);
-        b.rePrioritise(a, false, 16);
-        c.rePrioritise(a, false, 16);
-
-        // Action
-        d.rePrioritise(a, false, 16);
-
-        // Check parents
-        Assert.assertEquals(handler, a.getParentStream());
-        Assert.assertEquals(a, b.getParentStream());
-        Assert.assertEquals(a, c.getParentStream());
-        Assert.assertEquals(a, d.getParentStream());
-
-        // Check children
-        Assert.assertEquals(3, a.getChildStreams().size());
-        Assert.assertTrue(a.getChildStreams().contains(b));
-        Assert.assertTrue(a.getChildStreams().contains(c));
-        Assert.assertTrue(a.getChildStreams().contains(d));
-        Assert.assertEquals(0, b.getChildStreams().size());
-        Assert.assertEquals(0, c.getChildStreams().size());
-        Assert.assertEquals(0, d.getChildStreams().size());
-    }
-
-
-    @Test
-    public void testDependenciesFig4() {
-        // Setup
-        Http2UpgradeHandler handler = new Http2UpgradeHandler(new Http2Protocol(), null, null);
-        Stream a = new Stream(Integer.valueOf(1), handler);
-        Stream b = new Stream(Integer.valueOf(2), handler);
-        Stream c = new Stream(Integer.valueOf(3), handler);
-        Stream d = new Stream(Integer.valueOf(4), handler);
-        b.rePrioritise(a, false, 16);
-        c.rePrioritise(a, false, 16);
-
-        // Action
-        d.rePrioritise(a, true, 16);
-
-        // Check parents
-        Assert.assertEquals(handler, a.getParentStream());
-        Assert.assertEquals(d, b.getParentStream());
-        Assert.assertEquals(d, c.getParentStream());
-        Assert.assertEquals(a, d.getParentStream());
-
-        // Check children
-        Assert.assertEquals(1, a.getChildStreams().size());
-        Assert.assertTrue(a.getChildStreams().contains(d));
-        Assert.assertEquals(2, d.getChildStreams().size());
-        Assert.assertTrue(d.getChildStreams().contains(b));
-        Assert.assertTrue(d.getChildStreams().contains(c));
-        Assert.assertEquals(0, b.getChildStreams().size());
-        Assert.assertEquals(0, c.getChildStreams().size());
-    }
-
-
-    @Test
-    public void testDependenciesFig5NonExclusive() {
-        // Setup
-        Http2UpgradeHandler handler = new Http2UpgradeHandler(new Http2Protocol(), null, null);
-        Stream a = new Stream(Integer.valueOf(1), handler);
-        Stream b = new Stream(Integer.valueOf(2), handler);
-        Stream c = new Stream(Integer.valueOf(3), handler);
-        Stream d = new Stream(Integer.valueOf(4), handler);
-        Stream e = new Stream(Integer.valueOf(5), handler);
-        Stream f = new Stream(Integer.valueOf(6), handler);
-        b.rePrioritise(a, false, 16);
-        c.rePrioritise(a, false, 16);
-        d.rePrioritise(c, false, 16);
-        e.rePrioritise(c, false, 16);
-        f.rePrioritise(d, false, 16);
-
-        // Action
-        a.rePrioritise(d, false, 16);
-
-        // Check parents
-        Assert.assertEquals(handler, d.getParentStream());
-        Assert.assertEquals(d, f.getParentStream());
-        Assert.assertEquals(d, a.getParentStream());
-        Assert.assertEquals(a, b.getParentStream());
-        Assert.assertEquals(a, c.getParentStream());
-        Assert.assertEquals(c, e.getParentStream());
-
-        // Check children
-        Assert.assertEquals(2, d.getChildStreams().size());
-        Assert.assertTrue(d.getChildStreams().contains(a));
-        Assert.assertTrue(d.getChildStreams().contains(f));
-        Assert.assertEquals(0, f.getChildStreams().size());
-        Assert.assertEquals(2, a.getChildStreams().size());
-        Assert.assertTrue(a.getChildStreams().contains(b));
-        Assert.assertTrue(a.getChildStreams().contains(c));
-        Assert.assertEquals(0, b.getChildStreams().size());
-        Assert.assertEquals(1, c.getChildStreams().size());
-        Assert.assertTrue(c.getChildStreams().contains(e));
-        Assert.assertEquals(0, e.getChildStreams().size());
-    }
-
-
-    @Test
-    public void testDependenciesFig5Exclusive() {
-        // Setup
-        Http2UpgradeHandler handler = new Http2UpgradeHandler(new Http2Protocol(), null, null);
-        Stream a = new Stream(Integer.valueOf(1), handler);
-        Stream b = new Stream(Integer.valueOf(2), handler);
-        Stream c = new Stream(Integer.valueOf(3), handler);
-        Stream d = new Stream(Integer.valueOf(4), handler);
-        Stream e = new Stream(Integer.valueOf(5), handler);
-        Stream f = new Stream(Integer.valueOf(6), handler);
-        b.rePrioritise(a, false, 16);
-        c.rePrioritise(a, false, 16);
-        d.rePrioritise(c, false, 16);
-        e.rePrioritise(c, false, 16);
-        f.rePrioritise(d, false, 16);
-
-        // Action
-        a.rePrioritise(d, true, 16);
-
-        // Check parents
-        Assert.assertEquals(handler, d.getParentStream());
-        Assert.assertEquals(d, a.getParentStream());
-        Assert.assertEquals(a, b.getParentStream());
-        Assert.assertEquals(a, c.getParentStream());
-        Assert.assertEquals(a, f.getParentStream());
-        Assert.assertEquals(c, e.getParentStream());
-
-        // Check children
-        Assert.assertEquals(1, d.getChildStreams().size());
-        Assert.assertTrue(d.getChildStreams().contains(a));
-        Assert.assertEquals(3, a.getChildStreams().size());
-        Assert.assertTrue(a.getChildStreams().contains(b));
-        Assert.assertTrue(a.getChildStreams().contains(c));
-        Assert.assertTrue(a.getChildStreams().contains(f));
-        Assert.assertEquals(0, b.getChildStreams().size());
-        Assert.assertEquals(0, f.getChildStreams().size());
-        Assert.assertEquals(1, c.getChildStreams().size());
-        Assert.assertTrue(c.getChildStreams().contains(e));
-        Assert.assertEquals(0, e.getChildStreams().size());
-    }
-
-
-    @Test
-    public void testCircular01() {
-        // Setup
-        Http2UpgradeHandler handler = new Http2UpgradeHandler(new Http2Protocol(), null, null);
-        Stream a = new Stream(Integer.valueOf(1), handler);
-        Stream b = new Stream(Integer.valueOf(2), handler);
-        Stream c = new Stream(Integer.valueOf(3), handler);
-
-        b.rePrioritise(a, false, 16);
-        c.rePrioritise(b, false, 16);
-
-        // Action
-        a.rePrioritise(c, false, 16);
-
-        // Check parents
-        Assert.assertEquals(c, a.getParentStream());
-        Assert.assertEquals(a, b.getParentStream());
-        Assert.assertEquals(handler, c.getParentStream());
-
-        // Check children
-        Assert.assertEquals(1, handler.getChildStreams().size());
-        Assert.assertTrue(handler.getChildStreams().contains(c));
-        Assert.assertEquals(1, a.getChildStreams().size());
-        Assert.assertTrue(a.getChildStreams().contains(b));
-        Assert.assertEquals(0, b.getChildStreams().size());
-        Assert.assertEquals(1, c.getChildStreams().size());
-        Assert.assertTrue(c.getChildStreams().contains(a));
-    }
-
-
-    @Test
-    public void testCircular02() {
-        // Setup
-        Http2UpgradeHandler handler = new Http2UpgradeHandler(new Http2Protocol(), null, null);
-        Stream a = new Stream(Integer.valueOf(1), handler);
-        Stream b = new Stream(Integer.valueOf(2), handler);
-        Stream c = new Stream(Integer.valueOf(3), handler);
-        Stream d = new Stream(Integer.valueOf(4), handler);
-        Stream e = new Stream(Integer.valueOf(5), handler);
-        Stream f = new Stream(Integer.valueOf(6), handler);
-
-        b.rePrioritise(a, false, 16);
-        c.rePrioritise(b, false, 16);
-        e.rePrioritise(d, false, 16);
-        f.rePrioritise(e, false, 16);
-
-        // Action
-        a.rePrioritise(f, false, 16);
-        d.rePrioritise(c, false, 16);
-
-        // Check parents
-        Assert.assertEquals(f, a.getParentStream());
-        Assert.assertEquals(a, b.getParentStream());
-        Assert.assertEquals(handler, c.getParentStream());
-        Assert.assertEquals(c, d.getParentStream());
-        Assert.assertEquals(d, e.getParentStream());
-        Assert.assertEquals(e, f.getParentStream());
-
-        // Check children
-        Assert.assertEquals(1, handler.getChildStreams().size());
-        Assert.assertTrue(handler.getChildStreams().contains(c));
-        Assert.assertEquals(1, a.getChildStreams().size());
-        Assert.assertTrue(a.getChildStreams().contains(b));
-        Assert.assertEquals(0, b.getChildStreams().size());
-        Assert.assertEquals(1, c.getChildStreams().size());
-        Assert.assertTrue(c.getChildStreams().contains(d));
-        Assert.assertEquals(1, d.getChildStreams().size());
-        Assert.assertTrue(d.getChildStreams().contains(e));
-        Assert.assertEquals(1, e.getChildStreams().size());
-        Assert.assertTrue(e.getChildStreams().contains(f));
-        Assert.assertEquals(1, f.getChildStreams().size());
-        Assert.assertTrue(f.getChildStreams().contains(a));
-    }
-
-
-    // https://bz.apache.org/bugzilla/show_bug.cgi?id=61682
-    @Test
-    public void testCircular03() {
-        // Setup
-        Http2UpgradeHandler handler = new Http2UpgradeHandler(new Http2Protocol(), null, null);
-        Stream a = new Stream(Integer.valueOf(1), handler);
-        Stream b = new Stream(Integer.valueOf(3), handler);
-        Stream c = new Stream(Integer.valueOf(5), handler);
-        Stream d = new Stream(Integer.valueOf(7), handler);
-
-        // Action
-        b.rePrioritise(a, false, 16);
-        c.rePrioritise(a, false, 16);
-        d.rePrioritise(b, false, 16);
-        c.rePrioritise(handler, false, 16);
-        a.rePrioritise(c, false, 16);
-
-        // Check parents
-        Assert.assertEquals(c, a.getParentStream());
-        Assert.assertEquals(a, b.getParentStream());
-        Assert.assertEquals(handler, c.getParentStream());
-        Assert.assertEquals(b, d.getParentStream());
-
-        // This triggers the StackOverflowError
-        Assert.assertTrue(c.isDescendant(d));
-
-        // Check children
-        Assert.assertEquals(1, handler.getChildStreams().size());
-        Assert.assertTrue(handler.getChildStreams().contains(c));
-        Assert.assertEquals(1, c.getChildStreams().size());
-        Assert.assertTrue(c.getChildStreams().contains(a));
-        Assert.assertEquals(1, a.getChildStreams().size());
-        Assert.assertTrue(a.getChildStreams().contains(b));
-        Assert.assertEquals(1, b.getChildStreams().size());
-        Assert.assertTrue(b.getChildStreams().contains(d));
-        Assert.assertEquals(0, d.getChildStreams().size());
-    }
-}
diff --git a/test/org/apache/coyote/http2/TestHttp2Section_5_3.java b/test/org/apache/coyote/http2/TestHttp2Section_5_3.java
deleted file mode 100644
index 872748bd61..0000000000
--- a/test/org/apache/coyote/http2/TestHttp2Section_5_3.java
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.coyote.http2;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Unit tests for Section 5.3 of <a href="https://tools.ietf.org/html/rfc7540">RFC 7540</a>. <br>
- * The order of tests in this class is aligned with the order of the requirements in the RFC. Note: Unit tests for the
- * examples described by each of the figures may be found in {@link TestAbstractStream}.
- */
-public class TestHttp2Section_5_3 extends Http2TestBase {
-
-    // Section 5.3.1
-
-    @Test
-    public void testStreamDependsOnSelf() throws Exception {
-        http2Connect();
-
-        sendPriority(3, 3, 15);
-
-        parser.readFrame();
-
-        Assert.assertEquals("3-RST-[1]\n", output.getTrace());
-    }
-
-
-    // Section 5.3.2
-
-    @Test
-    public void testWeighting() throws Exception {
-
-        http2Connect();
-
-        // This test uses small window updates that will trigger the excessive
-        // overhead protection so disable it.
-        http2Protocol.setOverheadWindowUpdateThreshold(0);
-        // May also see (rarely, depends on timing) sequential 1 byte data
-        // frames on the same Stream
-        http2Protocol.setOverheadDataThreshold(0);
-
-
-        // Default connection window size is 64k - 1. Initial request will have
-        // used 8k (56k -1). Increase it to 57k
-        sendWindowUpdate(0, 1 + 1024);
-
-        // Use up 56k of the connection window
-        for (int i = 3; i < 17; i += 2) {
-            sendSimpleGetRequest(i);
-            readSimpleGetResponse();
-        }
-
-        // Set the default window size to 1024 bytes
-        sendSettings(0, false, new SettingValue(4, 1024));
-        // Wait for the ack
-        parser.readFrame();
-        // Debugging Gump failure
-        log.info(output.getTrace());
-        output.clearTrace();
-
-        // At this point the connection window should be 1k and any new stream
-        // should have a window of 1k as well
-
-        // Set up streams A=17, B=19, C=21
-        sendPriority(17, 0, 15);
-        sendPriority(19, 17, 3);
-        sendPriority(21, 17, 11);
-
-        // First, process a request on stream 17. This should consume both
-        // stream 17's window and the connection window.
-        sendSimpleGetRequest(17);
-        // 17-headers, 17-1k-body
-        parser.readFrame();
-        // Debugging Gump failure
-        log.info(output.getTrace());
-        parser.readFrame();
-        // Debugging Gump failure
-        log.info(output.getTrace());
-        output.clearTrace();
-
-        // Send additional requests. Connection window is empty so only headers
-        // will be returned.
-        sendSimpleGetRequest(19);
-        sendSimpleGetRequest(21);
-
-        // Open up the flow control windows for stream 19 & 21 to more than the
-        // size of a simple request (8k)
-        sendWindowUpdate(19, 16 * 1024);
-        sendWindowUpdate(21, 16 * 1024);
-
-        // Read some frames
-        // 19-headers, 21-headers
-        parser.readFrame();
-        // Debugging Gump failure
-        log.info(output.getTrace());
-        parser.readFrame();
-        // Debugging Gump failure
-        log.info(output.getTrace());
-        output.clearTrace();
-
-        // At this point 17 is blocked because the stream window is zero and
-        // 19 & 21 are blocked because the connection window is zero.
-        //
-        // To test allocation, the connection window size is increased by 1.
-        // This should result in an allocation of 1 byte each to streams 19 and
-        // 21 but because each stream is processed in a separate thread it is
-        // not guaranteed that both streams will be blocked when the connection
-        // window size is increased. The test therefore sends 1 byte window
-        // updates until a small body has been seen from each stream. Then the
-        // tests sends a larger (1024 byte) window update and checks that it is
-        // correctly distributed between the streams.
-        //
-        // The test includes a margin to allow for the potential differences in
-        // response caused by timing differences on the server.
-        //
-        // The loop below handles 0, 1 or 2 stream being blocked
-        // - If 0 streams are blocked the connection window will be set to one
-        // and that will be consumed by the first stream to attempt to write.
-        // That body frame will be read by the client. The stream will then be
-        // blocked and the loop will start again.
-        // - If 1 stream is blocked, the connection window will be set to one
-        // which will then be consumed by the blocked stream. After writing
-        // the single byte the stream will again be blocked and the loop will
-        // start again.
-        // - If 2 streams are blocked the connection window will be set to one
-        // but one byte will be permitted for both streams (due to rounding in
-        // the allocation). The window size should be -1 (see below). Two
-        // frames (one for each stream will be written) one of which will be
-        // consumed by the client. The loop will start again and the Window
-        // size incremented to zero. No data will be written by the streams
-        // but the second data frame written in the last iteration of the loop
-        // will be read. The loop will then exit since frames from both
-        // streams will have been observed.
-        boolean seen19 = false;
-        boolean seen21 = false;
-        while (!seen19 || !seen21) {
-            sendWindowUpdate(0, 1);
-            parser.readFrame();
-            // Debugging Gump failure
-            log.info(output.getTrace());
-            int[] data = parseBodyFrame(output.getTrace());
-            if (data[0] == 19) {
-                seen19 = true;
-            } else if (data[0] == 21) {
-                seen21 = true;
-            } else {
-                // Unexpected stream
-                Assert.fail("Unexpected stream: [" + output.getTrace() + "]");
-            }
-            // A value of more than 1 here is unlikely but possible depending on
-            // how threads are scheduled. This has been observed as high as 21
-            // on ci.apache.org so allow a margin and use 30.
-            if (data[1] > 30) {
-                // Larger than expected body size
-                Assert.fail("Larger than expected body: [" + output.getTrace() + "] " + data[1]);
-            }
-            output.clearTrace();
-        }
-
-        // Need to give both server side threads enough time to request an
-        // allocation from the connection flow control window before sending
-        // the next window update.
-        Thread.sleep(1000);
-
-        sendWindowUpdate(0, 1024);
-        parser.readFrame();
-
-        // Make sure you have read the big comment before the loop above. It is
-        // possible that the timing of the server threads is such that there are
-        // still small body frames to read.
-        int[] data = parseBodyFrame(output.getTrace());
-        while (data[1] < 20) {
-            // Debugging Gump failure
-            log.info(output.getTrace());
-            output.clearTrace();
-            parser.readFrame();
-            data = parseBodyFrame(output.getTrace());
-        }
-
-        // Should now have two larger body frames. One has already been read.
-        seen19 = false;
-        seen21 = false;
-        while (!seen19 && !seen21) {
-            // Debugging Gump failure
-            log.info(output.getTrace());
-            if (data[0] == 19) {
-                seen19 = true;
-                // If everything works instantly this should be 256 but allow a
-                // fairly large margin for timing differences
-                if (data[1] < 216 || data[1] > 296) {
-                    Assert.fail("Unexpected body size: [" + output.getTrace() + "]");
-                }
-            } else if (data[0] == 21) {
-                seen21 = true;
-                // If everything works instantly this should be 768 but allow a
-                // fairly large margin for timing differences
-                if (data[1] < 728 || data[1] > 808) {
-                    Assert.fail("Unexpected body size: [" + output.getTrace() + "]");
-                }
-            } else {
-                Assert.fail("Unexpected stream: [" + output.getTrace() + "]");
-            }
-            output.clearTrace();
-            parser.readFrame();
-            data = parseBodyFrame(output.getTrace());
-        }
-        // Debugging Gump failure
-        log.info(output.getTrace());
-        output.clearTrace();
-
-        // Release everything and read all the remaining data
-        sendWindowUpdate(0, 1024 * 1024);
-        sendWindowUpdate(17, 1024 * 1024);
-
-        // Read remaining frames
-        // 17-7k-body, 19~8k-body, 21~8k-body
-        for (int i = 0; i < 3; i++) {
-            parser.readFrame();
-            // Debugging Gump failure
-            log.info(output.getTrace());
-        }
-    }
-
-
-    @Test
-    public void testReleaseFullBacklog() throws Exception {
-
-        http2Connect();
-
-        // This test uses small window updates that will trigger the excessive
-        // overhead protection so disable it.
-        http2Protocol.setOverheadWindowUpdateThreshold(0);
-        // May also see (rarely, depends on timing) sequential 1 byte data
-        // frames on the same Stream
-        http2Protocol.setOverheadDataThreshold(0);
-
-
-        // Default connection window size is 64k - 1. Initial request will have
-        // used 8k (56k -1). Increase it to 57k
-        sendWindowUpdate(0, 1 + 1024);
-
-        // Use up 56k of the connection window
-        for (int i = 3; i < 17; i += 2) {
-            sendSimpleGetRequest(i);
-            readSimpleGetResponse();
-        }
-
-        output.clearTrace();
-
-        // At this point the connection window should be 1k and any new stream
-        // should have a window of 64k
-
-        // Create priority tree. This test requires a blocked stream to depend on a closed stream
-        sendPriority(17, 15, 15);
-
-        // Process a request on stream 17.
-        // This should consume the connection window and put streams 15 and 17 in the backlog.
-        sendSimpleGetRequest(17);
-        // 17-headers, 17-1k-body
-        parser.readFrame();
-        parser.readFrame();
-        output.clearTrace();
-
-        // At this point 17 is blocked because the connection window is zero
-
-        // Send a large enough Window update to free the whole backlog
-        sendWindowUpdate(0, 8 * 1024);
-
-        parser.readFrame();
-
-        Assert.assertEquals("17-Body-7168\n17-EndOfStream\n", output.getTrace());
-    }
-
-
-    private int[] parseBodyFrame(String output) {
-        String[] parts = output.trim().split("-");
-        if (parts.length != 3 || !"Body".equals(parts[1])) {
-            Assert.fail("Unexpected output: [" + output + "]");
-        }
-
-        int[] result = new int[2];
-
-        result[0] = Integer.parseInt(parts[0]);
-        result[1] = Integer.parseInt(parts[2]);
-
-        return result;
-    }
-}
diff --git a/test/org/apache/coyote/http2/TestRfc9218.java b/test/org/apache/coyote/http2/TestRfc9218.java
new file mode 100644
index 0000000000..c42fd6ed7b
--- /dev/null
+++ b/test/org/apache/coyote/http2/TestRfc9218.java
@@ -0,0 +1,174 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.coyote.http2;
+
+import java.io.IOException;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestRfc9218 extends Http2TestBase {
+
+    @Test
+    public void testPriority() throws Exception {
+        http2Connect();
+
+        /*
+         * This test uses small window updates and data frames that will trigger the excessive overhead protection so
+         * disable it.
+         */
+        http2Protocol.setOverheadWindowUpdateThreshold(0);
+        http2Protocol.setOverheadDataThreshold(0);
+
+        // Default connection window size is 64k - 1. Initial request will have used 8k (56k -1). Increase it to 57k.
+        sendWindowUpdate(0, 1 + 1024);
+
+        // Consume 56k of the connection window
+        for (int i = 3; i < 17; i += 2) {
+            sendSimpleGetRequest(i);
+            readSimpleGetResponse();
+        }
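+        // With seven 8k responses that is 7 * 8 * 1024 = 57344 bytes, so (57 * 1024) - 57344 = 1024 remains.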
+
+        // At this point the connection window should be 1k
+
+        // Process a request on stream 17. This should consume the connection window.
+        sendSimpleGetRequest(17);
+        // 17-headers, 17-1k-body
+        parser.readFrame();
+        parser.readFrame();
+        output.clearTrace();
+
+        // Send additional requests. Connection window is empty so only headers will be returned.
+        sendSimpleGetRequest(19);
+        sendSimpleGetRequest(21);
+
+        // 19-headers, 21-headers
+        parser.readFrame();
+        parser.readFrame();
+        output.clearTrace();
+
+        // At this point 17, 19 and 21 are all blocked because the connection window is zero.
+        // 17 - 7k body left
+        // 19 - 8k body left
+        // 21 - 8k body left
+
+        // Add 1k to the connection window. Should be used for stream 17.
+        sendWindowUpdate(0, 1024);
+        parser.readFrame();
+        Assert.assertEquals("17-Body-1024\n", output.getTrace());
+        output.clearTrace();
+
+        // 17 - 6k body left
+        // 19 - 8k body left
+        // 21 - 8k body left
+
+        // Re-order the priorities
+        sendPriorityUpdate(19, 2, false);
+        sendPriorityUpdate(21, 1, false);
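+        // RFC 9218 urgency runs from 0 (most important) to 7 (least important) with a default of 3, so stream
+        // 21 (urgency 1) now outranks 19 (urgency 2) and 17 (still at the default).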
+
+        // Add 1k to the connection window. Should be used for stream 21.
+        sendWindowUpdate(0, 1024);
+        parser.readFrame();
+
+        Assert.assertEquals("21-Body-1024\n", output.getTrace());
+        output.clearTrace();
+
+        // 17 - 6k body left
+        // 19 - 8k body left
+        // 21 - 7k body left
+
+        // Re-order the priorities
+        sendPriorityUpdate(17, 3, true);
+        sendPriorityUpdate(19, 3, true);
+        sendPriorityUpdate(21, 3, true);
+
+        // Add 3k to the connection window. Should be split between 17, 19 and 21.
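+        // The split asserted below (877 + 1170 + 1024 = 3071) is consistent with shares proportional to each
+        // stream's outstanding data (6k : 8k : 7k), which also accounts for the single unallocated byte.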
+        sendWindowUpdate(0, 1024 * 3);
+        parser.readFrame();
+        parser.readFrame();
+        parser.readFrame();
+
+        String trace = output.getTrace();
+        Assert.assertTrue(trace.contains("17-Body-877\n"));
+        trace = trace.replace("17-Body-877\n", "");
+        Assert.assertTrue(trace.contains("19-Body-1170\n"));
+        trace = trace.replace("19-Body-1170\n", "");
+        Assert.assertTrue(trace.contains("21-Body-1024\n"));
+        trace = trace.replace("21-Body-1024\n", "");
+        Assert.assertEquals(0, trace.length());
+        output.clearTrace();
+
+        // 1 byte unallocated in connection window
+        // 17 - 5267 body left
+        // 19 - 7022 body left
+        // 21 - 6144 body left
+
+        // Add 1 byte to the connection window. Due to rounding up, each stream should get 1 byte.
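+        // Together with the unallocated byte above that is 2 bytes available but 3 bytes handed out, which is
+        // why the connection window ends up 1 byte over allocated.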
+        sendWindowUpdate(0, 1);
+        parser.readFrame();
+        parser.readFrame();
+        parser.readFrame();
+
+        trace = output.getTrace();
+        Assert.assertTrue(trace.contains("17-Body-1\n"));
+        trace = trace.replace("17-Body-1\n", "");
+        Assert.assertTrue(trace.contains("19-Body-1\n"));
+        trace = trace.replace("19-Body-1\n", "");
+        Assert.assertTrue(trace.contains("21-Body-1\n"));
+        trace = trace.replace("21-Body-1\n", "");
+        Assert.assertEquals(0, trace.length());
+        output.clearTrace();
+
+        // 1 byte over allocated in connection window
+        // 17 - 5266 body left
+        // 19 - 7021 body left
+        // 21 - 6143 body left
+
+        // Re-order the priorities
+        sendPriorityUpdate(17, 2, true);
+
+        /*
+         * Add 8k to the connection window. Should clear the connection window over allocation and fully allocate 17
+         * with the remainder split between 19 and 21.
+         */
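+        /*
+         * Rough expectation: 8192 - 1 (over allocation) = 8191 bytes available. Stream 17, now at urgency 2,
+         * is served first and only needs 5266. The remaining 2925 bytes go to 19 and 21 (both urgency 3)
+         * roughly in proportion to their outstanding 7021 and 6143 bytes: 1560 + 1365 = 2925.
+         */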
+        sendWindowUpdate(0, 1024 * 8);
+        // Use try/catch as third read has been failing on some tests runs
+        try {
+            parser.readFrame();
+            parser.readFrame();
+            parser.readFrame();
+        } catch (IOException ioe) {
+            // Dump for debugging purposes
+            ioe.printStackTrace();
+            // Continue - we'll get trace dumped to stdout below
+        }
+
+        trace = output.getTrace();
+        System.out.println(trace);
+        Assert.assertTrue(trace.contains("17-Body-5266\n"));
+        trace = trace.replace("17-Body-5266\n", "");
+        Assert.assertTrue(trace.contains("17-EndOfStream\n"));
+        trace = trace.replace("17-EndOfStream\n", "");
+        Assert.assertTrue(trace.contains("19-Body-1560\n"));
+        trace = trace.replace("19-Body-1560\n", "");
+        Assert.assertTrue(trace.contains("21-Body-1365\n"));
+        trace = trace.replace("21-Body-1365\n", "");
+        Assert.assertEquals(0, trace.length());
+
+        // Test doesn't read the rest of the body for streams 19 and 21.
+    }
+}
diff --git a/java/org/apache/coyote/http2/Constants.java b/test/org/apache/tomcat/util/http/parser/TestPriority.java
similarity index 52%
copy from java/org/apache/coyote/http2/Constants.java
copy to test/org/apache/tomcat/util/http/parser/TestPriority.java
index 739ae7eb12..eeac940f6e 100644
--- a/java/org/apache/coyote/http2/Constants.java
+++ b/test/org/apache/tomcat/util/http/parser/TestPriority.java
@@ -14,26 +14,24 @@
  *  See the License for the specific language governing permissions and
  *  limitations under the License.
  */
-package org.apache.coyote.http2;
+package org.apache.tomcat.util.http.parser;
 
-public class Constants {
+import java.io.Reader;
+import java.io.StringReader;
 
-    // Prioritisation
-    public static final int DEFAULT_WEIGHT = 16;
+import org.junit.Assert;
+import org.junit.Test;
 
-    // Parsing
-    static final int DEFAULT_HEADER_READ_BUFFER_SIZE = 1024;
+public class TestPriority {
 
-    // Header frame size
-    // TODO: Is 1k the optimal value?
-    static final int DEFAULT_HEADERS_FRAME_SIZE = 1024;
-    // TODO: Is 64 too big? Just the status header with compression
-    static final int DEFAULT_HEADERS_ACK_FRAME_SIZE = 64;
+    @Test
+    public void testOnlyIncremental() throws Exception {
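+        // RFC 9218 defines the priority field as a structured-field dictionary; a value of just "i" sets the
+        // incremental flag and leaves the urgency at its default of 3.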
+        String input = "i";
+        Reader reader = new StringReader(input);
 
-    // Limits
-    static final int DEFAULT_MAX_COOKIE_COUNT = 200;
-    static final int DEFAULT_MAX_HEADER_COUNT = 100;
-    static final int DEFAULT_MAX_HEADER_SIZE = 8 * 1024;
-    static final int DEFAULT_MAX_TRAILER_COUNT = 100;
-    static final int DEFAULT_MAX_TRAILER_SIZE = 8 * 1024;
+        Priority p = Priority.parsePriority(reader);
+
+        Assert.assertEquals(Priority.DEFAULT_URGENCY, p.getUrgency());
+        Assert.assertTrue(p.getIncremental());
+    }
 }
diff --git a/test/org/apache/tomcat/util/http/parser/TesterHttpWgStructuredField.java b/test/org/apache/tomcat/util/http/parser/TesterHttpWgStructuredField.java
new file mode 100644
index 0000000000..1c86dd51cb
--- /dev/null
+++ b/test/org/apache/tomcat/util/http/parser/TesterHttpWgStructuredField.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.tomcat.util.http.parser;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.List;
+import java.util.Map;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.tomcat.util.buf.StringUtils;
+import org.apache.tomcat.util.json.JSONParser;
+
+/*
+ * Not run automatically (due to name) as it requires a local git clone of
+ * https://github.com/httpwg/structured-field-tests
+ */
+public class TesterHttpWgStructuredField {
+
+    private static final String testsPath = System.getProperty("user.home") + "/repos/httpwg-sf-tests";
+
+
+    @Test
+    public void test() throws Exception {
+        File testDir = new File(testsPath);
+        doTestDirectory(testDir);
+    }
+
+
+    private void doTestDirectory(File directory) throws Exception {
+        for (File file : directory.listFiles()) {
+            if (file.isDirectory()) {
+                if (!file.getName().equals("serialisation-tests")) {
+                    doTestDirectory(file);
+                }
+            } else if (file.isFile()) {
+                if (file.getName().endsWith(".json")) {
+                    doTestFile(file);
+                }
+            }
+        }
+    }
+
+
+    private void doTestFile(File file) throws Exception {
+        System.out.println(file.getAbsolutePath());
+
+        try (FileInputStream fis = new FileInputStream(file)) {
+            JSONParser parser = new JSONParser(fis);
+            List<Object> array = parser.parseArray();
+            for (Object obj : array) {
+                if (obj instanceof Map) {
+                    doTestMap((Map<?,?>) obj);
+                } else {
+                    Assert.fail();
+                }
+            }
+        }
+    }
+
+
+    private void doTestMap(Map<?,?> map) throws Exception {
+        String name = (String) map.get("name");
+        @SuppressWarnings("unchecked")
+        List<String> rawLines = (List<String>) map.get("raw");
+        String headerType = (String) map.get("header_type");
+        Boolean mustFail = ((Boolean) map.get("must_fail"));
+        if (mustFail == null) {
+            mustFail = Boolean.FALSE;
+        }
+        Boolean canFail = ((Boolean) map.get("can_fail"));
+        if (canFail == null) {
+            canFail = Boolean.FALSE;
+        }
+        String raw = StringUtils.join(rawLines);
+        /*
+         * The simple JSON parser may not be handling escape sequences
+         * correctly.
+         */
+        String unescaped = raw.replace("\\\"", "\"");
+        unescaped = unescaped.replace("\\b", "\u0008");
+        unescaped = unescaped.replace("\\t", "\t");
+        unescaped = unescaped.replace("\\n", "\n");
+        unescaped = unescaped.replace("\\f", "\u000c");
+        unescaped = unescaped.replace("\\r", "\r");
+        unescaped = unescaped.replace("\\\\", "\\");
+        Reader input = new StringReader(unescaped);
+
+        try {
+            switch (headerType) {
+            case "item": {
+                StructuredField.parseSfItem(input);
+                break;
+            }
+            case "list": {
+                StructuredField.parseSfList(input);
+                break;
+            }
+            case "dictionary": {
+                StructuredField.parseSfDictionary(input);
+                break;
+            }
+            default:
+                System.out.println("Type unsupported " + headerType);
+            }
+        } catch (Exception e) {
+            Assert.assertTrue(name + ": raw [" + unescaped + "]", mustFail.booleanValue() || canFail.booleanValue());
+            return;
+        }
+        Assert.assertFalse(name + ": raw [" + unescaped + "]", mustFail.booleanValue());
+    }
+}
diff --git a/webapps/docs/changelog.xml b/webapps/docs/changelog.xml
index 1e951b4fa1..987f48cdcf 100644
--- a/webapps/docs/changelog.xml
+++ b/webapps/docs/changelog.xml
@@ -105,6 +105,15 @@
   issues do not "pop up" wrt. others).
 -->
 <section name="Tomcat 8.5.90 (schultz)" rtext="in development">
+  <subsection name="Coyote">
+    <changelog>
+      <update>
+        Update the HTTP/2 implementation to use the prioritization scheme
+        defined in RFC 9218 rather than the one defined in RFC 7540.
+        (markt)
+      </update>
+    </changelog>
+  </subsection>
 </section>
 <section name="Tomcat 8.5.89 (schultz)" rtext="reelase in progress">
   <subsection name="Catalina">


---------------------------------------------------------------------
To unsubscribe, e-mail: dev-unsubscribe@tomcat.apache.org
For additional commands, e-mail: dev-help@tomcat.apache.org