Posted to commits@avro.apache.org by ko...@apache.org on 2021/05/28 20:07:25 UTC

[avro] branch master updated: AVRO-3148: Format Python with Black (#1233)

This is an automated email from the ASF dual-hosted git repository.

kojiromike pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/avro.git


The following commit(s) were added to refs/heads/master by this push:
     new 8c156bd  AVRO-3148: Format Python with Black (#1233)
8c156bd is described below

commit 8c156bd10af22f5bda22a926f86c21e93b12ab4e
Author: Michael A. Smith <mi...@smith-li.com>
AuthorDate: Fri May 28 16:07:13 2021 -0400

    AVRO-3148: Format Python with Black (#1233)
    
    * AVRO-3148: Format Python with Black
    
    Updating tox.ini to use black for lint checks going forward.
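    
    A lint environment wired to black might look roughly like the
    following. This is a minimal sketch, assuming an env name and
    unpinned tools; it is not necessarily the exact tox.ini from this
    commit:
    
    ```
    [testenv:lint]
    deps =
        black
        isort
    commands =
        black --line-length 150 --check --diff .
        isort --line-length 150 --profile black --check-only .
    ```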
    
    Also Ran:
    
    ```
    pyupgrade --py36-plus $(git ls-files '*.py')
    isort --line-length 150 --profile black $(git ls-files '*.py')
    black --line-length 150 $(git ls-files '*.py')
    ```
    
    * AVRO-3148: Only Typecheck/Lint on One Python
    
    The default GitHub Actions image has trouble installing black and mypy.
    Besides, lint checks only need to pass in one version of Python.
    So, to save time and energy, we explicitly run these in the latest
    Python only.
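    
    For reference, the new lang/py/pyproject.toml in the diffstat below
    most likely carries the shared formatter settings. A hedged sketch,
    assuming it simply mirrors the flags used in the commands above
    (the file's real contents may differ):
    
    ```
    [tool.black]
    line-length = 150
    
    [tool.isort]
    profile = "black"
    line_length = 150
    ```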
---
 .github/workflows/test-lang-py.yml           |   19 +-
 lang/py/avro/__init__.py                     |   16 +-
 lang/py/avro/codecs.py                       |   25 +-
 lang/py/avro/compatibility.py                |  123 ++-
 lang/py/avro/constants.py                    |    4 +-
 lang/py/avro/datafile.py                     |   53 +-
 lang/py/avro/errors.py                       |   21 +-
 lang/py/avro/io.py                           |  260 +++---
 lang/py/avro/ipc.py                          |  106 ++-
 lang/py/avro/protocol.py                     |   90 +-
 lang/py/avro/schema.py                       |  527 ++++++------
 lang/py/avro/test/__init__.py                |    2 -
 lang/py/avro/test/gen_interop_data.py        |   39 +-
 lang/py/avro/test/mock_tether_parent.py      |   47 +-
 lang/py/avro/test/sample_http_client.py      |   18 +-
 lang/py/avro/test/sample_http_server.py      |   64 +-
 lang/py/avro/test/test_bench.py              |    9 +-
 lang/py/avro/test/test_compatibility.py      | 1163 ++++++++++++++++----------
 lang/py/avro/test/test_datafile.py           |   75 +-
 lang/py/avro/test/test_datafile_interop.py   |   19 +-
 lang/py/avro/test/test_init.py               |    7 +-
 lang/py/avro/test/test_io.py                 |   74 +-
 lang/py/avro/test/test_ipc.py                |   12 +-
 lang/py/avro/test/test_protocol.py           |  535 +++++++-----
 lang/py/avro/test/test_schema.py             |  982 +++++++++++++---------
 lang/py/avro/test/test_script.py             |   30 +-
 lang/py/avro/test/test_tether_task.py        |   12 +-
 lang/py/avro/test/test_tether_task_runner.py |   65 +-
 lang/py/avro/test/test_tether_word_count.py  |   69 +-
 lang/py/avro/test/word_count_task.py         |    9 +-
 lang/py/avro/tether/__init__.py              |   10 +-
 lang/py/avro/tether/tether_task.py           |   38 +-
 lang/py/avro/tether/tether_task_runner.py    |   34 +-
 lang/py/avro/tether/util.py                  |    2 -
 lang/py/avro/timezones.py                    |    2 -
 lang/py/avro/tool.py                         |   35 +-
 lang/py/pyproject.toml                       |   20 +
 lang/py/scripts/avro                         |    4 +-
 lang/py/setup.py                             |   58 +-
 lang/py/tox.ini                              |   14 +-
 40 files changed, 2671 insertions(+), 2021 deletions(-)

diff --git a/.github/workflows/test-lang-py.yml b/.github/workflows/test-lang-py.yml
index caa1a03..19522c0 100644
--- a/.github/workflows/test-lang-py.yml
+++ b/.github/workflows/test-lang-py.yml
@@ -36,12 +36,12 @@ jobs:
       fail-fast: false
       matrix:
         python:
-        - '3.6'
-        - '3.7'
-        - '3.8'
         - '3.9'
-        - 'pypy-3.6'
+        - '3.8'
+        - '3.7'
+        - '3.6'
         - 'pypy-3.7'
+        - 'pypy-3.6'
 
     steps:
       - uses: actions/checkout@v2
@@ -64,10 +64,11 @@ jobs:
           python3 -m pip install --upgrade pip setuptools tox-wheel
 
       - name: Lint
+        if: ${{ matrix.python == '3.9' }}
         run: python3 -m tox -e lint
 
       - name: Typechecks
-        if: "! startsWith(matrix.python, 'pypy-')"
+        if: ${{ matrix.python == '3.9' }}
         run: python3 -m tox -e typechecks
 
       - name: Test
@@ -80,12 +81,12 @@ jobs:
       fail-fast: false
       matrix:
         python:
-        - '3.6'
-        - '3.7'
-        - '3.8'
         - '3.9'
-        - 'pypy-3.6'
+        - '3.8'
+        - '3.7'
+        - '3.6'
         - 'pypy-3.7'
+        - 'pypy-3.6'
 
     steps:
       - uses: actions/checkout@v2
diff --git a/lang/py/avro/__init__.py b/lang/py/avro/__init__.py
index 2ccfa35..cb1632f 100644
--- a/lang/py/avro/__init__.py
+++ b/lang/py/avro/__init__.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -21,6 +19,16 @@
 
 import pkgutil
 
-__all__ = ['schema', 'io', 'datafile', 'protocol', 'ipc', 'constants', 'timezones', 'codecs', 'compatibility']
+__all__ = [
+    "schema",
+    "io",
+    "datafile",
+    "protocol",
+    "ipc",
+    "constants",
+    "timezones",
+    "codecs",
+    "compatibility",
+]
 
-__version__ = (pkgutil.get_data(__name__, 'VERSION.txt') or b'0.0.1+unknown').decode().strip()
+__version__ = (pkgutil.get_data(__name__, "VERSION.txt") or b"0.0.1+unknown").decode().strip()
diff --git a/lang/py/avro/codecs.py b/lang/py/avro/codecs.py
index 4922e6d..fa89d77 100644
--- a/lang/py/avro/codecs.py
+++ b/lang/py/avro/codecs.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -41,21 +39,24 @@ import avro.io
 #
 # Constants
 #
-STRUCT_CRC32 = struct.Struct('>I')  # big-endian unsigned int
+STRUCT_CRC32 = struct.Struct(">I")  # big-endian unsigned int
 
 
 try:
     import bz2
+
     has_bzip2 = True
 except ImportError:
     has_bzip2 = False
 try:
     import snappy
+
     has_snappy = True
 except ImportError:
     has_snappy = False
 try:
     import zstandard as zstd
+
     has_zstandard = True
 except ImportError:
     has_zstandard = False
@@ -116,6 +117,7 @@ class DeflateCodec(Codec):
 
 
 if has_bzip2:
+
     class BZip2Codec(Codec):
         def compress(self, data):
             compressed_data = bz2.compress(data)
@@ -129,11 +131,12 @@ if has_bzip2:
 
 
 if has_snappy:
+
     class SnappyCodec(Codec):
         def compress(self, data):
             compressed_data = snappy.compress(data)
             # A 4-byte, big-endian CRC32 checksum
-            compressed_data += STRUCT_CRC32.pack(binascii.crc32(data) & 0xffffffff)
+            compressed_data += STRUCT_CRC32.pack(binascii.crc32(data) & 0xFFFFFFFF)
             return compressed_data, len(compressed_data)
 
         def decompress(self, readers_decoder):
@@ -147,11 +150,12 @@ if has_snappy:
 
         def check_crc32(self, bytes, checksum):
             checksum = STRUCT_CRC32.unpack(checksum)[0]
-            if binascii.crc32(bytes) & 0xffffffff != checksum:
+            if binascii.crc32(bytes) & 0xFFFFFFFF != checksum:
                 raise avro.errors.AvroException("Checksum failure")
 
 
 if has_zstandard:
+
     class ZstandardCodec(Codec):
         def compress(self, data):
             compressed_data = zstd.ZstdCompressor().compress(data)
@@ -183,16 +187,15 @@ def get_codec(codec_name):
         return SnappyCodec()
     if codec_name == "zstandard" and has_zstandard:
         return ZstandardCodec()
-    raise avro.errors.UnsupportedCodec("Unsupported codec: {}. (Is it installed?)"
-                                       .format(codec_name))
+    raise avro.errors.UnsupportedCodec(f"Unsupported codec: {codec_name}. (Is it installed?)")
 
 
 def supported_codec_names():
-    codec_names = ['null', 'deflate']
+    codec_names = ["null", "deflate"]
     if has_bzip2:
-        codec_names.append('bzip2')
+        codec_names.append("bzip2")
     if has_snappy:
-        codec_names.append('snappy')
+        codec_names.append("snappy")
     if has_zstandard:
-        codec_names.append('zstandard')
+        codec_names.append("zstandard")
     return codec_names
diff --git a/lang/py/avro/compatibility.py b/lang/py/avro/compatibility.py
index 1f49c15..1200773 100644
--- a/lang/py/avro/compatibility.py
+++ b/lang/py/avro/compatibility.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -23,7 +21,17 @@ from enum import Enum
 from typing import List, Optional, Set, cast
 
 from avro.errors import AvroRuntimeException
-from avro.schema import ArraySchema, EnumSchema, Field, FixedSchema, MapSchema, NamedSchema, RecordSchema, Schema, UnionSchema
+from avro.schema import (
+    ArraySchema,
+    EnumSchema,
+    Field,
+    FixedSchema,
+    MapSchema,
+    NamedSchema,
+    RecordSchema,
+    Schema,
+    UnionSchema,
+)
 
 
 class SchemaType(str, Enum):
@@ -59,9 +67,14 @@ class SchemaIncompatibilityType(Enum):
 
 
 PRIMITIVE_TYPES = {
-    SchemaType.NULL, SchemaType.BOOLEAN, SchemaType.INT,
-    SchemaType.LONG, SchemaType.FLOAT, SchemaType.DOUBLE,
-    SchemaType.BYTES, SchemaType.STRING
+    SchemaType.NULL,
+    SchemaType.BOOLEAN,
+    SchemaType.INT,
+    SchemaType.LONG,
+    SchemaType.FLOAT,
+    SchemaType.DOUBLE,
+    SchemaType.BYTES,
+    SchemaType.STRING,
 }
 
 
@@ -98,7 +111,10 @@ def merge(this: SchemaCompatibilityResult, that: SchemaCompatibilityResult) -> S
         messages = this.messages.union(that.messages)
         locations = this.locations.union(that.locations)
     return SchemaCompatibilityResult(
-        compatibility=compat, incompatibilities=merged, messages=messages, locations=locations
+        compatibility=compat,
+        incompatibilities=merged,
+        messages=messages,
+        locations=locations,
     )
 
 
@@ -129,7 +145,7 @@ class ReaderWriterCompatibilityChecker:
         reader: Schema,
         writer: Schema,
         reference_token: str = ROOT_REFERENCE_TOKEN,
-        location: Optional[List[str]] = None
+        location: Optional[List[str]] = None,
     ) -> SchemaCompatibilityResult:
         if location is None:
             location = []
@@ -167,10 +183,16 @@ class ReaderWriterCompatibilityChecker:
                 return result
             if reader.type == SchemaType.ARRAY:
                 reader, writer = cast(ArraySchema, reader), cast(ArraySchema, writer)
-                return merge(result, self.get_compatibility(reader.items, writer.items, "items", location))
+                return merge(
+                    result,
+                    self.get_compatibility(reader.items, writer.items, "items", location),
+                )
             if reader.type == SchemaType.MAP:
                 reader, writer = cast(MapSchema, reader), cast(MapSchema, writer)
-                return merge(result, self.get_compatibility(reader.values, writer.values, "values", location))
+                return merge(
+                    result,
+                    self.get_compatibility(reader.values, writer.values, "values", location),
+                )
             if reader.type == SchemaType.FIXED:
                 reader, writer = cast(FixedSchema, reader), cast(FixedSchema, writer)
                 result = merge(result, check_schema_names(reader, writer, location))
@@ -178,11 +200,17 @@ class ReaderWriterCompatibilityChecker:
             if reader.type == SchemaType.ENUM:
                 reader, writer = cast(EnumSchema, reader), cast(EnumSchema, writer)
                 result = merge(result, check_schema_names(reader, writer, location))
-                return merge(result, check_reader_enum_contains_writer_enum(reader, writer, location))
+                return merge(
+                    result,
+                    check_reader_enum_contains_writer_enum(reader, writer, location),
+                )
             if reader.type == SchemaType.RECORD:
                 reader, writer = cast(RecordSchema, reader), cast(RecordSchema, writer)
                 result = merge(result, check_schema_names(reader, writer, location))
-                return merge(result, self.check_reader_writer_record_fields(reader, writer, location))
+                return merge(
+                    result,
+                    self.check_reader_writer_record_fields(reader, writer, location),
+                )
             if reader.type == SchemaType.UNION:
                 reader, writer = cast(UnionSchema, reader), cast(UnionSchema, writer)
                 for i, writer_branch in enumerate(writer.schemas):
@@ -192,11 +220,12 @@ class ReaderWriterCompatibilityChecker:
                             result,
                             incompatible(
                                 SchemaIncompatibilityType.missing_union_branch,
-                                "reader union lacking writer type: {}".format(writer_branch.type.upper()), location + [str(i)]
-                            )
+                                f"reader union lacking writer type: {writer_branch.type.upper()}",
+                                location + [str(i)],
+                            ),
                         )
                 return result
-            raise AvroRuntimeException("Unknown schema type: {}".format(reader.type))
+            raise AvroRuntimeException(f"Unknown schema type: {reader.type}")
         if writer.type == SchemaType.UNION:
             writer = cast(UnionSchema, writer)
             for s in writer.schemas:
@@ -224,7 +253,13 @@ class ReaderWriterCompatibilityChecker:
             if writer.type == SchemaType.BYTES:
                 return result
             return merge(result, type_mismatch(reader, writer, location))
-        if reader.type in {SchemaType.ARRAY, SchemaType.MAP, SchemaType.FIXED, SchemaType.ENUM, SchemaType.RECORD}:
+        if reader.type in {
+            SchemaType.ARRAY,
+            SchemaType.MAP,
+            SchemaType.FIXED,
+            SchemaType.ENUM,
+            SchemaType.RECORD,
+        }:
             return merge(result, type_mismatch(reader, writer, location))
         if reader.type == SchemaType.UNION:
             reader = cast(UnionSchema, reader)
@@ -233,20 +268,16 @@ class ReaderWriterCompatibilityChecker:
                 if compat.compatibility is SchemaCompatibilityType.compatible:
                     return result
             # No branch in reader compatible with writer
-            message = "reader union lacking writer type {}".format(writer.type)
+            message = f"reader union lacking writer type {writer.type}"
             return merge(
                 result,
-                incompatible(
-                    SchemaIncompatibilityType.missing_union_branch, message, location
-                )
+                incompatible(SchemaIncompatibilityType.missing_union_branch, message, location),
             )
-        raise AvroRuntimeException("Unknown schema type: {}".format(reader.type))
+        raise AvroRuntimeException(f"Unknown schema type: {reader.type}")
 
     # pylint: enable=too-many-return-statements
 
-    def check_reader_writer_record_fields(
-        self, reader: RecordSchema, writer: RecordSchema, location: List[str]
-    ) -> SchemaCompatibilityResult:
+    def check_reader_writer_record_fields(self, reader: RecordSchema, writer: RecordSchema, location: List[str]) -> SchemaCompatibilityResult:
         result = CompatibleResult
         for i, reader_field in enumerate(reader.fields):
             reader_field = cast(Field, reader_field)
@@ -257,50 +288,53 @@ class ReaderWriterCompatibilityChecker:
                         result = merge(
                             result,
                             self.get_compatibility(
-                                reader_field.type, writer, "type", location + ["fields", str(i)]
-                            ))
+                                reader_field.type,
+                                writer,
+                                "type",
+                                location + ["fields", str(i)],
+                            ),
+                        )
                     else:
                         result = merge(
                             result,
                             incompatible(
                                 SchemaIncompatibilityType.reader_field_missing_default_value,
-                                reader_field.name, location + ["fields", str(i)]
-                            )
+                                reader_field.name,
+                                location + ["fields", str(i)],
+                            ),
                         )
             else:
                 result = merge(
                     result,
                     self.get_compatibility(
-                        reader_field.type, writer_field.type, "type", location + ["fields", str(i)]
-                    ))
+                        reader_field.type,
+                        writer_field.type,
+                        "type",
+                        location + ["fields", str(i)],
+                    ),
+                )
         return result
 
 
 def type_mismatch(reader: Schema, writer: Schema, location: List[str]) -> SchemaCompatibilityResult:
-    message = "reader type: {} not compatible with writer type: {}".format(reader.type.upper(), writer.type.upper())
+    message = f"reader type: {reader.type} not compatible with writer type: {writer.type}"
     return incompatible(SchemaIncompatibilityType.type_mismatch, message, location)
 
 
-def check_schema_names(
-    reader: NamedSchema, writer: NamedSchema,
-    location: List[str]
-) -> SchemaCompatibilityResult:
+def check_schema_names(reader: NamedSchema, writer: NamedSchema, location: List[str]) -> SchemaCompatibilityResult:
     result = CompatibleResult
     if not schema_name_equals(reader, writer):
-        message = "expected: {}".format(writer.fullname)
+        message = f"expected: {writer.fullname}"
         result = incompatible(SchemaIncompatibilityType.name_mismatch, message, location + ["name"])
     return result
 
 
-def check_fixed_size(
-    reader: FixedSchema, writer:
-    FixedSchema, location: List[str]
-) -> SchemaCompatibilityResult:
+def check_fixed_size(reader: FixedSchema, writer: FixedSchema, location: List[str]) -> SchemaCompatibilityResult:
     result = CompatibleResult
     actual = reader.size
     expected = writer.size
     if actual != expected:
-        message = "expected: {}, found: {}".format(expected, actual)
+        message = f"expected: {expected}, found: {actual}"
         result = incompatible(
             SchemaIncompatibilityType.fixed_size_mismatch,
             message,
@@ -309,9 +343,7 @@ def check_fixed_size(
     return result
 
 
-def check_reader_enum_contains_writer_enum(
-    reader: EnumSchema, writer: EnumSchema, location: List[str]
-) -> SchemaCompatibilityResult:
+def check_reader_enum_contains_writer_enum(reader: EnumSchema, writer: EnumSchema, location: List[str]) -> SchemaCompatibilityResult:
     result = CompatibleResult
     writer_symbols, reader_symbols = set(writer.symbols), set(reader.symbols)
     extra_symbols = writer_symbols.difference(reader_symbols)
@@ -322,7 +354,8 @@ def check_reader_enum_contains_writer_enum(
         else:
             result = incompatible(
                 SchemaIncompatibilityType.missing_enum_symbols,
-                str(extra_symbols), location + ["symbols"]
+                str(extra_symbols),
+                location + ["symbols"],
             )
     return result
 
diff --git a/lang/py/avro/constants.py b/lang/py/avro/constants.py
index 21a7dd1..ec6dc61 100644
--- a/lang/py/avro/constants.py
+++ b/lang/py/avro/constants.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -37,5 +35,5 @@ SUPPORTED_LOGICAL_TYPE = [
     TIMESTAMP_MILLIS,
     TIME_MICROS,
     TIME_MILLIS,
-    UUID
+    UUID,
 ]
diff --git a/lang/py/avro/datafile.py b/lang/py/avro/datafile.py
index 03125ee..8a660bc 100644
--- a/lang/py/avro/datafile.py
+++ b/lang/py/avro/datafile.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -22,6 +20,7 @@
 """Read/Write Avro File Object Containers."""
 
 import io
+import json
 import os
 import random
 import zlib
@@ -35,21 +34,27 @@ import avro.schema
 # Constants
 #
 VERSION = 1
-MAGIC = bytes(b'Obj' + bytearray([VERSION]))
+MAGIC = bytes(b"Obj" + bytearray([VERSION]))
 MAGIC_SIZE = len(MAGIC)
 SYNC_SIZE = 16
 SYNC_INTERVAL = 4000 * SYNC_SIZE  # TODO(hammer): make configurable
-META_SCHEMA = avro.schema.parse("""\
-{"type": "record", "name": "org.apache.avro.file.Header",
- "fields" : [
-   {"name": "magic", "type": {"type": "fixed", "name": "magic", "size": %d}},
-   {"name": "meta", "type": {"type": "map", "values": "bytes"}},
-   {"name": "sync", "type": {"type": "fixed", "name": "sync", "size": %d}}]}
-""" % (MAGIC_SIZE, SYNC_SIZE))
-
-NULL_CODEC = 'null'
+META_SCHEMA = avro.schema.parse(
+    json.dumps(
+        {
+            "type": "record",
+            "name": "org.apache.avro.file.Header",
+            "fields": [
+                {"name": "magic", "type": {"type": "fixed", "name": "magic", "size": MAGIC_SIZE}},
+                {"name": "meta", "type": {"type": "map", "values": "bytes"}},
+                {"name": "sync", "type": {"type": "fixed", "name": "sync", "size": SYNC_SIZE}},
+            ],
+        }
+    )
+)
+
+NULL_CODEC = "null"
 VALID_CODECS = avro.codecs.supported_codec_names()
-VALID_ENCODINGS = ['binary']  # not used yet
+VALID_ENCODINGS = ["binary"]  # not used yet
 
 CODEC_KEY = "avro.codec"
 SCHEMA_KEY = "avro.schema"
@@ -103,7 +108,7 @@ class _DataFile:
     def codec(self, value):
         """Meta are stored as bytes, but codec is set as a string."""
         if value not in VALID_CODECS:
-            raise avro.errors.DataFileException("Unknown codec: {!r}".format(value))
+            raise avro.errors.DataFileException(f"Unknown codec: {value!r}")
         self.set_meta(CODEC_KEY, value.encode())
 
     @property
@@ -164,9 +169,7 @@ class DataFileWriter(_DataFile):
     buffer_encoder = property(lambda self: self._buffer_encoder)
 
     def _write_header(self):
-        header = {'magic': MAGIC,
-                  'meta': self.meta,
-                  'sync': self.sync_marker}
+        header = {"magic": MAGIC, "meta": self.meta, "sync": self.sync_marker}
         self.datum_writer.write_data(META_SCHEMA, header, self.encoder)
         self._header_written = True
 
@@ -179,7 +182,7 @@ class DataFileWriter(_DataFile):
     def codec(self, value):
         """Meta are stored as bytes, but codec is set as a string."""
         if value not in VALID_CODECS:
-            raise avro.errors.DataFileException("Unknown codec: {!r}".format(value))
+            raise avro.errors.DataFileException(f"Unknown codec: {value!r}")
         self.set_meta(CODEC_KEY, value.encode())
 
     # TODO(hammer): make a schema for blocks and use datum_writer
@@ -241,6 +244,7 @@ class DataFileWriter(_DataFile):
 
 class DataFileReader(_DataFile):
     """Read files written by DataFileWriter."""
+
     # TODO(hammer): allow user to specify expected schema?
     # TODO(hammer): allow user to specify the encoder
 
@@ -288,20 +292,17 @@ class DataFileReader(_DataFile):
         self.reader.seek(0, 0)
 
         # read header into a dict
-        header = self.datum_reader.read_data(
-            META_SCHEMA, META_SCHEMA, self.raw_decoder)
+        header = self.datum_reader.read_data(META_SCHEMA, META_SCHEMA, self.raw_decoder)
 
         # check magic number
-        if header.get('magic') != MAGIC:
-            fail_msg = "Not an Avro data file: %s doesn't match %s."\
-                       % (header.get('magic'), MAGIC)
-            raise avro.errors.AvroException(fail_msg)
+        if header.get("magic") != MAGIC:
+            raise avro.errors.AvroException(f"Not an Avro data file: {header.get('magic')!r} doesn't match {MAGIC!r}.")
 
         # set metadata
-        self._meta = header['meta']
+        self._meta = header["meta"]
 
         # set sync marker
-        self._sync_marker = header['sync']
+        self._sync_marker = header["sync"]
 
     def _read_block_header(self):
         self.block_count = self.raw_decoder.read_long()
diff --git a/lang/py/avro/errors.py b/lang/py/avro/errors.py
index 0a5a7ed..b613e55 100644
--- a/lang/py/avro/errors.py
+++ b/lang/py/avro/errors.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -57,9 +56,7 @@ class AvroTypeException(AvroException):
             expected_schema, datum = args[:2]
         except (IndexError, ValueError):
             return super().__init__(*args)
-        return super().__init__(
-            f"The datum {datum} is not an example of the schema {_safe_pretty(expected_schema)}"
-        )
+        return super().__init__(f"The datum {datum} is not an example of the schema {_safe_pretty(expected_schema)}")
 
 
 class AvroOutOfScaleException(AvroTypeException):
@@ -70,23 +67,13 @@ class AvroOutOfScaleException(AvroTypeException):
             scale, datum, exponent = args[:3]
         except (IndexError, ValueError):
             return super().__init__(*args)
-        return super().__init__(
-            f"The exponent of {datum}, {exponent}, is too large for the schema scale of {scale}"
-        )
+        return super().__init__(f"The exponent of {datum}, {exponent}, is too large for the schema scale of {scale}")
 
 
 class SchemaResolutionException(AvroException):
     def __init__(self, fail_msg, writers_schema=None, readers_schema=None, *args):
-        writers_message = (
-            f"\nWriter's Schema: {_safe_pretty(writers_schema)}"
-            if writers_schema
-            else ""
-        )
-        readers_message = (
-            f"\nReader's Schema: {_safe_pretty(readers_schema)}"
-            if readers_schema
-            else ""
-        )
+        writers_message = f"\nWriter's Schema: {_safe_pretty(writers_schema)}" if writers_schema else ""
+        readers_message = f"\nReader's Schema: {_safe_pretty(readers_schema)}" if readers_schema else ""
         super().__init__((fail_msg or "") + writers_message + readers_message, *args)
 
 
diff --git a/lang/py/avro/io.py b/lang/py/avro/io.py
index 05570bb..0978b3a 100644
--- a/lang/py/avro/io.py
+++ b/lang/py/avro/io.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -101,11 +100,11 @@ import avro.timezones
 
 
 # TODO(hammer): shouldn't ! be < for little-endian (according to spec?)
-STRUCT_FLOAT = struct.Struct('<f')           # big-endian float
-STRUCT_DOUBLE = struct.Struct('<d')          # big-endian double
-STRUCT_SIGNED_SHORT = struct.Struct('>h')    # big-endian signed short
-STRUCT_SIGNED_INT = struct.Struct('>i')      # big-endian signed int
-STRUCT_SIGNED_LONG = struct.Struct('>q')     # big-endian signed long
+STRUCT_FLOAT = struct.Struct("<f")  # little-endian float
+STRUCT_DOUBLE = struct.Struct("<d")  # little-endian double
+STRUCT_SIGNED_SHORT = struct.Struct(">h")  # big-endian signed short
+STRUCT_SIGNED_INT = struct.Struct(">i")  # big-endian signed int
+STRUCT_SIGNED_LONG = struct.Struct(">q")  # big-endian signed long
 
 
 #
@@ -113,7 +112,7 @@ STRUCT_SIGNED_LONG = struct.Struct('>q')     # big-endian signed long
 #
 
 
-ValidationNode = collections.namedtuple("ValidationNode", ['schema', 'datum', 'name'])
+ValidationNode = collections.namedtuple("ValidationNode", ["schema", "datum", "name"])
 
 
 def validate(expected_schema, datum, raise_on_error=False):
@@ -176,11 +175,8 @@ def _default_iterator(_):
     """Immediately raise StopIteration.
 
     This exists to prevent problems with iteration over unsupported container types.
-
-    More efficient approaches are not possible due to support for Python 2.7
     """
-    for item in ():
-        yield item
+    yield from ()
 
 
 def _record_iterator(node):
@@ -206,11 +202,11 @@ def _map_iterator(node):
 
 
 _ITERATORS = {
-    'record': _record_iterator,
-    'array': _array_iterator,
-    'map': _map_iterator,
+    "record": _record_iterator,
+    "array": _array_iterator,
+    "map": _map_iterator,
 }
-_ITERATORS['error'] = _ITERATORS['request'] = _ITERATORS['record']
+_ITERATORS["error"] = _ITERATORS["request"] = _ITERATORS["record"]
 
 
 #
@@ -300,19 +296,19 @@ class BinaryDecoder:
         """
         datum = self.read(size)
         unscaled_datum = 0
-        msb = struct.unpack('!b', datum[0:1])[0]
+        msb = struct.unpack("!b", datum[0:1])[0]
         leftmost_bit = (msb >> 7) & 1
         if leftmost_bit == 1:
             modified_first_byte = ord(datum[0:1]) ^ (1 << 7)
             datum = bytearray([modified_first_byte]) + datum[1:]
             for offset in range(size):
                 unscaled_datum <<= 8
-                unscaled_datum += ord(datum[offset:1 + offset])
+                unscaled_datum += ord(datum[offset : 1 + offset])
             unscaled_datum += pow(-2, (size * 8) - 1)
         else:
             for offset in range(size):
                 unscaled_datum <<= 8
-                unscaled_datum += ord(datum[offset:1 + offset])
+                unscaled_datum += ord(datum[offset : 1 + offset])
 
         original_prec = decimal.getcontext().prec
         try:
@@ -346,17 +342,12 @@ class BinaryDecoder:
 
     def _build_time_object(self, value, scale_to_micro):
         value = value * scale_to_micro
-        value, microseconds = value // 1000000, value % 1000000
-        value, seconds = value // 60, value % 60
-        value, minutes = value // 60, value % 60
+        value, microseconds = divmod(value, 1000000)
+        value, seconds = divmod(value, 60)
+        value, minutes = divmod(value, 60)
         hours = value
 
-        return datetime.time(
-            hour=hours,
-            minute=minutes,
-            second=seconds,
-            microsecond=microseconds
-        )
+        return datetime.time(hour=hours, minute=minutes, second=seconds, microsecond=microseconds)
 
     def read_time_millis_from_int(self):
         """
@@ -465,7 +456,7 @@ class BinaryEncoder:
         """
         datum = (datum << 1) ^ (datum >> 63)
         while (datum & ~0x7F) != 0:
-            self.write(bytearray([(datum & 0x7f) | 0x80]))
+            self.write(bytearray([(datum & 0x7F) | 0x80]))
             datum >>= 7
         self.write(bytearray([datum]))
 
@@ -510,7 +501,7 @@ class BinaryEncoder:
         self.write_long(bytes_req)
         for index in range(bytes_req - 1, -1, -1):
             bits_to_write = packed_bits >> (8 * index)
-            self.write(bytearray([bits_to_write & 0xff]))
+            self.write(bytearray([bits_to_write & 0xFF]))
 
     def write_decimal_fixed(self, datum, scale, size):
         """
@@ -545,20 +536,20 @@ class BinaryEncoder:
             unscaled_datum = mask | unscaled_datum
             for index in range(size - 1, -1, -1):
                 bits_to_write = unscaled_datum >> (8 * index)
-                self.write(bytearray([bits_to_write & 0xff]))
+                self.write(bytearray([bits_to_write & 0xFF]))
         else:
             for i in range(offset_bits // 8):
-                self.write(b'\x00')
+                self.write(b"\x00")
             for index in range(bytes_req - 1, -1, -1):
                 bits_to_write = unscaled_datum >> (8 * index)
-                self.write(bytearray([bits_to_write & 0xff]))
+                self.write(bytearray([bits_to_write & 0xFF]))
 
     def write_bytes(self, datum):
         """
         Bytes are encoded as a long followed by that many bytes of data.
         """
         self.write_long(len(datum))
-        self.write(struct.pack('%ds' % len(datum), datum))
+        self.write(struct.pack(f"{len(datum)}s", datum))
 
     def write_utf8(self, datum):
         """
@@ -594,8 +585,7 @@ class BinaryEncoder:
         self.write_long(microseconds)
 
     def _timedelta_total_microseconds(self, timedelta):
-        return (
-            timedelta.microseconds + (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6)
+        return timedelta.microseconds + (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6
 
     def write_timestamp_millis_long(self, datum):
         """
@@ -636,13 +626,13 @@ class DatumReader:
     # read/write properties
     def set_writers_schema(self, writers_schema):
         self._writers_schema = writers_schema
-    writers_schema = property(lambda self: self._writers_schema,
-                              set_writers_schema)
+
+    writers_schema = property(lambda self: self._writers_schema, set_writers_schema)
 
     def set_readers_schema(self, readers_schema):
         self._readers_schema = readers_schema
-    readers_schema = property(lambda self: self._readers_schema,
-                              set_readers_schema)
+
+    readers_schema = property(lambda self: self._readers_schema, set_readers_schema)
 
     def read(self, decoder):
         if self.readers_schema is None:
@@ -652,38 +642,38 @@ class DatumReader:
     def read_data(self, writers_schema, readers_schema, decoder):
         # schema matching
         if not readers_schema.match(writers_schema):
-            fail_msg = 'Schemas do not match.'
+            fail_msg = "Schemas do not match."
             raise avro.errors.SchemaResolutionException(fail_msg, writers_schema, readers_schema)
 
-        logical_type = getattr(writers_schema, 'logical_type', None)
+        logical_type = getattr(writers_schema, "logical_type", None)
 
         # function dispatch for reading data based on type of writer's schema
-        if writers_schema.type in ['union', 'error_union']:
+        if writers_schema.type in ["union", "error_union"]:
             return self.read_union(writers_schema, readers_schema, decoder)
 
-        if readers_schema.type in ['union', 'error_union']:
+        if readers_schema.type in ["union", "error_union"]:
             # schema resolution: reader's schema is a union, writer's schema is not
             for s in readers_schema.schemas:
                 if s.match(writers_schema):
                     return self.read_data(writers_schema, s, decoder)
 
             # This shouldn't happen because of the match check at the start of this method.
-            fail_msg = 'Schemas do not match.'
+            fail_msg = "Schemas do not match."
             raise avro.errors.SchemaResolutionException(fail_msg, writers_schema, readers_schema)
 
-        if writers_schema.type == 'null':
+        if writers_schema.type == "null":
             return decoder.read_null()
-        elif writers_schema.type == 'boolean':
+        elif writers_schema.type == "boolean":
             return decoder.read_boolean()
-        elif writers_schema.type == 'string':
+        elif writers_schema.type == "string":
             return decoder.read_utf8()
-        elif writers_schema.type == 'int':
+        elif writers_schema.type == "int":
             if logical_type == avro.constants.DATE:
                 return decoder.read_date_from_int()
             if logical_type == avro.constants.TIME_MILLIS:
                 return decoder.read_time_millis_from_int()
             return decoder.read_int()
-        elif writers_schema.type == 'long':
+        elif writers_schema.type == "long":
             if logical_type == avro.constants.TIME_MICROS:
                 return decoder.read_time_micros_from_long()
             elif logical_type == avro.constants.TIMESTAMP_MILLIS:
@@ -692,70 +682,68 @@ class DatumReader:
                 return decoder.read_timestamp_micros_from_long()
             else:
                 return decoder.read_long()
-        elif writers_schema.type == 'float':
+        elif writers_schema.type == "float":
             return decoder.read_float()
-        elif writers_schema.type == 'double':
+        elif writers_schema.type == "double":
             return decoder.read_double()
-        elif writers_schema.type == 'bytes':
-            if logical_type == 'decimal':
+        elif writers_schema.type == "bytes":
+            if logical_type == "decimal":
                 return decoder.read_decimal_from_bytes(
-                    writers_schema.get_prop('precision'),
-                    writers_schema.get_prop('scale')
+                    writers_schema.get_prop("precision"),
+                    writers_schema.get_prop("scale"),
                 )
             else:
                 return decoder.read_bytes()
-        elif writers_schema.type == 'fixed':
-            if logical_type == 'decimal':
+        elif writers_schema.type == "fixed":
+            if logical_type == "decimal":
                 return decoder.read_decimal_from_fixed(
-                    writers_schema.get_prop('precision'),
-                    writers_schema.get_prop('scale'),
-                    writers_schema.size
+                    writers_schema.get_prop("precision"),
+                    writers_schema.get_prop("scale"),
+                    writers_schema.size,
                 )
             return self.read_fixed(writers_schema, readers_schema, decoder)
-        elif writers_schema.type == 'enum':
+        elif writers_schema.type == "enum":
             return self.read_enum(writers_schema, readers_schema, decoder)
-        elif writers_schema.type == 'array':
+        elif writers_schema.type == "array":
             return self.read_array(writers_schema, readers_schema, decoder)
-        elif writers_schema.type == 'map':
+        elif writers_schema.type == "map":
             return self.read_map(writers_schema, readers_schema, decoder)
-        elif writers_schema.type in ['record', 'error', 'request']:
+        elif writers_schema.type in ["record", "error", "request"]:
             return self.read_record(writers_schema, readers_schema, decoder)
         else:
-            fail_msg = "Cannot read unknown schema type: %s" % writers_schema.type
-            raise avro.errors.AvroException(fail_msg)
+            raise avro.errors.AvroException(f"Cannot read unknown schema type: {writers_schema.type}")
 
     def skip_data(self, writers_schema, decoder):
-        if writers_schema.type == 'null':
+        if writers_schema.type == "null":
             return decoder.skip_null()
-        elif writers_schema.type == 'boolean':
+        elif writers_schema.type == "boolean":
             return decoder.skip_boolean()
-        elif writers_schema.type == 'string':
+        elif writers_schema.type == "string":
             return decoder.skip_utf8()
-        elif writers_schema.type == 'int':
+        elif writers_schema.type == "int":
             return decoder.skip_int()
-        elif writers_schema.type == 'long':
+        elif writers_schema.type == "long":
             return decoder.skip_long()
-        elif writers_schema.type == 'float':
+        elif writers_schema.type == "float":
             return decoder.skip_float()
-        elif writers_schema.type == 'double':
+        elif writers_schema.type == "double":
             return decoder.skip_double()
-        elif writers_schema.type == 'bytes':
+        elif writers_schema.type == "bytes":
             return decoder.skip_bytes()
-        elif writers_schema.type == 'fixed':
+        elif writers_schema.type == "fixed":
             return self.skip_fixed(writers_schema, decoder)
-        elif writers_schema.type == 'enum':
+        elif writers_schema.type == "enum":
             return self.skip_enum(writers_schema, decoder)
-        elif writers_schema.type == 'array':
+        elif writers_schema.type == "array":
             return self.skip_array(writers_schema, decoder)
-        elif writers_schema.type == 'map':
+        elif writers_schema.type == "map":
             return self.skip_map(writers_schema, decoder)
-        elif writers_schema.type in ['union', 'error_union']:
+        elif writers_schema.type in ["union", "error_union"]:
             return self.skip_union(writers_schema, decoder)
-        elif writers_schema.type in ['record', 'error', 'request']:
+        elif writers_schema.type in ["record", "error", "request"]:
             return self.skip_record(writers_schema, decoder)
         else:
-            fail_msg = "Unknown schema type: %s" % writers_schema.type
-            raise avro.errors.AvroException(fail_msg)
+            raise avro.errors.AvroException(f"Unknown schema type: {writers_schema.type}")
 
     def read_fixed(self, writers_schema, readers_schema, decoder):
         """
@@ -775,15 +763,14 @@ class DatumReader:
         # read data
         index_of_symbol = decoder.read_int()
         if index_of_symbol >= len(writers_schema.symbols):
-            fail_msg = "Can't access enum index %d for enum with %d symbols"\
-                       % (index_of_symbol, len(writers_schema.symbols))
-            raise avro.errors.SchemaResolutionException(fail_msg, writers_schema, readers_schema)
+            raise avro.errors.SchemaResolutionException(
+                f"Can't access enum index {index_of_symbole} for enum with {len(writers_schema.symbols)} symbols", writers_schema, readers_schema
+            )
         read_symbol = writers_schema.symbols[index_of_symbol]
 
         # schema resolution
         if read_symbol not in readers_schema.symbols:
-            fail_msg = "Symbol %s not present in Reader's Schema" % read_symbol
-            raise avro.errors.SchemaResolutionException(fail_msg, writers_schema, readers_schema)
+            raise avro.errors.SchemaResolutionException(f"Symbol {read_symbol} not present in Reader's Schema", writers_schema, readers_schema)
 
         return read_symbol
 
@@ -812,8 +799,7 @@ class DatumReader:
                 block_count = -block_count
                 block_size = decoder.read_long()
             for i in range(block_count):
-                read_items.append(self.read_data(writers_schema.items,
-                                                 readers_schema.items, decoder))
+                read_items.append(self.read_data(writers_schema.items, readers_schema.items, decoder))
             block_count = decoder.read_long()
         return read_items
 
@@ -851,8 +837,7 @@ class DatumReader:
                 block_size = decoder.read_long()
             for i in range(block_count):
                 key = decoder.read_utf8()
-                read_items[key] = self.read_data(writers_schema.values,
-                                                 readers_schema.values, decoder)
+                read_items[key] = self.read_data(writers_schema.values, readers_schema.values, decoder)
             block_count = decoder.read_long()
         return read_items
 
@@ -877,9 +862,9 @@ class DatumReader:
         # schema resolution
         index_of_schema = int(decoder.read_long())
         if index_of_schema >= len(writers_schema.schemas):
-            fail_msg = "Can't access branch index %d for union with %d branches"\
-                       % (index_of_schema, len(writers_schema.schemas))
-            raise avro.errors.SchemaResolutionException(fail_msg, writers_schema, readers_schema)
+            raise avro.errors.SchemaResolutionException(
+                f"Can't access branch index {index_of_schema} for union with {len(writers_schema.schemas)} branches", writers_schema, readers_schema
+            )
         selected_writers_schema = writers_schema.schemas[index_of_schema]
 
         # read data
@@ -888,9 +873,9 @@ class DatumReader:
     def skip_union(self, writers_schema, decoder):
         index_of_schema = int(decoder.read_long())
         if index_of_schema >= len(writers_schema.schemas):
-            fail_msg = "Can't access branch index %d for union with %d branches"\
-                       % (index_of_schema, len(writers_schema.schemas))
-            raise avro.errors.SchemaResolutionException(fail_msg, writers_schema)
+            raise avro.errors.SchemaResolutionException(
+                f"Can't access branch index {index_of_schema} for union with {len(writers_schema.schemas)} branches", writers_schema
+            )
         return self.skip_data(writers_schema.schemas[index_of_schema], decoder)
 
     def read_record(self, writers_schema, readers_schema, decoder):
@@ -929,13 +914,10 @@ class DatumReader:
             writers_fields_dict = writers_schema.fields_dict
             for field_name, field in readers_fields_dict.items():
                 if field_name not in writers_fields_dict:
-                    if field.has_default:
-                        field_val = self._read_default_value(field.type, field.default)
-                        read_record[field.name] = field_val
-                    else:
-                        fail_msg = 'No default value for field %s' % field_name
-                        raise avro.errors.SchemaResolutionException(fail_msg, writers_schema,
-                                                                    readers_schema)
+                    if not field.has_default:
+                        raise avro.errors.SchemaResolutionException(f"No default value for field {field_name}", writers_schema, readers_schema)
+                    field_val = self._read_default_value(field.type, field.default)
+                    read_record[field.name] = field_val
         return read_record
 
     def skip_record(self, writers_schema, decoder):
@@ -946,33 +928,33 @@ class DatumReader:
         """
         Basically a JSON Decoder?
         """
-        if field_schema.type == 'null':
+        if field_schema.type == "null":
             return None
-        elif field_schema.type == 'boolean':
+        elif field_schema.type == "boolean":
             return bool(default_value)
-        elif field_schema.type == 'int':
+        elif field_schema.type == "int":
             return int(default_value)
-        elif field_schema.type == 'long':
+        elif field_schema.type == "long":
             return int(default_value)
-        elif field_schema.type in ['float', 'double']:
+        elif field_schema.type in ["float", "double"]:
             return float(default_value)
-        elif field_schema.type in ['enum', 'fixed', 'string', 'bytes']:
+        elif field_schema.type in ["enum", "fixed", "string", "bytes"]:
             return default_value
-        elif field_schema.type == 'array':
+        elif field_schema.type == "array":
             read_array = []
             for json_val in default_value:
                 item_val = self._read_default_value(field_schema.items, json_val)
                 read_array.append(item_val)
             return read_array
-        elif field_schema.type == 'map':
+        elif field_schema.type == "map":
             read_map = {}
             for key, json_val in default_value.items():
                 map_val = self._read_default_value(field_schema.values, json_val)
                 read_map[key] = map_val
             return read_map
-        elif field_schema.type in ['union', 'error_union']:
+        elif field_schema.type in ["union", "error_union"]:
             return self._read_default_value(field_schema.schemas[0], default_value)
-        elif field_schema.type == 'record':
+        elif field_schema.type == "record":
             read_record = {}
             for field in field_schema.fields:
                 json_val = default_value.get(field.name)
@@ -982,8 +964,7 @@ class DatumReader:
                 read_record[field.name] = field_val
             return read_record
         else:
-            fail_msg = 'Unknown type: %s' % field_schema.type
-            raise avro.errors.AvroException(fail_msg)
+            raise avro.errors.AvroException(f"Unknown type: {field_schema.type}")
 
 
 class DatumWriter:
@@ -995,8 +976,8 @@ class DatumWriter:
     # read/write properties
     def set_writers_schema(self, writers_schema):
         self._writers_schema = writers_schema
-    writers_schema = property(lambda self: self._writers_schema,
-                              set_writers_schema)
+
+    writers_schema = property(lambda self: self._writers_schema, set_writers_schema)
 
     def write(self, datum, encoder):
         validate(self.writers_schema, datum, raise_on_error=True)
@@ -1004,21 +985,21 @@ class DatumWriter:
 
     def write_data(self, writers_schema, datum, encoder):
         # function dispatch to write datum
-        logical_type = getattr(writers_schema, 'logical_type', None)
-        if writers_schema.type == 'null':
+        logical_type = getattr(writers_schema, "logical_type", None)
+        if writers_schema.type == "null":
             encoder.write_null(datum)
-        elif writers_schema.type == 'boolean':
+        elif writers_schema.type == "boolean":
             encoder.write_boolean(datum)
-        elif writers_schema.type == 'string':
+        elif writers_schema.type == "string":
             encoder.write_utf8(datum)
-        elif writers_schema.type == 'int':
+        elif writers_schema.type == "int":
             if logical_type == avro.constants.DATE:
                 encoder.write_date_int(datum)
             elif logical_type == avro.constants.TIME_MILLIS:
                 encoder.write_time_millis_int(datum)
             else:
                 encoder.write_int(datum)
-        elif writers_schema.type == 'long':
+        elif writers_schema.type == "long":
             if logical_type == avro.constants.TIME_MICROS:
                 encoder.write_time_micros_long(datum)
             elif logical_type == avro.constants.TIMESTAMP_MILLIS:
@@ -1027,37 +1008,36 @@ class DatumWriter:
                 encoder.write_timestamp_micros_long(datum)
             else:
                 encoder.write_long(datum)
-        elif writers_schema.type == 'float':
+        elif writers_schema.type == "float":
             encoder.write_float(datum)
-        elif writers_schema.type == 'double':
+        elif writers_schema.type == "double":
             encoder.write_double(datum)
-        elif writers_schema.type == 'bytes':
-            if logical_type == 'decimal':
-                encoder.write_decimal_bytes(datum, writers_schema.get_prop('scale'))
+        elif writers_schema.type == "bytes":
+            if logical_type == "decimal":
+                encoder.write_decimal_bytes(datum, writers_schema.get_prop("scale"))
             else:
                 encoder.write_bytes(datum)
-        elif writers_schema.type == 'fixed':
-            if logical_type == 'decimal':
+        elif writers_schema.type == "fixed":
+            if logical_type == "decimal":
                 encoder.write_decimal_fixed(
                     datum,
-                    writers_schema.get_prop('scale'),
-                    writers_schema.get_prop('size')
+                    writers_schema.get_prop("scale"),
+                    writers_schema.get_prop("size"),
                 )
             else:
                 self.write_fixed(writers_schema, datum, encoder)
-        elif writers_schema.type == 'enum':
+        elif writers_schema.type == "enum":
             self.write_enum(writers_schema, datum, encoder)
-        elif writers_schema.type == 'array':
+        elif writers_schema.type == "array":
             self.write_array(writers_schema, datum, encoder)
-        elif writers_schema.type == 'map':
+        elif writers_schema.type == "map":
             self.write_map(writers_schema, datum, encoder)
-        elif writers_schema.type in ['union', 'error_union']:
+        elif writers_schema.type in ["union", "error_union"]:
             self.write_union(writers_schema, datum, encoder)
-        elif writers_schema.type in ['record', 'error', 'request']:
+        elif writers_schema.type in ["record", "error", "request"]:
             self.write_record(writers_schema, datum, encoder)
         else:
-            fail_msg = 'Unknown type: %s' % writers_schema.type
-            raise avro.errors.AvroException(fail_msg)
+            raise avro.errors.AvroException(f"Unknown type: {writers_schema.type}")
 
     def write_fixed(self, writers_schema, datum, encoder):
         """
diff --git a/lang/py/avro/ipc.py b/lang/py/avro/ipc.py
index 458a722..2c3ff66 100644
--- a/lang/py/avro/ipc.py
+++ b/lang/py/avro/ipc.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -35,12 +33,12 @@ import avro.schema
 def _load(name):
     dir_path = os.path.dirname(__file__)
     rsrc_path = os.path.join(dir_path, name)
-    with open(rsrc_path, 'r') as f:
+    with open(rsrc_path) as f:
         return f.read()
 
 
-HANDSHAKE_REQUEST_SCHEMA_JSON = _load('HandshakeRequest.avsc')
-HANDSHAKE_RESPONSE_SCHEMA_JSON = _load('HandshakeResponse.avsc')
+HANDSHAKE_REQUEST_SCHEMA_JSON = _load("HandshakeRequest.avsc")
+HANDSHAKE_RESPONSE_SCHEMA_JSON = _load("HandshakeResponse.avsc")
 HANDSHAKE_REQUEST_SCHEMA = avro.schema.parse(HANDSHAKE_REQUEST_SCHEMA_JSON)
 HANDSHAKE_RESPONSE_SCHEMA = avro.schema.parse(HANDSHAKE_RESPONSE_SCHEMA_JSON)
 
@@ -59,7 +57,7 @@ SYSTEM_ERROR_SCHEMA = avro.schema.parse('["string"]')
 REMOTE_HASHES = {}
 REMOTE_PROTOCOLS = {}
 
-BIG_ENDIAN_INT_STRUCT = struct.Struct('!I')
+BIG_ENDIAN_INT_STRUCT = struct.Struct("!I")
 BUFFER_HEADER_LENGTH = 4
 BUFFER_SIZE = 8192
 
@@ -86,16 +84,18 @@ class BaseRequestor:
     def set_remote_protocol(self, new_remote_protocol):
         self._remote_protocol = new_remote_protocol
         REMOTE_PROTOCOLS[self.transceiver.remote_name] = self.remote_protocol
-    remote_protocol = property(lambda self: self._remote_protocol,
-                               set_remote_protocol)
+
+    remote_protocol = property(lambda self: self._remote_protocol, set_remote_protocol)
 
     def set_remote_hash(self, new_remote_hash):
         self._remote_hash = new_remote_hash
         REMOTE_HASHES[self.transceiver.remote_name] = self.remote_hash
+
     remote_hash = property(lambda self: self._remote_hash, set_remote_hash)
 
     def set_send_protocol(self, new_send_protocol):
         self._send_protocol = new_send_protocol
+
     send_protocol = property(lambda self: self._send_protocol, set_send_protocol)
 
     def request(self, message_name, request_datum):
@@ -120,10 +120,10 @@ class BaseRequestor:
             remote_hash = local_hash
             self.remote_protocol = self.local_protocol
         request_datum = {}
-        request_datum['clientHash'] = local_hash
-        request_datum['serverHash'] = remote_hash
+        request_datum["clientHash"] = local_hash
+        request_datum["serverHash"] = remote_hash
         if self.send_protocol:
-            request_datum['clientProtocol'] = str(self.local_protocol)
+            request_datum["clientProtocol"] = str(self.local_protocol)
         HANDSHAKE_REQUESTOR_WRITER.write(request_datum, encoder)
 
     def write_call_request(self, message_name, request_datum, encoder):
@@ -141,7 +141,7 @@ class BaseRequestor:
         # message name
         message = self.local_protocol.messages.get(message_name)
         if message is None:
-            raise avro.errors.AvroException('Unknown message: %s' % message_name)
+            raise avro.errors.AvroException(f"Unknown message: {message_name}")
         encoder.write_utf8(message.name)
 
         # message parameters
@@ -153,28 +153,26 @@ class BaseRequestor:
 
     def read_handshake_response(self, decoder):
         handshake_response = HANDSHAKE_REQUESTOR_READER.read(decoder)
-        match = handshake_response.get('match')
-        if match == 'BOTH':
+        match = handshake_response.get("match")
+        if match == "BOTH":
             self.send_protocol = False
             return True
-        elif match == 'CLIENT':
+        elif match == "CLIENT":
             if self.send_protocol:
-                raise avro.errors.AvroException('Handshake failure.')
-            self.remote_protocol = avro.protocol.parse(
-                handshake_response.get('serverProtocol'))
-            self.remote_hash = handshake_response.get('serverHash')
+                raise avro.errors.AvroException("Handshake failure.")
+            self.remote_protocol = avro.protocol.parse(handshake_response.get("serverProtocol"))
+            self.remote_hash = handshake_response.get("serverHash")
             self.send_protocol = False
             return True
-        elif match == 'NONE':
+        elif match == "NONE":
             if self.send_protocol:
-                raise avro.errors.AvroException('Handshake failure.')
-            self.remote_protocol = avro.protocol.parse(
-                handshake_response.get('serverProtocol'))
-            self.remote_hash = handshake_response.get('serverHash')
+                raise avro.errors.AvroException("Handshake failure.")
+            self.remote_protocol = avro.protocol.parse(handshake_response.get("serverProtocol"))
+            self.remote_hash = handshake_response.get("serverHash")
             self.send_protocol = True
             return False
         else:
-            raise avro.errors.AvroException('Unexpected match: %s' % match)
+            raise avro.errors.AvroException(f"Unexpected match: {match}")
 
     def read_call_response(self, message_name, decoder):
         """
@@ -192,12 +190,12 @@ class BaseRequestor:
         # remote response schema
         remote_message_schema = self.remote_protocol.messages.get(message_name)
         if remote_message_schema is None:
-            raise avro.errors.AvroException('Unknown remote message: %s' % message_name)
+            raise avro.errors.AvroException(f"Unknown remote message: {message_name}")
 
         # local response schema
         local_message_schema = self.local_protocol.messages.get(message_name)
         if local_message_schema is None:
-            raise avro.errors.AvroException('Unknown local message: %s' % message_name)
+            raise avro.errors.AvroException(f"Unknown local message: {message_name}")
 
         # error flag
         if not decoder.read_boolean():
@@ -217,7 +215,6 @@ class BaseRequestor:
 
 
 class Requestor(BaseRequestor):
-
     def issue_request(self, call_request, message_name, request_datum):
         call_response = self.transceiver.transceive(call_request)
 
@@ -276,24 +273,23 @@ class Responder:
             # schema resolution (one fine day)
             remote_message = remote_protocol.messages.get(remote_message_name)
             if remote_message is None:
-                fail_msg = 'Unknown remote message: %s' % remote_message_name
+                fail_msg = f"Unknown remote message: {remote_message_name}"
                 raise avro.errors.AvroException(fail_msg)
             local_message = self.local_protocol.messages.get(remote_message_name)
             if local_message is None:
-                fail_msg = 'Unknown local message: %s' % remote_message_name
+                fail_msg = f"Unknown local message: {remote_message_name}"
                 raise avro.errors.AvroException(fail_msg)
             writers_schema = remote_message.request
             readers_schema = local_message.request
-            request = self.read_request(writers_schema, readers_schema,
-                                        buffer_decoder)
+            request = self.read_request(writers_schema, readers_schema, buffer_decoder)
 
             # perform server logic
             try:
                 response = self.invoke(local_message, request)
-            except AvroRemoteException as e:
+            except avro.errors.AvroRemoteException as e:
                 error = e
             except Exception as e:
-                error = AvroRemoteException(str(e))
+                error = avro.errors.AvroRemoteException(str(e))
 
             # write response using local protocol
             META_WRITER.write(response_metadata, buffer_encoder)
@@ -305,7 +301,7 @@ class Responder:
                 writers_schema = local_message.errors
                 self.write_error(writers_schema, error, buffer_encoder)
-        except schema.AvroException as e:
+        except avro.schema.AvroException as e:
-            error = AvroRemoteException(str(e))
+            error = avro.errors.AvroRemoteException(str(e))
             buffer_encoder = avro.io.BinaryEncoder(io.BytesIO())
             META_WRITER.write(response_metadata, buffer_encoder)
             buffer_encoder.write_boolean(True)
@@ -317,29 +313,29 @@ class Responder:
         handshake_response = {}
 
         # determine the remote protocol
-        client_hash = handshake_request.get('clientHash')
-        client_protocol = handshake_request.get('clientProtocol')
+        client_hash = handshake_request.get("clientHash")
+        client_protocol = handshake_request.get("clientProtocol")
         remote_protocol = self.get_protocol_cache(client_hash)
         if remote_protocol is None and client_protocol is not None:
             remote_protocol = avro.protocol.parse(client_protocol)
             self.set_protocol_cache(client_hash, remote_protocol)
 
         # evaluate remote's guess of the local protocol
-        server_hash = handshake_request.get('serverHash')
+        server_hash = handshake_request.get("serverHash")
         if self.local_hash == server_hash:
             if remote_protocol is None:
-                handshake_response['match'] = 'NONE'
+                handshake_response["match"] = "NONE"
             else:
-                handshake_response['match'] = 'BOTH'
+                handshake_response["match"] = "BOTH"
         else:
             if remote_protocol is None:
-                handshake_response['match'] = 'NONE'
+                handshake_response["match"] = "NONE"
             else:
-                handshake_response['match'] = 'CLIENT'
+                handshake_response["match"] = "CLIENT"
 
-        if handshake_response['match'] != 'BOTH':
-            handshake_response['serverProtocol'] = str(self.local_protocol)
-            handshake_response['serverHash'] = self.local_hash
+        if handshake_response["match"] != "BOTH":
+            handshake_response["serverProtocol"] = str(self.local_protocol)
+            handshake_response["serverHash"] = self.local_hash
 
         HANDSHAKE_RESPONDER_WRITER.write(handshake_response, encoder)
         return remote_protocol
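
The handshake bookkeeping above boils down to two independent facts: does the server already know the client's protocol, and did the client guess the server's hash correctly. A minimal sketch of that decision table in plain Python (an illustration of the logic above, not part of the avro API):

```
def handshake_match(server_knows_client: bool, client_guessed_hash: bool) -> str:
    """Mirror the match table in process_handshake above."""
    if client_guessed_hash:
        return "BOTH" if server_knows_client else "NONE"
    return "CLIENT" if server_knows_client else "NONE"

assert handshake_match(True, True) == "BOTH"     # both sides in sync; no protocol resend
assert handshake_match(True, False) == "CLIENT"  # server attaches its protocol and hash
assert handshake_match(False, True) == "NONE"    # server needs the client's protocol
assert handshake_match(False, False) == "NONE"
```
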
@@ -362,6 +358,7 @@ class Responder:
         datum_writer = avro.io.DatumWriter(writers_schema)
         datum_writer.write(str(error_exception), encoder)
 
+
 #
 # Utility classes
 #
@@ -382,17 +379,17 @@ class FramedReader:
             buffer = io.BytesIO()
             buffer_length = self._read_buffer_length()
             if buffer_length == 0:
-                return b''.join(message)
+                return b"".join(message)
             while buffer.tell() < buffer_length:
                 chunk = self.reader.read(buffer_length - buffer.tell())
-                if chunk == '':
+                if chunk == "":
                     raise avro.errors.ConnectionClosedException("Reader read 0 bytes.")
                 buffer.write(chunk)
             message.append(buffer.getvalue())
 
     def _read_buffer_length(self):
         read = self.reader.read(BUFFER_HEADER_LENGTH)
-        if read == '':
+        if read == "":
             raise avro.errors.ConnectionClosedException("Reader read 0 bytes.")
         return BIG_ENDIAN_INT_STRUCT.unpack(read)[0]
 
@@ -414,8 +411,7 @@ class FramedWriter:
                 buffer_length = BUFFER_SIZE
             else:
                 buffer_length = message_length - total_bytes_sent
-            self.write_buffer(message[total_bytes_sent:
-                                      (total_bytes_sent + buffer_length)])
+            self.write_buffer(message[total_bytes_sent : (total_bytes_sent + buffer_length)])
             total_bytes_sent += buffer_length
         # A message is always terminated by a zero-length buffer.
         self.write_buffer_length(0)
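
The framing format used by FramedReader and FramedWriter is simple: each buffer is preceded by a 4-byte big-endian length (the `!I` struct above), and a zero-length buffer terminates the message. A self-contained sketch of the wire format:

```
import io
import struct

BIG_ENDIAN_INT = struct.Struct("!I")  # same header format as BIG_ENDIAN_INT_STRUCT above

def frame(message: bytes, buffer_size: int = 8192) -> bytes:
    """Split a message into length-prefixed buffers, ending with a zero-length buffer."""
    out = io.BytesIO()
    for start in range(0, len(message), buffer_size):
        chunk = message[start : start + buffer_size]
        out.write(BIG_ENDIAN_INT.pack(len(chunk)))
        out.write(chunk)
    out.write(BIG_ENDIAN_INT.pack(0))  # terminator
    return out.getvalue()

print(frame(b"avro").hex())  # 000000046176726f00000000
```
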
@@ -428,6 +424,7 @@ class FramedWriter:
     def write_buffer_length(self, n):
         self.writer.write(BIG_ENDIAN_INT_STRUCT.pack(n))
 
+
 #
 # Transceiver Implementations
 #
@@ -439,7 +436,7 @@ class HTTPTransceiver:
     Useful for clients but not for servers
     """
 
-    def __init__(self, host, port, req_resource='/'):
+    def __init__(self, host, port, req_resource="/"):
         self.req_resource = req_resource
         self.conn = http.client.HTTPConnection(host, port)
         self.conn.connect()
@@ -454,12 +451,12 @@ class HTTPTransceiver:
         response = self.conn.getresponse()
         response_reader = FramedReader(response)
         framed_message = response_reader.read_framed_message()
-        response.read()    # ensure we're ready for subsequent requests
+        response.read()  # ensure we're ready for subsequent requests
         return framed_message
 
     def write_framed_message(self, message):
-        req_method = 'POST'
-        req_headers = {'Content-Type': 'avro/binary'}
+        req_method = "POST"
+        req_headers = {"Content-Type": "avro/binary"}
 
         req_body_buffer = FramedWriter(io.BytesIO())
         req_body_buffer.write_framed_message(message)
@@ -470,6 +467,7 @@ class HTTPTransceiver:
     def close(self):
         self.conn.close()
 
+
 #
 # Server Implementations (none yet)
 #
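
For reference, a client-side sketch of how these pieces fit together. The Echo protocol is hypothetical, the Requestor(local_protocol, transceiver) argument order is assumed from the class above, and an Avro RPC responder must already be listening on the given host and port for this to run:

```
import avro.ipc
import avro.protocol

# Hypothetical protocol; any responder serving it would do.
ECHO_PROTOCOL = avro.protocol.parse(
    '{"protocol": "Echo", "namespace": "example",'
    ' "messages": {"echo": {"request": [{"name": "payload", "type": "string"}],'
    ' "response": "string"}}}'
)

client = avro.ipc.HTTPTransceiver("localhost", 8080)  # connects in the constructor
try:
    requestor = avro.ipc.Requestor(ECHO_PROTOCOL, client)
    print(requestor.request("echo", {"payload": "hello"}))  # handshake + call
finally:
    client.close()
```
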
diff --git a/lang/py/avro/protocol.py b/lang/py/avro/protocol.py
index afe6d63..f9296c9 100644
--- a/lang/py/avro/protocol.py
+++ b/lang/py/avro/protocol.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -32,7 +30,7 @@ import avro.schema
 #
 
 # TODO(hammer): confirmed 'fixed' with Doug
-VALID_TYPE_SCHEMA_TYPES = ('enum', 'record', 'error', 'fixed')
+VALID_TYPE_SCHEMA_TYPES = ("enum", "record", "error", "fixed")
 
 #
 # Base Classes
@@ -47,7 +45,7 @@ class Protocol:
         for type in types:
             type_object = avro.schema.make_avsc_object(type, type_names)
             if type_object.type not in VALID_TYPE_SCHEMA_TYPES:
-                fail_msg = 'Type %s not an enum, fixed, record, or error.' % type
+                fail_msg = f"Type {type} not an enum, fixed, record, or error."
                 raise avro.errors.ProtocolParseException(fail_msg)
             type_objects.append(type_object)
         return type_objects
@@ -56,14 +54,14 @@ class Protocol:
         message_objects = {}
         for name, body in messages.items():
             if name in message_objects:
-                fail_msg = 'Message name "%s" repeated.' % name
+                fail_msg = f'Message name "{name}" repeated.'
                 raise avro.errors.ProtocolParseException(fail_msg)
             try:
-                request = body.get('request')
-                response = body.get('response')
-                errors = body.get('errors')
+                request = body.get("request")
+                response = body.get("response")
+                errors = body.get("errors")
             except AttributeError:
-                fail_msg = 'Message name "%s" has non-object body %s.' % (name, body)
+                fail_msg = f'Message name "{name}" has non-object body {body}.'
                 raise avro.errors.ProtocolParseException(fail_msg)
             message_objects[name] = Message(name, request, response, errors, names)
         return message_objects
@@ -71,41 +69,41 @@ class Protocol:
     def __init__(self, name, namespace=None, types=None, messages=None):
         # Ensure valid ctor args
         if not name:
-            fail_msg = 'Protocols must have a non-empty name.'
+            fail_msg = "Protocols must have a non-empty name."
             raise avro.errors.ProtocolParseException(fail_msg)
         elif not isinstance(name, str):
-            fail_msg = 'The name property must be a string.'
+            fail_msg = "The name property must be a string."
             raise avro.errors.ProtocolParseException(fail_msg)
         elif not (namespace is None or isinstance(namespace, str)):
-            fail_msg = 'The namespace property must be a string.'
+            fail_msg = "The namespace property must be a string."
             raise avro.errors.ProtocolParseException(fail_msg)
         elif not (types is None or isinstance(types, list)):
-            fail_msg = 'The types property must be a list.'
+            fail_msg = "The types property must be a list."
             raise avro.errors.ProtocolParseException(fail_msg)
-        elif not (messages is None or callable(getattr(messages, 'get', None))):
-            fail_msg = 'The messages property must be a JSON object.'
+        elif not (messages is None or callable(getattr(messages, "get", None))):
+            fail_msg = "The messages property must be a JSON object."
             raise avro.errors.ProtocolParseException(fail_msg)
 
         self._props = {}
-        self.set_prop('name', name)
+        self.set_prop("name", name)
         type_names = avro.schema.Names()
         if namespace is not None:
-            self.set_prop('namespace', namespace)
+            self.set_prop("namespace", namespace)
             type_names.default_namespace = namespace
         if types is not None:
-            self.set_prop('types', self._parse_types(types, type_names))
+            self.set_prop("types", self._parse_types(types, type_names))
         if messages is not None:
-            self.set_prop('messages', self._parse_messages(messages, type_names))
+            self.set_prop("messages", self._parse_messages(messages, type_names))
         self._md5 = hashlib.md5(str(self).encode()).digest()
 
     # read-only properties
     @property
     def name(self):
-        return self.get_prop('name')
+        return self.get_prop("name")
 
     @property
     def namespace(self):
-        return self.get_prop('namespace')
+        return self.get_prop("namespace")
 
     @property
     def fullname(self):
@@ -113,7 +111,7 @@ class Protocol:
 
     @property
     def types(self):
-        return self.get_prop('types')
+        return self.get_prop("types")
 
     @property
     def types_dict(self):
@@ -121,7 +119,7 @@ class Protocol:
 
     @property
     def messages(self):
-        return self.get_prop('messages')
+        return self.get_prop("messages")
 
     @property
     def md5(self):
@@ -140,17 +138,17 @@ class Protocol:
 
     def to_json(self):
         to_dump = {}
-        to_dump['protocol'] = self.name
+        to_dump["protocol"] = self.name
         names = avro.schema.Names(default_namespace=self.namespace)
         if self.namespace:
-            to_dump['namespace'] = self.namespace
+            to_dump["namespace"] = self.namespace
         if self.types:
-            to_dump['types'] = [t.to_json(names) for t in self.types]
+            to_dump["types"] = [t.to_json(names) for t in self.types]
         if self.messages:
             messages_dict = {}
             for name, body in self.messages.items():
                 messages_dict[name] = body.to_json(names)
-            to_dump['messages'] = messages_dict
+            to_dump["messages"] = messages_dict
         return to_dump
 
     def __str__(self):
@@ -166,9 +164,9 @@ class Message:
 
     def _parse_request(self, request, names):
         if not isinstance(request, list):
-            fail_msg = 'Request property not a list: %s' % request
+            fail_msg = f"Request property not a list: {request}"
             raise avro.errors.ProtocolParseException(fail_msg)
-        return avro.schema.RecordSchema(None, None, request, names, 'request')
+        return avro.schema.RecordSchema(None, None, request, names, "request")
 
     def _parse_response(self, response, names):
         if isinstance(response, str) and names.has_name(response, None):
@@ -178,24 +176,24 @@ class Message:
 
     def _parse_errors(self, errors, names):
         if not isinstance(errors, list):
-            fail_msg = 'Errors property not a list: %s' % errors
+            fail_msg = f"Errors property not a list: {errors}"
             raise avro.errors.ProtocolParseException(fail_msg)
-        errors_for_parsing = {'type': 'error_union', 'declared_errors': errors}
+        errors_for_parsing = {"type": "error_union", "declared_errors": errors}
         return avro.schema.make_avsc_object(errors_for_parsing, names)
 
     def __init__(self, name, request, response, errors=None, names=None):
         self._name = name
 
         self._props = {}
-        self.set_prop('request', self._parse_request(request, names))
-        self.set_prop('response', self._parse_response(response, names))
-        self.set_prop('errors', self._parse_errors(errors or [], names))
+        self.set_prop("request", self._parse_request(request, names))
+        self.set_prop("response", self._parse_response(response, names))
+        self.set_prop("errors", self._parse_errors(errors or [], names))
 
     # read-only properties
     name = property(lambda self: self._name)
-    request = property(lambda self: self.get_prop('request'))
-    response = property(lambda self: self.get_prop('response'))
-    errors = property(lambda self: self.get_prop('errors'))
+    request = property(lambda self: self.get_prop("request"))
+    response = property(lambda self: self.get_prop("response"))
+    errors = property(lambda self: self.get_prop("errors"))
     props = property(lambda self: self._props)
 
     # utility functions to manipulate properties dict
@@ -212,10 +210,10 @@ class Message:
         names = names or avro.schema.Names()
 
         to_dump = {}
-        to_dump['request'] = self.request.to_json(names)
-        to_dump['response'] = self.response.to_json(names)
+        to_dump["request"] = self.request.to_json(names)
+        to_dump["response"] = self.response.to_json(names)
         if self.errors:
-            to_dump['errors'] = self.errors.to_json(names)
+            to_dump["errors"] = self.errors.to_json(names)
 
         return to_dump
 
@@ -226,12 +224,12 @@ class Message:
 def make_avpr_object(json_data):
     """Build Avro Protocol from data parsed out of JSON string."""
     try:
-        name = json_data.get('protocol')
-        namespace = json_data.get('namespace')
-        types = json_data.get('types')
-        messages = json_data.get('messages')
+        name = json_data.get("protocol")
+        namespace = json_data.get("namespace")
+        types = json_data.get("types")
+        messages = json_data.get("messages")
     except AttributeError:
-        raise avro.errors.ProtocolParseException('Not a JSON object: %s' % json_data)
+        raise avro.errors.ProtocolParseException(f"Not a JSON object: {json_data}")
 
     return Protocol(name, namespace, types, messages)
 
@@ -241,7 +239,7 @@ def parse(json_string):
     try:
         json_data = json.loads(json_string)
     except ValueError:
-        raise avro.errors.ProtocolParseException('Error parsing JSON: %s' % json_string)
+        raise avro.errors.ProtocolParseException(f"Error parsing JSON: {json_string}")
 
     # construct the Avro Protocol object
     return make_avpr_object(json_data)
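
A quick usage note: parse takes the JSON text of an .avpr file and returns a Protocol whose name, namespace, types, messages, and md5 are the properties defined above. A small, self-contained example:

```
import avro.protocol

AVPR = """
{
  "protocol": "Mail",
  "namespace": "example.proto",
  "types": [
    {"type": "record", "name": "Message", "fields": [
      {"name": "to", "type": "string"},
      {"name": "body", "type": "string"}
    ]}
  ],
  "messages": {
    "send": {"request": [{"name": "message", "type": "Message"}], "response": "string"}
  }
}
"""

proto = avro.protocol.parse(AVPR)
print(proto.name, proto.namespace)        # Mail example.proto
print([t.fullname for t in proto.types])  # ['example.proto.Message']
print(list(proto.messages))               # ['send']
print(len(proto.md5))                     # 16 (MD5 digest of the protocol's JSON text)
```
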
diff --git a/lang/py/avro/schema.py b/lang/py/avro/schema.py
index 2f4152c..43c264d 100644
--- a/lang/py/avro/schema.py
+++ b/lang/py/avro/schema.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -60,68 +59,62 @@ import avro.errors
 # The name portion of a fullname, record field names, and enum symbols must:
 # start with [A-Za-z_]
 # subsequently contain only [A-Za-z0-9_]
-_BASE_NAME_PATTERN = re.compile(r'(?:^|\.)[A-Za-z_][A-Za-z0-9_]*$')
+_BASE_NAME_PATTERN = re.compile(r"(?:^|\.)[A-Za-z_][A-Za-z0-9_]*$")
 
 PRIMITIVE_TYPES = (
-    'null',
-    'boolean',
-    'string',
-    'bytes',
-    'int',
-    'long',
-    'float',
-    'double',
+    "null",
+    "boolean",
+    "string",
+    "bytes",
+    "int",
+    "long",
+    "float",
+    "double",
 )
 
 NAMED_TYPES = (
-    'fixed',
-    'enum',
-    'record',
-    'error',
+    "fixed",
+    "enum",
+    "record",
+    "error",
 )
 
-VALID_TYPES = PRIMITIVE_TYPES + NAMED_TYPES + (
-    'array',
-    'map',
-    'union',
-    'request',
-    'error_union'
-)
+VALID_TYPES = PRIMITIVE_TYPES + NAMED_TYPES + ("array", "map", "union", "request", "error_union")
 
 SCHEMA_RESERVED_PROPS = (
-    'type',
-    'name',
-    'namespace',
-    'fields',     # Record
-    'items',      # Array
-    'size',       # Fixed
-    'symbols',    # Enum
-    'values',     # Map
-    'doc',
+    "type",
+    "name",
+    "namespace",
+    "fields",  # Record
+    "items",  # Array
+    "size",  # Fixed
+    "symbols",  # Enum
+    "values",  # Map
+    "doc",
 )
 
 FIELD_RESERVED_PROPS = (
-    'default',
-    'name',
-    'doc',
-    'order',
-    'type',
+    "default",
+    "name",
+    "doc",
+    "order",
+    "type",
 )
 
 VALID_FIELD_SORT_ORDERS = (
-    'ascending',
-    'descending',
-    'ignore',
+    "ascending",
+    "descending",
+    "ignore",
 )
 
 CANONICAL_FIELD_ORDER = (
-    'name',
-    'type',
-    'fields',
-    'symbols',
-    'items',
-    'values',
-    'size',
+    "name",
+    "type",
+    "fields",
+    "symbols",
+    "items",
+    "values",
+    "size",
 )
 
 INT_MIN_VALUE = -(1 << 31)
@@ -133,10 +126,7 @@ LONG_MAX_VALUE = (1 << 63) - 1
 def validate_basename(basename):
     """Raise InvalidName if the given basename is not a valid name."""
     if not _BASE_NAME_PATTERN.search(basename):
-        raise avro.errors.InvalidName(
-                "{!s} is not a valid Avro name because it "
-                "does not match the pattern {!s}".format(
-                    basename, _BASE_NAME_PATTERN.pattern))
+        raise avro.errors.InvalidName(f"{basename!s} is not a valid Avro name because it does not match the pattern {_BASE_NAME_PATTERN.pattern!s}")
 
 
 def _is_timezone_aware_datetime(dt):
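
The rule validate_basename enforces is the one in the comment further up: a basename must start with [A-Za-z_] and continue with only [A-Za-z0-9_]. For example:

```
import avro.errors
import avro.schema

avro.schema.validate_basename("user_id")  # valid: returns quietly

try:
    avro.schema.validate_basename("9lives")  # invalid: starts with a digit
except avro.errors.InvalidName as e:
    print(e)
```
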
@@ -147,8 +137,10 @@ def _is_timezone_aware_datetime(dt):
 # Base Classes
 #
 
+
 class EqualByJsonMixin:
     """Equal if the json serializations are equal."""
+
     def __eq__(self, that):
         try:
             that_str = json.loads(str(that))
@@ -159,6 +151,7 @@ class EqualByJsonMixin:
 
 class EqualByPropsMixin:
     """Equal if the props are equal."""
+
     def __eq__(self, that):
         try:
             return self.props == that.props
@@ -168,32 +161,31 @@ class EqualByPropsMixin:
 
 class CanonicalPropertiesMixin:
     """A Mixin that provides canonical properties to Schema and Field types."""
+
     @property
     def canonical_properties(self):
         props = self.props
-        return collections.OrderedDict(
-            (key, props[key])
-            for key in CANONICAL_FIELD_ORDER
-            if key in props)
+        return collections.OrderedDict((key, props[key]) for key in CANONICAL_FIELD_ORDER if key in props)
 
 
 class Schema(abc.ABC, CanonicalPropertiesMixin):
     """Base class for all Schema classes."""
+
     _props = None
 
     def __init__(self, type, other_props=None):
         # Ensure valid ctor args
         if not isinstance(type, str):
-            fail_msg = 'Schema type must be a string.'
+            fail_msg = "Schema type must be a string."
             raise avro.errors.SchemaParseException(fail_msg)
         elif type not in VALID_TYPES:
-            fail_msg = '%s is not a valid type.' % type
+            fail_msg = f"{type} is not a valid type."
             raise avro.errors.SchemaParseException(fail_msg)
 
         # add members
         if self._props is None:
             self._props = {}
-        self.set_prop('type', type)
+        self.set_prop("type", type)
         self.type = type
         self._props.update(other_props or {})
 
@@ -310,18 +302,18 @@ class Name:
         if name_attr is None:
             return
         if name_attr == "":
-            raise avro.errors.SchemaParseException('Name must not be the empty string.')
+            raise avro.errors.SchemaParseException("Name must not be the empty string.")
 
-        if '.' in name_attr or space_attr == "" or not (space_attr or default_space):
+        if "." in name_attr or space_attr == "" or not (space_attr or default_space):
             # The empty string may be used as a namespace to indicate the null namespace.
             self._full = name_attr
         else:
-            self._full = "{!s}.{!s}".format(space_attr or default_space, name_attr)
+            self._full = f"{space_attr or default_space!s}.{name_attr!s}"
 
         self._validate_fullname(self._full)
 
     def _validate_fullname(self, fullname):
-        for name in fullname.split('.'):
+        for name in fullname.split("."):
             validate_basename(name)
 
     def __eq__(self, other):
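
Putting the branches above together: a dotted name wins outright, an explicit space_attr beats the default namespace, and the default is the fallback. A small sketch, assuming the Name(name_attr, space_attr, default_space) positional order used above and the fullname property:

```
from avro.schema import Name

print(Name("Rec", "example.ns", None).fullname)             # example.ns.Rec
print(Name("other.ns.Rec", "ignored", "example").fullname)  # other.ns.Rec (dotted name wins)
print(Name("Rec", None, "default.ns").fullname)             # default.ns.Rec
```
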
@@ -343,7 +335,7 @@ class Name:
         return self._full.rsplit(".", 1)[0] if "." in self._full else None
 
     def get_space(self):
-        warnings.warn('Name.get_space() is deprecated in favor of Name.space')
+        warnings.warn("Name.get_space() is deprecated in favor of Name.space")
         return self.space
 
 
@@ -369,17 +361,17 @@ class Names:
             # I have no default -- no change
             return properties
 
-        if 'namespace' not in properties:
+        if "namespace" not in properties:
             # he has no namespace - no change
             return properties
 
-        if properties['namespace'] != self.default_namespace:
+        if properties["namespace"] != self.default_namespace:
             # we're different - leave his stuff alone
             return properties
 
         # we each have a namespace and it's redundant. delete his.
         prunable = properties.copy()
-        del(prunable['namespace'])
+        del prunable["namespace"]
         return prunable
 
     def add_name(self, name_attr, space_attr, new_schema):
@@ -394,10 +386,10 @@ class Names:
         to_add = Name(name_attr, space_attr, self.default_namespace)
 
         if to_add.fullname in VALID_TYPES:
-            fail_msg = '%s is a reserved type name.' % to_add.fullname
+            fail_msg = f"{to_add.fullname} is a reserved type name."
             raise avro.errors.SchemaParseException(fail_msg)
         elif to_add.fullname in self.names:
-            fail_msg = 'The name "%s" is already in use.' % to_add.fullname
+            fail_msg = f'The name "{to_add.fullname}" is already in use.'
             raise avro.errors.SchemaParseException(fail_msg)
 
         self.names[to_add.fullname] = new_schema
@@ -439,10 +431,11 @@ class NamedSchema(Schema):
         return self.name if self.namespace == names.default_namespace else self.fullname
 
     # read-only properties
-    name = property(lambda self: self.get_prop('name'))
-    namespace = property(lambda self: self.get_prop('namespace'))
+    name = property(lambda self: self.get_prop("name"))
+    namespace = property(lambda self: self.get_prop("namespace"))
     fullname = property(lambda self: self._fullname)
 
+
 #
 # Logical type class
 #
@@ -452,6 +445,7 @@ class LogicalSchema:
     def __init__(self, logical_type):
         self.logical_type = logical_type
 
+
 #
 # Decimal logical schema
 #
@@ -460,37 +454,41 @@ class LogicalSchema:
 class DecimalLogicalSchema(LogicalSchema):
     def __init__(self, precision, scale=0, max_precision=0):
         if not isinstance(precision, int) or precision <= 0:
-            raise avro.errors.IgnoredLogicalType(
-                "Invalid decimal precision {}. Must be a positive integer.".format(precision))
+            raise avro.errors.IgnoredLogicalType(f"Invalid decimal precision {precision}. Must be a positive integer.")
 
         if precision > max_precision:
-            raise avro.errors.IgnoredLogicalType(
-                "Invalid decimal precision {}. Max is {}.".format(precision, max_precision))
+            raise avro.errors.IgnoredLogicalType(f"Invalid decimal precision {precision}. Max is {max_precision}.")
 
         if not isinstance(scale, int) or scale < 0:
-            raise avro.errors.IgnoredLogicalType(
-                "Invalid decimal scale {}. Must be a positive integer.".format(scale))
+            raise avro.errors.IgnoredLogicalType(f"Invalid decimal scale {scale}. Must be a positive integer.")
 
         if scale > precision:
-            raise avro.errors.IgnoredLogicalType(
-                    "Invalid decimal scale {}. Cannot be greater than precision {}.".format(
-                        scale, precision))
+            raise avro.errors.IgnoredLogicalType(f"Invalid decimal scale {scale}. Cannot be greater than precision {precision}.")
 
-        super(DecimalLogicalSchema, self).__init__('decimal')
+        super().__init__("decimal")
 
 
 class Field(CanonicalPropertiesMixin, EqualByJsonMixin):
-    def __init__(self, type, name, has_default, default=None,
-                 order=None, names=None, doc=None, other_props=None):
+    def __init__(
+        self,
+        type,
+        name,
+        has_default,
+        default=None,
+        order=None,
+        names=None,
+        doc=None,
+        other_props=None,
+    ):
         # Ensure valid ctor args
         if not name:
-            fail_msg = 'Fields must have a non-empty name.'
+            fail_msg = "Fields must have a non-empty name."
             raise avro.errors.SchemaParseException(fail_msg)
         elif not isinstance(name, str):
-            fail_msg = 'The name property must be a string.'
+            fail_msg = "The name property must be a string."
             raise avro.errors.SchemaParseException(fail_msg)
         elif order is not None and order not in VALID_FIELD_SORT_ORDERS:
-            fail_msg = 'The order property %s is not valid.' % order
+            fail_msg = f"The order property {order} is not valid."
             raise avro.errors.SchemaParseException(fail_msg)
 
         # add members
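
The DecimalLogicalSchema checks above are observable through parse: a well-formed bytes decimal keeps its precision and scale, while a violation (here scale greater than precision) appears to fall back to the plain literal type with an IgnoredLogicalType warning. A small sketch of that behavior:

```
import warnings

import avro.schema

ok = avro.schema.parse('{"type": "bytes", "logicalType": "decimal", "precision": 4, "scale": 2}')
print(ok.precision, ok.scale)  # 4 2

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    bad = avro.schema.parse('{"type": "bytes", "logicalType": "decimal", "precision": 2, "scale": 3}')
print(bad.type, len(caught))  # bytes 1  (logical type ignored, warning emitted)
```
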
@@ -498,39 +496,40 @@ class Field(CanonicalPropertiesMixin, EqualByJsonMixin):
         self._has_default = has_default
         self._props.update(other_props or {})
 
-        if (isinstance(type, str) and names is not None and
-                names.has_name(type, None)):
+        if isinstance(type, str) and names is not None and names.has_name(type, None):
             type_schema = names.get_name(type, None)
         else:
             try:
                 type_schema = make_avsc_object(type, names)
             except Exception as e:
-                fail_msg = 'Type property "%s" not a valid Avro schema: %s' % (type, e)
+                fail_msg = f'Type property "{type}" not a valid Avro schema: {e}'
                 raise avro.errors.SchemaParseException(fail_msg)
-        self.set_prop('type', type_schema)
-        self.set_prop('name', name)
+        self.set_prop("type", type_schema)
+        self.set_prop("name", name)
         self.type = type_schema
         self.name = name
         # TODO(hammer): check to ensure default is valid
         if has_default:
-            self.set_prop('default', default)
+            self.set_prop("default", default)
         if order is not None:
-            self.set_prop('order', order)
+            self.set_prop("order", order)
         if doc is not None:
-            self.set_prop('doc', doc)
+            self.set_prop("doc", doc)
 
     # read-only properties
-    default = property(lambda self: self.get_prop('default'))
+    default = property(lambda self: self.get_prop("default"))
     has_default = property(lambda self: self._has_default)
-    order = property(lambda self: self.get_prop('order'))
-    doc = property(lambda self: self.get_prop('doc'))
+    order = property(lambda self: self.get_prop("order"))
+    doc = property(lambda self: self.get_prop("doc"))
     props = property(lambda self: self._props)
 
     # Read-only property dict. Non-reserved properties
-    other_props = property(lambda self: get_other_props(self._props, FIELD_RESERVED_PROPS),
-                           doc="dictionary of non-reserved properties")
+    other_props = property(
+        lambda self: get_other_props(self._props, FIELD_RESERVED_PROPS),
+        doc="dictionary of non-reserved properties",
+    )
 
-# utility functions to manipulate properties dict
+    # utility functions to manipulate properties dict
     def get_prop(self, key):
         return self._props.get(key)
 
@@ -544,7 +543,7 @@ class Field(CanonicalPropertiesMixin, EqualByJsonMixin):
         names = names or Names()
 
         to_dump = self.props.copy()
-        to_dump['type'] = self.type.to_json(names)
+        to_dump["type"] = self.type.to_json(names)
 
         return to_dump
 
@@ -556,6 +555,7 @@ class Field(CanonicalPropertiesMixin, EqualByJsonMixin):
 
         return to_dump
 
+
 #
 # Primitive Types
 #
@@ -565,20 +565,20 @@ class PrimitiveSchema(EqualByPropsMixin, Schema):
     """Valid primitive types are in PRIMITIVE_TYPES."""
 
     _validators = {
-        'null': lambda x: x is None,
-        'boolean': lambda x: isinstance(x, bool),
-        'string': lambda x: isinstance(x, str),
-        'bytes': lambda x: isinstance(x, bytes),
-        'int': lambda x: isinstance(x, int) and INT_MIN_VALUE <= x <= INT_MAX_VALUE,
-        'long': lambda x: isinstance(x, int) and LONG_MIN_VALUE <= x <= LONG_MAX_VALUE,
-        'float': lambda x: isinstance(x, (int, float)),
-        'double': lambda x: isinstance(x, (int, float)),
+        "null": lambda x: x is None,
+        "boolean": lambda x: isinstance(x, bool),
+        "string": lambda x: isinstance(x, str),
+        "bytes": lambda x: isinstance(x, bytes),
+        "int": lambda x: isinstance(x, int) and INT_MIN_VALUE <= x <= INT_MAX_VALUE,
+        "long": lambda x: isinstance(x, int) and LONG_MIN_VALUE <= x <= LONG_MAX_VALUE,
+        "float": lambda x: isinstance(x, (int, float)),
+        "double": lambda x: isinstance(x, (int, float)),
     }
 
     def __init__(self, type, other_props=None):
         # Ensure valid ctor args
         if type not in PRIMITIVE_TYPES:
-            raise avro.errors.AvroException("%s is not a valid primitive type." % type)
+            raise avro.errors.AvroException(f"{type} is not a valid primitive type.")
 
         # Call parent ctor
         Schema.__init__(self, type, other_props=other_props)
@@ -591,11 +591,18 @@ class PrimitiveSchema(EqualByPropsMixin, Schema):
         @arg writer: the schema to match against
         @return bool
         """
-        return self.type == writer.type or {
-            'float': self.type == 'double',
-            'int': self.type in {'double', 'float', 'long'},
-            'long': self.type in {'double', 'float', },
-        }.get(writer.type, False)
+        return (
+            self.type == writer.type
+            or {
+                "float": self.type == "double",
+                "int": self.type in {"double", "float", "long"},
+                "long": self.type
+                in {
+                    "double",
+                    "float",
+                },
+            }.get(writer.type, False)
+        )
 
     def to_json(self, names=None):
         if len(self.props) == 1:
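
The mapping above implements Avro's numeric promotions for schema resolution: a writer's int can be read as long, float, or double, and a long as float or double, but never the reverse. For example:

```
import avro.schema

int_writer = avro.schema.parse('"int"')
print(avro.schema.parse('"long"').match(int_writer))    # True: int promotes to long
print(avro.schema.parse('"double"').match(int_writer))  # True: int promotes to double
print(int_writer.match(avro.schema.parse('"long"')))    # False: no demotion to int
```
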
@@ -615,6 +622,7 @@ class PrimitiveSchema(EqualByPropsMixin, Schema):
         validator = self._validators.get(self.type, lambda x: False)
         return self if validator(datum) else None
 
+
 #
 # Decimal Bytes Type
 #
@@ -623,13 +631,13 @@ class PrimitiveSchema(EqualByPropsMixin, Schema):
 class BytesDecimalSchema(PrimitiveSchema, DecimalLogicalSchema):
     def __init__(self, precision, scale=0, other_props=None):
         DecimalLogicalSchema.__init__(self, precision, scale, max_precision=((1 << 31) - 1))
-        PrimitiveSchema.__init__(self, 'bytes', other_props)
-        self.set_prop('precision', precision)
-        self.set_prop('scale', scale)
+        PrimitiveSchema.__init__(self, "bytes", other_props)
+        self.set_prop("precision", precision)
+        self.set_prop("scale", scale)
 
     # read-only properties
-    precision = property(lambda self: self.get_prop('precision'))
-    scale = property(lambda self: self.get_prop('scale'))
+    precision = property(lambda self: self.get_prop("precision"))
+    scale = property(lambda self: self.get_prop("scale"))
 
     def to_json(self, names=None):
         return self.props
@@ -646,17 +654,17 @@ class FixedSchema(EqualByPropsMixin, NamedSchema):
     def __init__(self, name, namespace, size, names=None, other_props=None):
         # Ensure valid ctor args
         if not isinstance(size, int) or size < 0:
-            fail_msg = 'Fixed Schema requires a valid positive integer for size property.'
+            fail_msg = "Fixed Schema requires a valid positive integer for size property."
             raise avro.errors.AvroException(fail_msg)
 
         # Call parent ctor
-        NamedSchema.__init__(self, 'fixed', name, namespace, names, other_props)
+        NamedSchema.__init__(self, "fixed", name, namespace, names, other_props)
 
         # Add class members
-        self.set_prop('size', size)
+        self.set_prop("size", size)
 
     # read-only properties
-    size = property(lambda self: self.get_prop('size'))
+    size = property(lambda self: self.get_prop("size"))
 
     def match(self, writer):
         """Return True if the current schema (as reader) matches the writer schema.
@@ -664,7 +672,7 @@ class FixedSchema(EqualByPropsMixin, NamedSchema):
         @arg writer: the schema to match against
         @return bool
         """
-        return self.type == writer.type and self.check_props(writer, ['fullname', 'size'])
+        return self.type == writer.type and self.check_props(writer, ["fullname", "size"])
 
     def to_json(self, names=None):
         names = names or Names()
@@ -685,22 +693,32 @@ class FixedSchema(EqualByPropsMixin, NamedSchema):
         """Return self if datum is a valid representation of this schema, else None."""
         return self if isinstance(datum, bytes) and len(datum) == self.size else None
 
+
 #
 # Decimal Fixed Type
 #
 
 
 class FixedDecimalSchema(FixedSchema, DecimalLogicalSchema):
-    def __init__(self, size, name, precision, scale=0, namespace=None, names=None, other_props=None):
+    def __init__(
+        self,
+        size,
+        name,
+        precision,
+        scale=0,
+        namespace=None,
+        names=None,
+        other_props=None,
+    ):
         max_precision = int(math.floor(math.log10(2) * (8 * size - 1)))
         DecimalLogicalSchema.__init__(self, precision, scale, max_precision)
         FixedSchema.__init__(self, name, namespace, size, names, other_props)
-        self.set_prop('precision', precision)
-        self.set_prop('scale', scale)
+        self.set_prop("precision", precision)
+        self.set_prop("scale", scale)
 
     # read-only properties
-    precision = property(lambda self: self.get_prop('precision'))
-    scale = property(lambda self: self.get_prop('scale'))
+    precision = property(lambda self: self.get_prop("precision"))
+    scale = property(lambda self: self.get_prop("scale"))
 
     def to_json(self, names=None):
         return self.props
@@ -711,7 +729,16 @@ class FixedDecimalSchema(FixedSchema, DecimalLogicalSchema):
 
 
 class EnumSchema(EqualByPropsMixin, NamedSchema):
-    def __init__(self, name, namespace, symbols, names=None, doc=None, other_props=None, validate_enum_symbols=True):
+    def __init__(
+        self,
+        name,
+        namespace,
+        symbols,
+        names=None,
+        doc=None,
+        other_props=None,
+        validate_enum_symbols=True,
+    ):
         """
         @arg validate_enum_symbols: If False, will allow enum symbols that are not valid Avro names.
         """
@@ -723,20 +750,20 @@ class EnumSchema(EqualByPropsMixin, NamedSchema):
                     raise avro.errors.InvalidName("An enum symbol must be a valid schema name.")
 
         if len(set(symbols)) < len(symbols):
-            fail_msg = 'Duplicate symbol: %s' % symbols
+            fail_msg = f"Duplicate symbol: {symbols}"
             raise avro.errors.AvroException(fail_msg)
 
         # Call parent ctor
-        NamedSchema.__init__(self, 'enum', name, namespace, names, other_props)
+        NamedSchema.__init__(self, "enum", name, namespace, names, other_props)
 
         # Add class members
-        self.set_prop('symbols', symbols)
+        self.set_prop("symbols", symbols)
         if doc is not None:
-            self.set_prop('doc', doc)
+            self.set_prop("doc", doc)
 
     # read-only properties
-    symbols = property(lambda self: self.get_prop('symbols'))
-    doc = property(lambda self: self.get_prop('doc'))
+    symbols = property(lambda self: self.get_prop("symbols"))
+    doc = property(lambda self: self.get_prop("doc"))
 
     def match(self, writer):
         """Return True if the current schema (as reader) matches the writer schema.
@@ -744,7 +771,7 @@ class EnumSchema(EqualByPropsMixin, NamedSchema):
         @arg writer: the schema to match against
         @return bool
         """
-        return self.type == writer.type and self.check_props(writer, ['fullname'])
+        return self.type == writer.type and self.check_props(writer, ["fullname"])
 
     def to_json(self, names=None):
         names = names or Names()
@@ -770,6 +797,7 @@ class EnumSchema(EqualByPropsMixin, NamedSchema):
         """Return self if datum is a valid member of this Enum, else None."""
         return self if datum in self.symbols else None
 
+
 #
 # Complex Types (recursive)
 #
@@ -778,7 +806,7 @@ class EnumSchema(EqualByPropsMixin, NamedSchema):
 class ArraySchema(EqualByJsonMixin, Schema):
     def __init__(self, items, names=None, other_props=None):
         # Call parent ctor
-        Schema.__init__(self, 'array', other_props)
+        Schema.__init__(self, "array", other_props)
         # Add class members
 
         if isinstance(items, str) and names.has_name(items, None):
@@ -787,13 +815,13 @@ class ArraySchema(EqualByJsonMixin, Schema):
             try:
                 items_schema = make_avsc_object(items, names)
             except avro.errors.SchemaParseException as e:
-                fail_msg = 'Items schema (%s) not a valid Avro schema: %s (known names: %s)' % (items, e, names.names.keys())
+                fail_msg = f"Items schema ({items}) not a valid Avro schema: {e} (known names: {names.names.keys()})"
                 raise avro.errors.SchemaParseException(fail_msg)
 
-        self.set_prop('items', items_schema)
+        self.set_prop("items", items_schema)
 
     # read-only properties
-    items = property(lambda self: self.get_prop('items'))
+    items = property(lambda self: self.get_prop("items"))
 
     def match(self, writer):
         """Return True if the current schema (as reader) matches the writer schema.
@@ -801,14 +829,14 @@ class ArraySchema(EqualByJsonMixin, Schema):
         @arg writer: the schema to match against
         @return bool
         """
-        return self.type == writer.type and self.items.check_props(writer.items, ['type'])
+        return self.type == writer.type and self.items.check_props(writer.items, ["type"])
 
     def to_json(self, names=None):
         names = names or Names()
 
         to_dump = self.props.copy()
-        item_schema = self.get_prop('items')
-        to_dump['items'] = item_schema.to_json(names)
+        item_schema = self.get_prop("items")
+        to_dump["items"] = item_schema.to_json(names)
 
         return to_dump
 
@@ -829,7 +857,7 @@ class ArraySchema(EqualByJsonMixin, Schema):
 class MapSchema(EqualByJsonMixin, Schema):
     def __init__(self, values, names=None, other_props=None):
         # Call parent ctor
-        Schema.__init__(self, 'map', other_props)
+        Schema.__init__(self, "map", other_props)
 
         # Add class members
         if isinstance(values, str) and names.has_name(values, None):
@@ -840,12 +868,12 @@ class MapSchema(EqualByJsonMixin, Schema):
             except avro.errors.SchemaParseException:
                 raise
             except Exception:
-                raise avro.errors.SchemaParseException('Values schema is not a valid Avro schema.')
+                raise avro.errors.SchemaParseException("Values schema is not a valid Avro schema.")
 
-        self.set_prop('values', values_schema)
+        self.set_prop("values", values_schema)
 
     # read-only properties
-    values = property(lambda self: self.get_prop('values'))
+    values = property(lambda self: self.get_prop("values"))
 
     def match(self, writer):
         """Return True if the current schema (as reader) matches the writer schema.
@@ -853,13 +881,13 @@ class MapSchema(EqualByJsonMixin, Schema):
         @arg writer: the schema to match against
         @return bool
         """
-        return writer.type == self.type and self.values.check_props(writer.values, ['type'])
+        return writer.type == self.type and self.values.check_props(writer.values, ["type"])
 
     def to_json(self, names=None):
         names = names or Names()
 
         to_dump = self.props.copy()
-        to_dump['values'] = self.get_prop('values').to_json(names)
+        to_dump["values"] = self.get_prop("values").to_json(names)
 
         return to_dump
 
@@ -884,11 +912,11 @@ class UnionSchema(EqualByJsonMixin, Schema):
     def __init__(self, schemas, names=None):
         # Ensure valid ctor args
         if not isinstance(schemas, list):
-            fail_msg = 'Union schema requires a list of schemas.'
+            fail_msg = "Union schema requires a list of schemas."
             raise avro.errors.SchemaParseException(fail_msg)
 
         # Call parent ctor
-        Schema.__init__(self, 'union')
+        Schema.__init__(self, "union")
 
         # Add class members
         schema_objects = []
@@ -899,13 +927,16 @@ class UnionSchema(EqualByJsonMixin, Schema):
                 try:
                     new_schema = make_avsc_object(schema, names)
                 except Exception as e:
-                    raise avro.errors.SchemaParseException('Union item must be a valid Avro schema: %s' % str(e))
+                    raise avro.errors.SchemaParseException(f"Union item must be a valid Avro schema: {e}")
             # check the new schema
-            if (new_schema.type in VALID_TYPES and new_schema.type not in NAMED_TYPES and
-                    new_schema.type in [schema.type for schema in schema_objects]):
-                raise avro.errors.SchemaParseException('%s type already in Union' % new_schema.type)
-            elif new_schema.type == 'union':
-                raise avro.errors.SchemaParseException('Unions cannot contain other unions.')
+            if (
+                new_schema.type in VALID_TYPES
+                and new_schema.type not in NAMED_TYPES
+                and new_schema.type in [schema.type for schema in schema_objects]
+            ):
+                raise avro.errors.SchemaParseException(f"{new_schema.type} type already in Union")
+            elif new_schema.type == "union":
+                raise avro.errors.SchemaParseException("Unions cannot contain other unions.")
             else:
                 schema_objects.append(new_schema)
         self._schemas = schema_objects
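
Those two constraints are easy to trip over from the JSON side: an unnamed type may appear at most once per union, and unions may not nest. For example:

```
import avro.errors
import avro.schema

avro.schema.parse('["null", "string"]')  # fine

for bad in ('["string", "string"]',          # duplicate unnamed branch
            '["int", ["null", "string"]]'):  # union inside a union
    try:
        avro.schema.parse(bad)
    except avro.errors.SchemaParseException as e:
        print(e)
```
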
@@ -919,7 +950,7 @@ class UnionSchema(EqualByJsonMixin, Schema):
         @arg writer: the schema to match against
         @return bool
         """
-        return writer.type in {'union', 'error_union'} or any(s.match(writer) for s in self.schemas)
+        return writer.type in {"union", "error_union"} or any(s.match(writer) for s in self.schemas)
 
     def to_json(self, names=None):
         names = names or Names()
@@ -945,7 +976,7 @@ class UnionSchema(EqualByJsonMixin, Schema):
 class ErrorUnionSchema(UnionSchema):
     def __init__(self, schemas, names=None):
         # Prepend "string" to handle system errors
-        UnionSchema.__init__(self, ['string'] + schemas, names)
+        UnionSchema.__init__(self, ["string"] + schemas, names)
 
     def to_json(self, names=None):
         names = names or Names()
@@ -953,7 +984,7 @@ class ErrorUnionSchema(UnionSchema):
         to_dump = []
         for schema in self.schemas:
             # Don't print the system error schema
-            if schema.type == 'string':
+            if schema.type == "string":
                 continue
             to_dump.append(schema.to_json(names))
 
@@ -967,29 +998,28 @@ class RecordSchema(EqualByJsonMixin, NamedSchema):
         field_objects = []
         field_names = []
         for i, field in enumerate(field_data):
-            if callable(getattr(field, 'get', None)):
-                type = field.get('type')
-                name = field.get('name')
+            if callable(getattr(field, "get", None)):
+                type = field.get("type")
+                name = field.get("name")
 
                 # null values can have a default value of None
                 has_default = False
                 default = None
-                if 'default' in field:
+                if "default" in field:
                     has_default = True
-                    default = field.get('default')
+                    default = field.get("default")
 
-                order = field.get('order')
-                doc = field.get('doc')
+                order = field.get("order")
+                doc = field.get("doc")
                 other_props = get_other_props(field, FIELD_RESERVED_PROPS)
-                new_field = Field(type, name, has_default, default, order, names, doc,
-                                  other_props)
+                new_field = Field(type, name, has_default, default, order, names, doc, other_props)
                 # make sure field name has not been used yet
                 if new_field.name in field_names:
-                    fail_msg = 'Field name %s already in use.' % new_field.name
+                    fail_msg = f"Field name {new_field.name} already in use."
                     raise avro.errors.SchemaParseException(fail_msg)
                 field_names.append(new_field.name)
             else:
-                raise avro.errors.SchemaParseException('Not a valid field: %s' % field)
+                raise avro.errors.SchemaParseException(f"Not a valid field: {field}")
             field_objects.append(new_field)
         return field_objects
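
The field parsing above surfaces through parse as well: defaults are tracked per field via has_default, and a repeated field name is rejected. A short example:

```
import avro.errors
import avro.schema

user = avro.schema.parse(
    '{"type": "record", "name": "User", "fields": ['
    ' {"name": "id", "type": "long"},'
    ' {"name": "nick", "type": ["null", "string"], "default": null}]}'
)
print([f.name for f in user.fields])         # ['id', 'nick']
print(user.fields_dict["nick"].has_default)  # True

try:
    avro.schema.parse(
        '{"type": "record", "name": "Dup", "fields": ['
        ' {"name": "x", "type": "int"}, {"name": "x", "type": "int"}]}'
    )
except avro.errors.SchemaParseException as e:
    print(e)  # Field name x already in use.
```
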
 
@@ -999,42 +1029,48 @@ class RecordSchema(EqualByJsonMixin, NamedSchema):
         @arg writer: the schema to match against
         @return bool
         """
-        return writer.type == self.type and (self.type == 'request' or self.check_props(writer, ['fullname']))
-
-    def __init__(self, name, namespace, fields, names=None, schema_type='record',
-                 doc=None, other_props=None):
+        return writer.type == self.type and (self.type == "request" or self.check_props(writer, ["fullname"]))
+
+    def __init__(
+        self,
+        name,
+        namespace,
+        fields,
+        names=None,
+        schema_type="record",
+        doc=None,
+        other_props=None,
+    ):
         # Ensure valid ctor args
         if fields is None:
-            fail_msg = 'Record schema requires a non-empty fields property.'
+            fail_msg = "Record schema requires a non-empty fields property."
             raise avro.errors.SchemaParseException(fail_msg)
         elif not isinstance(fields, list):
-            fail_msg = 'Fields property must be a list of Avro schemas.'
+            fail_msg = "Fields property must be a list of Avro schemas."
             raise avro.errors.SchemaParseException(fail_msg)
 
         # Call parent ctor (adds own name to namespace, too)
-        if schema_type == 'request':
+        if schema_type == "request":
             Schema.__init__(self, schema_type, other_props)
         else:
-            NamedSchema.__init__(self, schema_type, name, namespace, names,
-                                 other_props)
+            NamedSchema.__init__(self, schema_type, name, namespace, names, other_props)
 
-        if schema_type == 'record':
+        if schema_type == "record":
             old_default = names.default_namespace
-            names.default_namespace = Name(name, namespace,
-                                           names.default_namespace).space
+            names.default_namespace = Name(name, namespace, names.default_namespace).space
 
         # Add class members
         field_objects = RecordSchema.make_field_objects(fields, names)
-        self.set_prop('fields', field_objects)
+        self.set_prop("fields", field_objects)
         if doc is not None:
-            self.set_prop('doc', doc)
+            self.set_prop("doc", doc)
 
-        if schema_type == 'record':
+        if schema_type == "record":
             names.default_namespace = old_default
 
     # read-only properties
-    fields = property(lambda self: self.get_prop('fields'))
-    doc = property(lambda self: self.get_prop('doc'))
+    fields = property(lambda self: self.get_prop("fields"))
+    doc = property(lambda self: self.get_prop("doc"))
 
     @property
     def fields_dict(self):
@@ -1047,7 +1083,7 @@ class RecordSchema(EqualByJsonMixin, NamedSchema):
         names = names or Names()
 
         # Request records don't have names
-        if self.type == 'request':
+        if self.type == "request":
             return [f.to_json(names) for f in self.fields]
 
         if self.fullname in names.names:
@@ -1056,14 +1092,14 @@ class RecordSchema(EqualByJsonMixin, NamedSchema):
             names.names[self.fullname] = self
 
         to_dump = names.prune_namespace(self.props.copy())
-        to_dump['fields'] = [f.to_json(names) for f in self.fields]
+        to_dump["fields"] = [f.to_json(names) for f in self.fields]
 
         return to_dump
 
     def to_canonical_json(self, names=None):
         names = names or Names()
 
-        if self.type == 'request':
+        if self.type == "request":
             raise NotImplementedError("Canonical form (probably) does not make sense on type request")
 
         to_dump = self.canonical_properties
@@ -1086,10 +1122,11 @@ class RecordSchema(EqualByJsonMixin, NamedSchema):
 # Date Type
 #
 
+
 class DateSchema(LogicalSchema, PrimitiveSchema):
     def __init__(self, other_props=None):
         LogicalSchema.__init__(self, avro.constants.DATE)
-        PrimitiveSchema.__init__(self, 'int', other_props)
+        PrimitiveSchema.__init__(self, "int", other_props)
 
     def to_json(self, names=None):
         return self.props
@@ -1098,6 +1135,7 @@ class DateSchema(LogicalSchema, PrimitiveSchema):
         """Return self if datum is a valid date object, else None."""
         return self if isinstance(datum, datetime.date) else None
 
+
 #
 # time-millis Type
 #
@@ -1106,7 +1144,7 @@ class DateSchema(LogicalSchema, PrimitiveSchema):
 class TimeMillisSchema(LogicalSchema, PrimitiveSchema):
     def __init__(self, other_props=None):
         LogicalSchema.__init__(self, avro.constants.TIME_MILLIS)
-        PrimitiveSchema.__init__(self, 'int', other_props)
+        PrimitiveSchema.__init__(self, "int", other_props)
 
     def to_json(self, names=None):
         return self.props
@@ -1115,6 +1153,7 @@ class TimeMillisSchema(LogicalSchema, PrimitiveSchema):
         """Return self if datum is a valid representation of this schema, else None."""
         return self if isinstance(datum, datetime.time) else None
 
+
 #
 # time-micros Type
 #
@@ -1123,7 +1162,7 @@ class TimeMillisSchema(LogicalSchema, PrimitiveSchema):
 class TimeMicrosSchema(LogicalSchema, PrimitiveSchema):
     def __init__(self, other_props=None):
         LogicalSchema.__init__(self, avro.constants.TIME_MICROS)
-        PrimitiveSchema.__init__(self, 'long', other_props)
+        PrimitiveSchema.__init__(self, "long", other_props)
 
     def to_json(self, names=None):
         return self.props
@@ -1132,6 +1171,7 @@ class TimeMicrosSchema(LogicalSchema, PrimitiveSchema):
         """Return self if datum is a valid representation of this schema, else None."""
         return self if isinstance(datum, datetime.time) else None
 
+
 #
 # timestamp-millis Type
 #
@@ -1140,7 +1180,7 @@ class TimeMicrosSchema(LogicalSchema, PrimitiveSchema):
 class TimestampMillisSchema(LogicalSchema, PrimitiveSchema):
     def __init__(self, other_props=None):
         LogicalSchema.__init__(self, avro.constants.TIMESTAMP_MILLIS)
-        PrimitiveSchema.__init__(self, 'long', other_props)
+        PrimitiveSchema.__init__(self, "long", other_props)
 
     def to_json(self, names=None):
         return self.props
@@ -1148,6 +1188,7 @@ class TimestampMillisSchema(LogicalSchema, PrimitiveSchema):
     def validate(self, datum):
         return self if isinstance(datum, datetime.datetime) and _is_timezone_aware_datetime(datum) else None
 
+
 #
 # timestamp-micros Type
 #
@@ -1156,7 +1197,7 @@ class TimestampMillisSchema(LogicalSchema, PrimitiveSchema):
 class TimestampMicrosSchema(LogicalSchema, PrimitiveSchema):
     def __init__(self, other_props=None):
         LogicalSchema.__init__(self, avro.constants.TIMESTAMP_MICROS)
-        PrimitiveSchema.__init__(self, 'long', other_props)
+        PrimitiveSchema.__init__(self, "long", other_props)
 
     def to_json(self, names=None):
         return self.props
@@ -1173,7 +1214,7 @@ class TimestampMicrosSchema(LogicalSchema, PrimitiveSchema):
 class UUIDSchema(LogicalSchema, PrimitiveSchema):
     def __init__(self, other_props=None):
         LogicalSchema.__init__(self, avro.constants.UUID)
-        PrimitiveSchema.__init__(self, 'string', other_props)
+        PrimitiveSchema.__init__(self, "string", other_props)
 
     def to_json(self, names=None):
         return self.props
@@ -1188,6 +1229,7 @@ class UUIDSchema(LogicalSchema, PrimitiveSchema):
 
         return self
 
+
 #
 # Module Methods
 #
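
For reference while reading the logical-type hunks above: once parsed, these wrapper classes validate Python datums against the logical type, not just the literal type. A minimal sketch (the schema literals here are illustrative, not part of this diff):

```
import datetime

import avro.schema

# "date" rides on an int literal type; validate() returns the schema
# for a matching datum and None otherwise.
date_schema = avro.schema.parse('{"type": "int", "logicalType": "date"}')
assert date_schema.validate(datetime.date(2021, 5, 28)) is not None
assert date_schema.validate("2021-05-28") is None

# "timestamp-millis" additionally requires a timezone-aware datetime.
ts_schema = avro.schema.parse('{"type": "long", "logicalType": "timestamp-millis"}')
aware = datetime.datetime(2021, 5, 28, tzinfo=datetime.timezone.utc)
assert ts_schema.validate(aware) is not None
assert ts_schema.validate(datetime.datetime(2021, 5, 28)) is None  # naive datetime
```
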
@@ -1198,27 +1240,27 @@ def get_other_props(all_props, reserved_props):
     Retrieve the non-reserved properties from a dictionary of properties
     @args reserved_props: The set of reserved properties to exclude
     """
-    if callable(getattr(all_props, 'items', None)):
+    if callable(getattr(all_props, "items", None)):
         return {k: v for k, v in all_props.items() if k not in reserved_props}
 
 
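The function above also silently returns None for non-mapping input. A quick illustration of both paths (assuming get_other_props and SCHEMA_RESERVED_PROPS are importable from avro.schema, as both are module-level names in this file):

```
from avro.schema import SCHEMA_RESERVED_PROPS, get_other_props

# Reserved keys are dropped; unrecognized extension properties survive.
props = {"type": "record", "name": "R", "customProp": "kept"}
print(get_other_props(props, SCHEMA_RESERVED_PROPS))  # {'customProp': 'kept'}

# Anything without an items() method falls through and yields None.
print(get_other_props(["not", "a", "mapping"], SCHEMA_RESERVED_PROPS))  # None
```
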
 def make_bytes_decimal_schema(other_props):
     """Make a BytesDecimalSchema from just other_props."""
-    return BytesDecimalSchema(other_props.get('precision'), other_props.get('scale', 0))
+    return BytesDecimalSchema(other_props.get("precision"), other_props.get("scale", 0))
 
 
 def make_logical_schema(logical_type, type_, other_props):
     """Map the logical types to the appropriate literal type and schema class."""
     logical_types = {
-        (avro.constants.DATE, 'int'): DateSchema,
-        (avro.constants.DECIMAL, 'bytes'): make_bytes_decimal_schema,
+        (avro.constants.DATE, "int"): DateSchema,
+        (avro.constants.DECIMAL, "bytes"): make_bytes_decimal_schema,
         # The fixed decimal schema is handled later by returning None now.
-        (avro.constants.DECIMAL, 'fixed'): lambda x: None,
-        (avro.constants.TIMESTAMP_MICROS, 'long'): TimestampMicrosSchema,
-        (avro.constants.TIMESTAMP_MILLIS, 'long'): TimestampMillisSchema,
-        (avro.constants.TIME_MICROS, 'long'): TimeMicrosSchema,
-        (avro.constants.TIME_MILLIS, 'int'): TimeMillisSchema,
-        (avro.constants.UUID, 'string'): UUIDSchema,
+        (avro.constants.DECIMAL, "fixed"): lambda x: None,
+        (avro.constants.TIMESTAMP_MICROS, "long"): TimestampMicrosSchema,
+        (avro.constants.TIMESTAMP_MILLIS, "long"): TimestampMillisSchema,
+        (avro.constants.TIME_MICROS, "long"): TimeMicrosSchema,
+        (avro.constants.TIME_MILLIS, "int"): TimeMillisSchema,
+        (avro.constants.UUID, "string"): UUIDSchema,
     }
     try:
         schema_type = logical_types.get((logical_type, type_), None)
@@ -1228,10 +1270,10 @@ def make_logical_schema(logical_type, type_, other_props):
         expected_types = sorted(literal_type for lt, literal_type in logical_types if lt == logical_type)
         if expected_types:
             warnings.warn(
-                avro.errors.IgnoredLogicalType("Logical type {} requires literal type {}, not {}.".format(
-                    logical_type, "/".join(expected_types), type_)))
+                avro.errors.IgnoredLogicalType(f"Logical type {logical_type} requires literal type {'/'.join(expected_types)}, not {type_}.")
+            )
         else:
-            warnings.warn(avro.errors.IgnoredLogicalType("Unknown {}, using {}.".format(logical_type, type_)))
+            warnings.warn(avro.errors.IgnoredLogicalType(f"Unknown {logical_type}, using {type_}."))
     except avro.errors.IgnoredLogicalType as warning:
         warnings.warn(warning)
     return None
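
The fallback above is worth seeing end to end: a logical type paired with the wrong literal type is ignored with a warning, and parsing degrades to the plain literal schema. A minimal sketch:

```
import warnings

import avro.schema

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    schema = avro.schema.parse('{"type": "string", "logicalType": "date"}')

print(schema.type)        # string -- the logical type was dropped
print(caught[0].message)  # Logical type date requires literal type int, not string.
```
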
@@ -1247,10 +1289,10 @@ def make_avsc_object(json_data, names=None, validate_enum_symbols=True):
     names = names or Names()
 
     # JSON object (non-union)
-    if callable(getattr(json_data, 'get', None)):
-        type = json_data.get('type')
+    if callable(getattr(json_data, "get", None)):
+        type = json_data.get("type")
         other_props = get_other_props(json_data, SCHEMA_RESERVED_PROPS)
-        logical_type = json_data.get('logicalType')
+        logical_type = json_data.get("logicalType")
 
         if logical_type:
             logical_schema = make_logical_schema(logical_type, type, other_props or {})
@@ -1258,48 +1300,56 @@ def make_avsc_object(json_data, names=None, validate_enum_symbols=True):
                 return logical_schema
 
         if type in NAMED_TYPES:
-            name = json_data.get('name')
-            namespace = json_data.get('namespace', names.default_namespace)
-            if type == 'fixed':
-                size = json_data.get('size')
-                if logical_type == 'decimal':
-                    precision = json_data.get('precision')
-                    scale = 0 if json_data.get('scale') is None else json_data.get('scale')
+            name = json_data.get("name")
+            namespace = json_data.get("namespace", names.default_namespace)
+            if type == "fixed":
+                size = json_data.get("size")
+                if logical_type == "decimal":
+                    precision = json_data.get("precision")
+                    scale = 0 if json_data.get("scale") is None else json_data.get("scale")
                     try:
                         return FixedDecimalSchema(size, name, precision, scale, namespace, names, other_props)
                     except avro.errors.IgnoredLogicalType as warning:
                         warnings.warn(warning)
                 return FixedSchema(name, namespace, size, names, other_props)
-            elif type == 'enum':
-                symbols = json_data.get('symbols')
-                doc = json_data.get('doc')
-                return EnumSchema(name, namespace, symbols, names, doc, other_props, validate_enum_symbols)
-            elif type in ['record', 'error']:
-                fields = json_data.get('fields')
-                doc = json_data.get('doc')
+            elif type == "enum":
+                symbols = json_data.get("symbols")
+                doc = json_data.get("doc")
+                return EnumSchema(
+                    name,
+                    namespace,
+                    symbols,
+                    names,
+                    doc,
+                    other_props,
+                    validate_enum_symbols,
+                )
+            elif type in ["record", "error"]:
+                fields = json_data.get("fields")
+                doc = json_data.get("doc")
                 return RecordSchema(name, namespace, fields, names, type, doc, other_props)
             else:
-                raise avro.errors.SchemaParseException('Unknown Named Type: %s' % type)
+                raise avro.errors.SchemaParseException(f"Unknown Named Type: {type}")
 
         if type in PRIMITIVE_TYPES:
             return PrimitiveSchema(type, other_props)
 
         if type in VALID_TYPES:
-            if type == 'array':
-                items = json_data.get('items')
+            if type == "array":
+                items = json_data.get("items")
                 return ArraySchema(items, names, other_props)
-            elif type == 'map':
-                values = json_data.get('values')
+            elif type == "map":
+                values = json_data.get("values")
                 return MapSchema(values, names, other_props)
-            elif type == 'error_union':
-                declared_errors = json_data.get('declared_errors')
+            elif type == "error_union":
+                declared_errors = json_data.get("declared_errors")
                 return ErrorUnionSchema(declared_errors, names)
             else:
-                raise avro.errors.SchemaParseException('Unknown Valid Type: %s' % type)
+                raise avro.errors.SchemaParseException(f"Unknown Valid Type: {type}")
         elif type is None:
-            raise avro.errors.SchemaParseException('No "type" property: %s' % json_data)
+            raise avro.errors.SchemaParseException(f'No "type" property: {json_data}')
         else:
-            raise avro.errors.SchemaParseException('Undefined type: %s' % type)
+            raise avro.errors.SchemaParseException(f"Undefined type: {type}")
     # JSON array (union)
     elif isinstance(json_data, list):
         return UnionSchema(json_data, names)
@@ -1308,9 +1358,10 @@ def make_avsc_object(json_data, names=None, validate_enum_symbols=True):
         return PrimitiveSchema(json_data)
     # not for us!
     else:
-        fail_msg = "Could not make an Avro Schema object from %s." % json_data
+        fail_msg = f"Could not make an Avro Schema object from {json_data}"
         raise avro.errors.SchemaParseException(fail_msg)
 
+
 # TODO(hammer): make method for reading from a file?
 
 
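To recap the dispatch rewritten above: a JSON object is routed by its "type" key, a JSON array becomes a union, and a bare primitive name becomes a PrimitiveSchema. A short sketch:

```
from avro.schema import Names, make_avsc_object

names = Names()
print(make_avsc_object("int", names).type)                              # int
print(make_avsc_object(["null", "string"], names).type)                 # union
print(make_avsc_object({"type": "map", "values": "long"}, names).type)  # map
```
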
@@ -1325,7 +1376,7 @@ def parse(json_string, validate_enum_symbols=True):
     try:
         json_data = json.loads(json_string)
     except Exception as e:
-        msg = 'Error parsing JSON: {}, error = {}'.format(json_string, e)
+        msg = f"Error parsing JSON: {json_string}, error = {e}"
         new_exception = avro.errors.SchemaParseException(msg)
         traceback = sys.exc_info()[2]
         raise new_exception.with_traceback(traceback)
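
The rewrapped exception above keeps the original traceback; from the caller's side it looks like this (sketch):

```
import avro.errors
import avro.schema

try:
    avro.schema.parse("{not valid json")
except avro.errors.SchemaParseException as e:
    print(e)  # Error parsing JSON: {not valid json, error = ...
```
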
diff --git a/lang/py/avro/test/__init__.py b/lang/py/avro/test/__init__.py
index ea19c54..accfe17 100644
--- a/lang/py/avro/test/__init__.py
+++ b/lang/py/avro/test/__init__.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
diff --git a/lang/py/avro/test/gen_interop_data.py b/lang/py/avro/test/gen_interop_data.py
index 37d1fce..68672ba 100644
--- a/lang/py/avro/test/gen_interop_data.py
+++ b/lang/py/avro/test/gen_interop_data.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -28,39 +26,36 @@ import avro.datafile
 import avro.io
 import avro.schema
 
-NULL_CODEC = 'null'
+NULL_CODEC = "null"
 CODECS_TO_VALIDATE = avro.codecs.supported_codec_names()
 
 DATUM = {
-    'intField': 12,
-    'longField': 15234324,
-    'stringField': 'hey',
-    'boolField': True,
-    'floatField': 1234.0,
-    'doubleField': -1234.0,
-    'bytesField': b'12312adf',
-    'nullField': None,
-    'arrayField': [5.0, 0.0, 12.0],
-    'mapField': {'a': {'label': 'a'},
-                 'bee': {'label': 'cee'}},
-    'unionField': 12.0,
-    'enumField': 'C',
-    'fixedField': b'1019181716151413',
-    'recordField': {'label': 'blah',
-                    'children': [{'label': 'inner', 'children': []}]},
+    "intField": 12,
+    "longField": 15234324,
+    "stringField": "hey",
+    "boolField": True,
+    "floatField": 1234.0,
+    "doubleField": -1234.0,
+    "bytesField": b"12312adf",
+    "nullField": None,
+    "arrayField": [5.0, 0.0, 12.0],
+    "mapField": {"a": {"label": "a"}, "bee": {"label": "cee"}},
+    "unionField": 12.0,
+    "enumField": "C",
+    "fixedField": b"1019181716151413",
+    "recordField": {"label": "blah", "children": [{"label": "inner", "children": []}]},
 }
 
 
 def generate(schema_path, output_path):
-    with open(schema_path, 'r') as schema_file:
+    with open(schema_path) as schema_file:
         interop_schema = avro.schema.parse(schema_file.read())
     for codec in CODECS_TO_VALIDATE:
         filename = output_path
         if codec != NULL_CODEC:
             base, ext = os.path.splitext(output_path)
             filename = base + "_" + codec + ext
-        with avro.datafile.DataFileWriter(open(filename, 'wb'), avro.io.DatumWriter(),
-                                          interop_schema, codec=codec) as dfw:
+        with avro.datafile.DataFileWriter(open(filename, "wb"), avro.io.DatumWriter(), interop_schema, codec=codec) as dfw:
             dfw.append(DATUM)
 
 
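For context, generate() above writes one data file per supported codec, suffixing the codec name onto every non-null output. A usage sketch (paths are illustrative; interop.avsc ships elsewhere in the Avro source tree):

```
from avro.test.gen_interop_data import CODECS_TO_VALIDATE, generate

print(CODECS_TO_VALIDATE)            # e.g. ['null', 'deflate', ...]
generate("interop.avsc", "py.avro")  # writes py.avro, py_deflate.avro, ...
```
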
diff --git a/lang/py/avro/test/mock_tether_parent.py b/lang/py/avro/test/mock_tether_parent.py
index 6fa8c82..a1ec629 100644
--- a/lang/py/avro/test/mock_tether_parent.py
+++ b/lang/py/avro/test/mock_tether_parent.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -29,7 +27,7 @@ import avro.protocol
 import avro.tether.tether_task
 import avro.tether.util
 
-SERVER_ADDRESS = ('localhost', avro.tether.util.find_port())
+SERVER_ADDRESS = ("localhost", avro.tether.util.find_port())
 
 
 class MockParentResponder(avro.ipc.Responder):
@@ -37,29 +35,22 @@ class MockParentResponder(avro.ipc.Responder):
     The responder for the mocked parent
     """
 
-    def __init__(self):
-        avro.ipc.Responder.__init__(self, avro.tether.tether_task.outputProtocol)
+    def __init__(self) -> None:
+        super().__init__(avro.tether.tether_task.outputProtocol)
 
-    def invoke(self, message, request):
-        if message.name == 'configure':
-            print("MockParentResponder: Received 'configure': inputPort={0}".format(request["port"]))
-
-        elif message.name == 'status':
-            print("MockParentResponder: Received 'status': message={0}".format(request["message"]))
-        elif message.name == 'fail':
-            print("MockParentResponder: Received 'fail': message={0}".format(request["message"]))
-        else:
-            print("MockParentResponder: Received {0}".format(message.name))
-
-        # flush the output so it shows up in the parent process
-        sys.stdout.flush()
-
-        return None
+    def invoke(self, message, request) -> None:
+        response = f"MockParentResponder: Received '{message.name}'"
+        responses = {
+            "configure": f"{response}': inputPort={request.get('port')}",
+            "status": f"{response}: message={request.get('message')}",
+            "fail": f"{response}: message={request.get('message')}",
+        }
+        print(responses.get(message.name, response))
+        sys.stdout.flush()  # flush the output so it shows up in the parent process
 
 
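The invoke() rewrite above swaps an if/elif chain for a message-name lookup with a default. The same pattern in isolation (names here are placeholders, not part of this diff):

```
def describe(message_name: str, request: dict) -> str:
    base = f"Received '{message_name}'"
    details = {
        "configure": f"{base}: inputPort={request.get('port')}",
        "status": f"{base}: message={request.get('message')}",
    }
    return details.get(message_name, base)  # unknown messages fall back to base

print(describe("status", {"message": "ok"}))  # Received 'status': message=ok
print(describe("shutdown", {}))               # Received 'shutdown'
```
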
 class MockParentHandler(http.server.BaseHTTPRequestHandler):
-    """Create a handler for the parent.
-    """
+    """Create a handler for the parent."""
 
     def do_POST(self):
         self.responder = MockParentResponder()
@@ -67,25 +58,25 @@ class MockParentHandler(http.server.BaseHTTPRequestHandler):
         call_request = call_request_reader.read_framed_message()
         resp_body = self.responder.respond(call_request)
         self.send_response(200)
-        self.send_header('Content-Type', 'avro/binary')
+        self.send_header("Content-Type", "avro/binary")
         self.end_headers()
         resp_writer = avro.ipc.FramedWriter(self.wfile)
         resp_writer.write_framed_message(resp_body)
 
 
-if __name__ == '__main__':
-    if (len(sys.argv) <= 1):
+if __name__ == "__main__":
+    if len(sys.argv) <= 1:
         raise avro.errors.UsageError("Usage: mock_tether_parent command")
 
     cmd = sys.argv[1].lower()
-    if (sys.argv[1] == 'start_server'):
-        if (len(sys.argv) == 3):
+    if cmd == "start_server":
+        if len(sys.argv) == 3:
             port = int(sys.argv[2])
         else:
             raise avro.errors.UsageError("Usage: mock_tether_parent start_server port")
 
         SERVER_ADDRESS = (SERVER_ADDRESS[0], port)
-        print("mock_tether_parent: Launching Server on Port: {0}".format(SERVER_ADDRESS[1]))
+        print(f"mock_tether_parent: Launching Server on Port: {SERVER_ADDRESS[1]}")
 
         # flush the output so it shows up in the parent process
         sys.stdout.flush()
diff --git a/lang/py/avro/test/sample_http_client.py b/lang/py/avro/test/sample_http_client.py
index 1b9ab6f..f1fc254 100644
--- a/lang/py/avro/test/sample_http_client.py
+++ b/lang/py/avro/test/sample_http_client.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -53,7 +51,7 @@ MAIL_PROTOCOL_JSON = """\
 }
 """
 MAIL_PROTOCOL = avro.protocol.parse(MAIL_PROTOCOL_JSON)
-SERVER_HOST = 'localhost'
+SERVER_HOST = "localhost"
 SERVER_PORT = 9090
 
 
@@ -62,16 +60,16 @@ def make_requestor(server_host, server_port, protocol):
     return avro.ipc.Requestor(protocol, client)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     if len(sys.argv) not in [4, 5]:
         raise avro.errors.UsageError("Usage: <to> <from> <body> [<count>]")
 
     # client code - attach to the server and send a message
     # fill in the Message record
     message = dict()
-    message['to'] = sys.argv[1]
-    message['from'] = sys.argv[2]
-    message['body'] = sys.argv[3]
+    message["to"] = sys.argv[1]
+    message["from"] = sys.argv[2]
+    message["body"] = sys.argv[3]
 
     try:
         num_messages = int(sys.argv[4])
@@ -80,15 +78,15 @@ if __name__ == '__main__':
 
     # build the parameters for the request
     params = {}
-    params['message'] = message
+    params["message"] = message
 
     # send the requests and print the result
     for msg_count in range(num_messages):
         requestor = make_requestor(SERVER_HOST, SERVER_PORT, MAIL_PROTOCOL)
-        result = requestor.request('send', params)
+        result = requestor.request("send", params)
         print("Result: " + result)
 
     # try out a replay message
     requestor = make_requestor(SERVER_HOST, SERVER_PORT, MAIL_PROTOCOL)
-    result = requestor.request('replay', dict())
+    result = requestor.request("replay", dict())
     print("Replay Result: " + result)
diff --git a/lang/py/avro/test/sample_http_server.py b/lang/py/avro/test/sample_http_server.py
index dea1309..e6873ab 100644
--- a/lang/py/avro/test/sample_http_server.py
+++ b/lang/py/avro/test/sample_http_server.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -19,6 +17,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import json
+
 import avro.ipc
 import avro.protocol
 
@@ -27,34 +27,25 @@ try:
 except ImportError:
     import http.server as http_server  # type: ignore
 
-MAIL_PROTOCOL_JSON = """\
-{"namespace": "example.proto",
- "protocol": "Mail",
-
- "types": [
-     {"name": "Message", "type": "record",
-      "fields": [
-          {"name": "to",   "type": "string"},
-          {"name": "from", "type": "string"},
-          {"name": "body", "type": "string"}
-      ]
-     }
- ],
-
- "messages": {
-     "send": {
-         "request": [{"name": "message", "type": "Message"}],
-         "response": "string"
-     },
-     "replay": {
-         "request": [],
-         "response": "string"
-     }
- }
-}
-"""
+MAIL_PROTOCOL_JSON = json.dumps(
+    {
+        "namespace": "example.proto",
+        "protocol": "Mail",
+        "types": [
+            {
+                "name": "Message",
+                "type": "record",
+                "fields": [{"name": "to", "type": "string"}, {"name": "from", "type": "string"}, {"name": "body", "type": "string"}],
+            }
+        ],
+        "messages": {
+            "send": {"request": [{"name": "message", "type": "Message"}], "response": "string"},
+            "replay": {"request": [], "response": "string"},
+        },
+    }
+)
 MAIL_PROTOCOL = avro.protocol.parse(MAIL_PROTOCOL_JSON)
-SERVER_ADDRESS = ('localhost', 9090)
+SERVER_ADDRESS = ("localhost", 9090)
 
 
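Defining MAIL_PROTOCOL_JSON via json.dumps, as above, makes the literal valid JSON by construction; it is interchangeable with the old hand-written string whenever both serialize to the same document. A quick equivalence check (sketch):

```
import json

handwritten = '{"protocol": "Mail", "namespace": "example.proto"}'
generated = json.dumps({"protocol": "Mail", "namespace": "example.proto"})
assert json.loads(handwritten) == json.loads(generated)
```
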
 class MailResponder(avro.ipc.Responder):
@@ -62,13 +53,10 @@ class MailResponder(avro.ipc.Responder):
         avro.ipc.Responder.__init__(self, MAIL_PROTOCOL)
 
     def invoke(self, message, request):
-        if message.name == 'send':
-            request_content = request['message']
-            response = "Sent message to %(to)s from %(from)s with body %(body)s" % \
-                       request_content
-            return response
-        elif message.name == 'replay':
-            return 'replay'
+        if message.name == "send":
+            return f"Sent message to {request['message']['to']} from {request['message']['from']} with body {request['message']['body']}"
+        if message.name == "replay":
+            return "replay"
 
 
 class MailHandler(http_server.BaseHTTPRequestHandler):
@@ -78,13 +66,13 @@ class MailHandler(http_server.BaseHTTPRequestHandler):
         call_request = call_request_reader.read_framed_message()
         resp_body = self.responder.respond(call_request)
         self.send_response(200)
-        self.send_header('Content-Type', 'avro/binary')
+        self.send_header("Content-Type", "avro/binary")
         self.end_headers()
         resp_writer = avro.ipc.FramedWriter(self.wfile)
         resp_writer.write_framed_message(resp_body)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     mail_server = http_server.HTTPServer(SERVER_ADDRESS, MailHandler)
     mail_server.allow_reuse_address = True
     mail_server.serve_forever()
diff --git a/lang/py/avro/test/test_bench.py b/lang/py/avro/test/test_bench.py
index 863315a..3bd1689 100644
--- a/lang/py/avro/test/test_bench.py
+++ b/lang/py/avro/test/test_bench.py
@@ -86,18 +86,13 @@ def rand_ip():
 
 
 def picks(n):
-    return [
-        {"query": rand_name(), "response": rand_ip(), "type": random.choice(TYPES)}
-        for _ in range(n)
-    ]
+    return [{"query": rand_name(), "response": rand_ip(), "type": random.choice(TYPES)} for _ in range(n)]
 
 
 def time_writes(path, number):
     with avro.datafile.DataFileWriter(open(path, "wb"), WRITER, SCHEMA) as dw:
         globals_ = {"dw": dw, "picks": picks(number)}
-        return timeit.timeit(
-            "dw.append(next(p))", number=number, setup="p=iter(picks)", globals=globals_
-        )
+        return timeit.timeit("dw.append(next(p))", number=number, setup="p=iter(picks)", globals=globals_)
 
 
 def time_read(path):
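
The benchmark call above is unchanged in behavior, only re-wrapped onto one line. For readers unfamiliar with the setup/globals interplay in timeit, a toy reduction:

```
import timeit

picks = list(range(1000))
globals_ = {"sink": [].append, "picks": picks}
# setup runs once per measurement; the statement is executed `number` times.
elapsed = timeit.timeit("sink(next(p))", setup="p=iter(picks)", number=len(picks), globals=globals_)
print(f"{elapsed:.6f}s")
```
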
diff --git a/lang/py/avro/test/test_compatibility.py b/lang/py/avro/test/test_compatibility.py
index 91e136d..58c09a0 100644
--- a/lang/py/avro/test/test_compatibility.py
+++ b/lang/py/avro/test/test_compatibility.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -22,8 +20,20 @@
 import json
 import unittest
 
-from avro.compatibility import ReaderWriterCompatibilityChecker, SchemaCompatibilityType, SchemaType
-from avro.schema import ArraySchema, MapSchema, Names, PrimitiveSchema, Schema, UnionSchema, parse
+from avro.compatibility import (
+    ReaderWriterCompatibilityChecker,
+    SchemaCompatibilityType,
+    SchemaType,
+)
+from avro.schema import (
+    ArraySchema,
+    MapSchema,
+    Names,
+    PrimitiveSchema,
+    Schema,
+    UnionSchema,
+    parse,
+)
 
 BOOLEAN_SCHEMA = PrimitiveSchema(SchemaType.BOOLEAN)
 NULL_SCHEMA = PrimitiveSchema(SchemaType.NULL)
@@ -43,76 +53,85 @@ ENUM1_AB_SCHEMA = parse(json.dumps({"type": SchemaType.ENUM, "name": "Enum1", "s
 ENUM1_ABC_SCHEMA = parse(json.dumps({"type": SchemaType.ENUM, "name": "Enum1", "symbols": ["A", "B", "C"]}))
 ENUM1_BC_SCHEMA = parse(json.dumps({"type": SchemaType.ENUM, "name": "Enum1", "symbols": ["B", "C"]}))
 ENUM2_AB_SCHEMA = parse(json.dumps({"type": SchemaType.ENUM, "name": "Enum2", "symbols": ["A", "B"]}))
-ENUM_ABC_ENUM_DEFAULT_A_SCHEMA = parse(
-    json.dumps({
-        "type": "enum",
-        "name": "Enum",
-        "symbols": ["A", "B", "C"],
-        "default": "A"
-    })
-)
+ENUM_ABC_ENUM_DEFAULT_A_SCHEMA = parse(json.dumps({"type": "enum", "name": "Enum", "symbols": ["A", "B", "C"], "default": "A"}))
 ENUM_AB_ENUM_DEFAULT_A_SCHEMA = parse(json.dumps({"type": SchemaType.ENUM, "name": "Enum", "symbols": ["A", "B"], "default": "A"}))
 ENUM_ABC_ENUM_DEFAULT_A_RECORD = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record",
-        "fields": [{
-            "name": "Field",
-            "type": {
-                "type": SchemaType.ENUM,
-                "name": "Enum",
-                "symbols": ["A", "B", "C"],
-                "default": "A"
-            }
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record",
+            "fields": [
+                {
+                    "name": "Field",
+                    "type": {
+                        "type": SchemaType.ENUM,
+                        "name": "Enum",
+                        "symbols": ["A", "B", "C"],
+                        "default": "A",
+                    },
+                }
+            ],
+        }
+    )
 )
 ENUM_AB_ENUM_DEFAULT_A_RECORD = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record",
-        "fields": [{
-            "name": "Field",
-            "type": {
-                "type": SchemaType.ENUM,
-                "name": "Enum",
-                "symbols": ["A", "B"],
-                "default": "A"
-            }
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record",
+            "fields": [
+                {
+                    "name": "Field",
+                    "type": {
+                        "type": SchemaType.ENUM,
+                        "name": "Enum",
+                        "symbols": ["A", "B"],
+                        "default": "A",
+                    },
+                }
+            ],
+        }
+    )
 )
 ENUM_ABC_FIELD_DEFAULT_B_ENUM_DEFAULT_A_RECORD = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record",
-        "fields": [{
-            "name": "Field",
-            "type": {
-                "type": SchemaType.ENUM,
-                "name": "Enum",
-                "symbols": ["A", "B", "C"],
-                "default": "A"
-            },
-            "default": "B"
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record",
+            "fields": [
+                {
+                    "name": "Field",
+                    "type": {
+                        "type": SchemaType.ENUM,
+                        "name": "Enum",
+                        "symbols": ["A", "B", "C"],
+                        "default": "A",
+                    },
+                    "default": "B",
+                }
+            ],
+        }
+    )
 )
 ENUM_AB_FIELD_DEFAULT_A_ENUM_DEFAULT_B_RECORD = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record",
-        "fields": [{
-            "name": "Field",
-            "type": {
-                "type": SchemaType.ENUM,
-                "name": "Enum",
-                "symbols": ["A", "B"],
-                "default": "B"
-            },
-            "default": "A"
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record",
+            "fields": [
+                {
+                    "name": "Field",
+                    "type": {
+                        "type": SchemaType.ENUM,
+                        "name": "Enum",
+                        "symbols": ["A", "B"],
+                        "default": "B",
+                    },
+                    "default": "A",
+                }
+            ],
+        }
+    )
 )
 EMPTY_UNION_SCHEMA = UnionSchema([], names=Names())
 NULL_UNION_SCHEMA = UnionSchema([SchemaType.NULL], names=Names())
@@ -126,225 +145,252 @@ INT_STRING_UNION_SCHEMA = UnionSchema([SchemaType.INT, SchemaType.STRING], names
 STRING_INT_UNION_SCHEMA = UnionSchema([SchemaType.STRING, SchemaType.INT], names=Names())
 INT_FLOAT_UNION_SCHEMA = UnionSchema([SchemaType.INT, SchemaType.FLOAT], names=Names())
 INT_LONG_UNION_SCHEMA = UnionSchema([SchemaType.INT, SchemaType.LONG], names=Names())
-INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA = UnionSchema([SchemaType.INT, SchemaType.LONG, SchemaType.FLOAT, SchemaType.DOUBLE], names=Names())
-NULL_INT_ARRAY_UNION_SCHEMA = UnionSchema([{"type": SchemaType.NULL}, {"type": SchemaType.ARRAY, "items": SchemaType.INT}], names=Names())
-NULL_INT_MAP_UNION_SCHEMA = UnionSchema([{"type": SchemaType.NULL}, {"type": SchemaType.MAP, "values": SchemaType.INT}], names=Names())
+INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA = UnionSchema(
+    [SchemaType.INT, SchemaType.LONG, SchemaType.FLOAT, SchemaType.DOUBLE],
+    names=Names(),
+)
+NULL_INT_ARRAY_UNION_SCHEMA = UnionSchema(
+    [{"type": SchemaType.NULL}, {"type": SchemaType.ARRAY, "items": SchemaType.INT}],
+    names=Names(),
+)
+NULL_INT_MAP_UNION_SCHEMA = UnionSchema(
+    [{"type": SchemaType.NULL}, {"type": SchemaType.MAP, "values": SchemaType.INT}],
+    names=Names(),
+)
 EMPTY_RECORD1 = parse(json.dumps({"type": SchemaType.RECORD, "name": "Record1", "fields": []}))
 EMPTY_RECORD2 = parse(json.dumps({"type": SchemaType.RECORD, "name": "Record2", "fields": []}))
-A_INT_RECORD1 = parse(json.dumps({"type": SchemaType.RECORD, "name": "Record1", "fields": [{"name": "a", "type": SchemaType.INT}]}))
-A_LONG_RECORD1 = parse(json.dumps({"type": SchemaType.RECORD, "name": "Record1", "fields": [{"name": "a", "type": SchemaType.LONG}]}))
+A_INT_RECORD1 = parse(
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [{"name": "a", "type": SchemaType.INT}],
+        }
+    )
+)
+A_LONG_RECORD1 = parse(
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [{"name": "a", "type": SchemaType.LONG}],
+        }
+    )
+)
 A_INT_B_INT_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "a",
-            "type": SchemaType.INT
-        }, {
-            "name": "b",
-            "type": SchemaType.INT
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {"name": "a", "type": SchemaType.INT},
+                {"name": "b", "type": SchemaType.INT},
+            ],
+        }
+    )
 )
 A_DINT_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "a",
-            "type": SchemaType.INT,
-            "default": 0
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [{"name": "a", "type": SchemaType.INT, "default": 0}],
+        }
+    )
 )
 A_INT_B_DINT_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "a",
-            "type": SchemaType.INT
-        }, {
-            "name": "b",
-            "type": SchemaType.INT,
-            "default": 0
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {"name": "a", "type": SchemaType.INT},
+                {"name": "b", "type": SchemaType.INT, "default": 0},
+            ],
+        }
+    )
 )
 A_DINT_B_DINT_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "a",
-            "type": SchemaType.INT,
-            "default": 0
-        }, {
-            "name": "b",
-            "type": SchemaType.INT,
-            "default": 0
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {"name": "a", "type": SchemaType.INT, "default": 0},
+                {"name": "b", "type": SchemaType.INT, "default": 0},
+            ],
+        }
+    )
 )
 A_DINT_B_DFIXED_4_BYTES_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "a",
-            "type": SchemaType.INT,
-            "default": 0
-        }, {
-            "name": "b",
-            "type": {
-                "type": SchemaType.FIXED,
-                "name": "Fixed",
-                "size": 4
-            }
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {"name": "a", "type": SchemaType.INT, "default": 0},
+                {
+                    "name": "b",
+                    "type": {"type": SchemaType.FIXED, "name": "Fixed", "size": 4},
+                },
+            ],
+        }
+    )
 )
 A_DINT_B_DFIXED_8_BYTES_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "a",
-            "type": SchemaType.INT,
-            "default": 0
-        }, {
-            "name": "b",
-            "type": {
-                "type": SchemaType.FIXED,
-                "name": "Fixed",
-                "size": 8
-            }
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {"name": "a", "type": SchemaType.INT, "default": 0},
+                {
+                    "name": "b",
+                    "type": {"type": SchemaType.FIXED, "name": "Fixed", "size": 8},
+                },
+            ],
+        }
+    )
 )
 A_DINT_B_DINT_STRING_UNION_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "a",
-            "type": SchemaType.INT,
-            "default": 0
-        }, {
-            "name": "b",
-            "type": [SchemaType.INT, SchemaType.STRING],
-            "default": 0
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {"name": "a", "type": SchemaType.INT, "default": 0},
+                {
+                    "name": "b",
+                    "type": [SchemaType.INT, SchemaType.STRING],
+                    "default": 0,
+                },
+            ],
+        }
+    )
 )
 A_DINT_B_DINT_UNION_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "a",
-            "type": SchemaType.INT,
-            "default": 0
-        }, {
-            "name": "b",
-            "type": [SchemaType.INT],
-            "default": 0
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {"name": "a", "type": SchemaType.INT, "default": 0},
+                {"name": "b", "type": [SchemaType.INT], "default": 0},
+            ],
+        }
+    )
 )
 A_DINT_B_DENUM_1_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "a",
-            "type": SchemaType.INT,
-            "default": 0
-        }, {
-            "name": "b",
-            "type": {
-                "type": SchemaType.ENUM,
-                "name": "Enum1",
-                "symbols": ["A", "B"]
-            }
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {"name": "a", "type": SchemaType.INT, "default": 0},
+                {
+                    "name": "b",
+                    "type": {
+                        "type": SchemaType.ENUM,
+                        "name": "Enum1",
+                        "symbols": ["A", "B"],
+                    },
+                },
+            ],
+        }
+    )
 )
 A_DINT_B_DENUM_2_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "a",
-            "type": SchemaType.INT,
-            "default": 0
-        }, {
-            "name": "b",
-            "type": {
-                "type": SchemaType.ENUM,
-                "name": "Enum2",
-                "symbols": ["A", "B"]
-            }
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {"name": "a", "type": SchemaType.INT, "default": 0},
+                {
+                    "name": "b",
+                    "type": {
+                        "type": SchemaType.ENUM,
+                        "name": "Enum2",
+                        "symbols": ["A", "B"],
+                    },
+                },
+            ],
+        }
+    )
 )
 FIXED_4_BYTES = parse(json.dumps({"type": SchemaType.FIXED, "name": "Fixed", "size": 4}))
 FIXED_8_BYTES = parse(json.dumps({"type": SchemaType.FIXED, "name": "Fixed", "size": 8}))
 NS_RECORD1 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "f1",
-            "type": [
-                SchemaType.NULL, {
-                    "type": SchemaType.ARRAY,
-                    "items": {
-                        "type": SchemaType.RECORD,
-                        "name": "InnerRecord1",
-                        "namespace": "ns1",
-                        "fields": [{
-                            "name": "a",
-                            "type": SchemaType.INT
-                        }]
-                    }
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {
+                    "name": "f1",
+                    "type": [
+                        SchemaType.NULL,
+                        {
+                            "type": SchemaType.ARRAY,
+                            "items": {
+                                "type": SchemaType.RECORD,
+                                "name": "InnerRecord1",
+                                "namespace": "ns1",
+                                "fields": [{"name": "a", "type": SchemaType.INT}],
+                            },
+                        },
+                    ],
                 }
-            ]
-        }]
-    })
+            ],
+        }
+    )
 )
 NS_RECORD2 = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "f1",
-            "type": [
-                SchemaType.NULL, {
-                    "type": SchemaType.ARRAY,
-                    "items": {
-                        "type": SchemaType.RECORD,
-                        "name": "InnerRecord1",
-                        "namespace": "ns2",
-                        "fields": [{
-                            "name": "a",
-                            "type": SchemaType.INT
-                        }]
-                    }
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [
+                {
+                    "name": "f1",
+                    "type": [
+                        SchemaType.NULL,
+                        {
+                            "type": SchemaType.ARRAY,
+                            "items": {
+                                "type": SchemaType.RECORD,
+                                "name": "InnerRecord1",
+                                "namespace": "ns2",
+                                "fields": [{"name": "a", "type": SchemaType.INT}],
+                            },
+                        },
+                    ],
                 }
-            ]
-        }]
-    })
+            ],
+        }
+    )
 )
 
-UNION_INT_RECORD1 = UnionSchema([
-    {"type": SchemaType.INT},
-    {"type": SchemaType.RECORD, "name": "Record1", "fields": [{"name": "field1", "type": SchemaType.INT}]}
-])
-UNION_INT_RECORD2 = UnionSchema([
-    {"type": SchemaType.INT},
-    {"type": "record", "name": "Record2", "fields": [{"name": "field1", "type": SchemaType.INT}]}
-])
+UNION_INT_RECORD1 = UnionSchema(
+    [
+        {"type": SchemaType.INT},
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [{"name": "field1", "type": SchemaType.INT}],
+        },
+    ]
+)
+UNION_INT_RECORD2 = UnionSchema(
+    [
+        {"type": SchemaType.INT},
+        {
+            "type": "record",
+            "name": "Record2",
+            "fields": [{"name": "field1", "type": SchemaType.INT}],
+        },
+    ]
+)
 UNION_INT_ENUM1_AB = UnionSchema([{"type": SchemaType.INT}, ENUM1_AB_SCHEMA.to_json()])
 UNION_INT_FIXED_4_BYTES = UnionSchema([{"type": SchemaType.INT}, FIXED_4_BYTES.to_json()])
 UNION_INT_BOOLEAN = UnionSchema([{"type": SchemaType.INT}, {"type": SchemaType.BOOLEAN}])
@@ -353,64 +399,61 @@ UNION_INT_MAP_INT = UnionSchema([{"type": SchemaType.INT}, INT_MAP_SCHEMA.to_jso
 UNION_INT_NULL = UnionSchema([{"type": SchemaType.INT}, {"type": SchemaType.NULL}])
 FIXED_4_ANOTHER_NAME = parse(json.dumps({"type": SchemaType.FIXED, "name": "AnotherName", "size": 4}))
 RECORD1_WITH_ENUM_AB = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "field1",
-            "type": dict(ENUM1_AB_SCHEMA.to_json())
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [{"name": "field1", "type": dict(ENUM1_AB_SCHEMA.to_json())}],
+        }
+    )
 )
 RECORD1_WITH_ENUM_ABC = parse(
-    json.dumps({
-        "type": SchemaType.RECORD,
-        "name": "Record1",
-        "fields": [{
-            "name": "field1",
-            "type": dict(ENUM1_ABC_SCHEMA.to_json())
-        }]
-    })
+    json.dumps(
+        {
+            "type": SchemaType.RECORD,
+            "name": "Record1",
+            "fields": [{"name": "field1", "type": dict(ENUM1_ABC_SCHEMA.to_json())}],
+        }
+    )
 )
 
 
 class TestCompatibility(unittest.TestCase):
     def test_simple_schema_promotion(self):
         field_alias_reader = parse(
-            json.dumps({
-                "name": "foo",
-                "type": "record",
-                "fields": [{
-                    "type": "int",
-                    "name": "bar",
-                    "aliases": ["f1"]
-                }]
-            })
+            json.dumps(
+                {
+                    "name": "foo",
+                    "type": "record",
+                    "fields": [{"type": "int", "name": "bar", "aliases": ["f1"]}],
+                }
+            )
         )
         record_alias_reader = parse(
-            json.dumps({
-                "name": "other",
-                "type": "record",
-                "fields": [{
-                    "type": "int",
-                    "name": "f1"
-                }],
-                "aliases": ["foo"]
-            })
+            json.dumps(
+                {
+                    "name": "other",
+                    "type": "record",
+                    "fields": [{"type": "int", "name": "f1"}],
+                    "aliases": ["foo"],
+                }
+            )
         )
 
         writer = parse(
-            json.dumps({
-                "name": "foo",
-                "type": "record",
-                "fields": [{
-                    "type": "int",
-                    "name": "f1"
-                }, {
-                    "type": "string",
-                    "name": "f2",
-                }]
-            })
+            json.dumps(
+                {
+                    "name": "foo",
+                    "type": "record",
+                    "fields": [
+                        {"type": "int", "name": "f1"},
+                        {
+                            "type": "string",
+                            "name": "f2",
+                        },
+                    ],
+                }
+            )
         )
         # alias testing
         res = ReaderWriterCompatibilityChecker().get_compatibility(field_alias_reader, writer)
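
For orientation while reading the reflowed tests below: the checker is always driven the same way, reader first, writer second. A minimal compatible pair (int writer promoted to long reader):

```
import json

from avro.compatibility import ReaderWriterCompatibilityChecker, SchemaCompatibilityType
from avro.schema import parse

writer = parse(json.dumps({"type": "int"}))
reader = parse(json.dumps({"type": "long"}))
result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
print(result.compatibility is SchemaCompatibilityType.compatible)  # True
```
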
@@ -421,67 +464,78 @@ class TestCompatibility(unittest.TestCase):
     def test_schema_compatibility(self):
         # testValidateSchemaPairMissingField
         writer = parse(
-            json.dumps({
-                "type": SchemaType.RECORD,
-                "name": "Record",
-                "fields": [{
-                    "name": "oldField1",
-                    "type": SchemaType.INT
-                }, {
-                    "name": "oldField2",
-                    "type": SchemaType.STRING
-                }]
-            })
+            json.dumps(
+                {
+                    "type": SchemaType.RECORD,
+                    "name": "Record",
+                    "fields": [
+                        {"name": "oldField1", "type": SchemaType.INT},
+                        {"name": "oldField2", "type": SchemaType.STRING},
+                    ],
+                }
+            )
+        )
+        reader = parse(
+            json.dumps(
+                {
+                    "type": SchemaType.RECORD,
+                    "name": "Record",
+                    "fields": [{"name": "oldField1", "type": SchemaType.INT}],
+                }
+            )
         )
-        reader = parse(json.dumps({"type": SchemaType.RECORD, "name": "Record", "fields": [{"name": "oldField1", "type": SchemaType.INT}]}))
         self.assertTrue(self.are_compatible(reader, writer))
         # testValidateSchemaPairMissingSecondField
-        reader = parse(json.dumps({"type": SchemaType.RECORD, "name": "Record", "fields": [{"name": "oldField2", "type": SchemaType.STRING}]}))
+        reader = parse(
+            json.dumps(
+                {
+                    "type": SchemaType.RECORD,
+                    "name": "Record",
+                    "fields": [{"name": "oldField2", "type": SchemaType.STRING}],
+                }
+            )
+        )
         self.assertTrue(self.are_compatible(reader, writer))
         # testValidateSchemaPairAllFields
         reader = parse(
-            json.dumps({
-                "type": SchemaType.RECORD,
-                "name": "Record",
-                "fields": [{
-                    "name": "oldField1",
-                    "type": SchemaType.INT
-                }, {
-                    "name": "oldField2",
-                    "type": SchemaType.STRING
-                }]
-            })
+            json.dumps(
+                {
+                    "type": SchemaType.RECORD,
+                    "name": "Record",
+                    "fields": [
+                        {"name": "oldField1", "type": SchemaType.INT},
+                        {"name": "oldField2", "type": SchemaType.STRING},
+                    ],
+                }
+            )
         )
         self.assertTrue(self.are_compatible(reader, writer))
         # testValidateSchemaNewFieldWithDefault
         reader = parse(
-            json.dumps({
-                "type": SchemaType.RECORD,
-                "name": "Record",
-                "fields": [{
-                    "name": "oldField1",
-                    "type": SchemaType.INT
-                }, {
-                    "name": "newField2",
-                    "type": SchemaType.INT,
-                    "default": 42
-                }]
-            })
+            json.dumps(
+                {
+                    "type": SchemaType.RECORD,
+                    "name": "Record",
+                    "fields": [
+                        {"name": "oldField1", "type": SchemaType.INT},
+                        {"name": "newField2", "type": SchemaType.INT, "default": 42},
+                    ],
+                }
+            )
         )
         self.assertTrue(self.are_compatible(reader, writer))
         # testValidateSchemaNewField
         reader = parse(
-            json.dumps({
-                "type": SchemaType.RECORD,
-                "name": "Record",
-                "fields": [{
-                    "name": "oldField1",
-                    "type": SchemaType.INT
-                }, {
-                    "name": "newField2",
-                    "type": SchemaType.INT
-                }]
-            })
+            json.dumps(
+                {
+                    "type": SchemaType.RECORD,
+                    "name": "Record",
+                    "fields": [
+                        {"name": "oldField1", "type": SchemaType.INT},
+                        {"name": "newField2", "type": SchemaType.INT},
+                    ],
+                }
+            )
         )
         self.assertFalse(self.are_compatible(reader, writer))
         # testValidateArrayWriterSchema
@@ -498,20 +552,32 @@ class TestCompatibility(unittest.TestCase):
         self.assertFalse(self.are_compatible(reader, writer))
         # testUnionReaderWriterSubsetIncompatibility
         writer = parse(
-            json.dumps({
-                "name": "Record",
-                "type": "record",
-                "fields": [{
-                    "name": "f1",
-                    "type": [SchemaType.INT, SchemaType.STRING, SchemaType.LONG]
-                }]
-            })
+            json.dumps(
+                {
+                    "name": "Record",
+                    "type": "record",
+                    "fields": [
+                        {
+                            "name": "f1",
+                            "type": [
+                                SchemaType.INT,
+                                SchemaType.STRING,
+                                SchemaType.LONG,
+                            ],
+                        }
+                    ],
+                }
+            )
+        )
+        reader = parse(
+            json.dumps(
+                {
+                    "name": "Record",
+                    "type": SchemaType.RECORD,
+                    "fields": [{"name": "f1", "type": [SchemaType.INT, SchemaType.STRING]}],
+                }
+            )
         )
-        reader = parse(json.dumps({
-            "name": "Record",
-            "type": SchemaType.RECORD,
-            "fields": [
-                {"name": "f1", "type": [SchemaType.INT, SchemaType.STRING]}]}))
         reader = reader.fields[0].type
         writer = writer.fields[0].type
         self.assertIsInstance(reader, UnionSchema)
@@ -588,7 +654,10 @@ class TestCompatibility(unittest.TestCase):
             (parse(json.dumps({"type": "null"})), parse(json.dumps({"type": "null"}))),
             (NULL_SCHEMA, NULL_SCHEMA),
             (ENUM_AB_ENUM_DEFAULT_A_RECORD, ENUM_ABC_ENUM_DEFAULT_A_RECORD),
-            (ENUM_AB_FIELD_DEFAULT_A_ENUM_DEFAULT_B_RECORD, ENUM_ABC_FIELD_DEFAULT_B_ENUM_DEFAULT_A_RECORD),
+            (
+                ENUM_AB_FIELD_DEFAULT_A_ENUM_DEFAULT_B_RECORD,
+                ENUM_ABC_FIELD_DEFAULT_B_ENUM_DEFAULT_A_RECORD,
+            ),
             (NS_RECORD1, NS_RECORD2),
         ]
 
@@ -599,21 +668,44 @@ class TestCompatibility(unittest.TestCase):
         incompatible_fixed_pairs = [
             (FIXED_4_BYTES, FIXED_8_BYTES, "expected: 8, found: 4", "/size"),
             (FIXED_8_BYTES, FIXED_4_BYTES, "expected: 4, found: 8", "/size"),
-            (A_DINT_B_DFIXED_8_BYTES_RECORD1, A_DINT_B_DFIXED_4_BYTES_RECORD1, "expected: 4, found: 8", "/fields/1/type/size"),
-            (A_DINT_B_DFIXED_4_BYTES_RECORD1, A_DINT_B_DFIXED_8_BYTES_RECORD1, "expected: 8, found: 4", "/fields/1/type/size"),
+            (
+                A_DINT_B_DFIXED_8_BYTES_RECORD1,
+                A_DINT_B_DFIXED_4_BYTES_RECORD1,
+                "expected: 4, found: 8",
+                "/fields/1/type/size",
+            ),
+            (
+                A_DINT_B_DFIXED_4_BYTES_RECORD1,
+                A_DINT_B_DFIXED_8_BYTES_RECORD1,
+                "expected: 8, found: 4",
+                "/fields/1/type/size",
+            ),
         ]
         for (reader, writer, message, location) in incompatible_fixed_pairs:
             result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
             self.assertIs(result.compatibility, SchemaCompatibilityType.incompatible)
-            self.assertIn(location, result.locations, "expected {}, found {}".format(location, result))
-            self.assertIn(message, result.messages, "expected {}, found {}".format(location, result))
+            self.assertIn(
+                location,
+                result.locations,
+                f"expected {location}, found {result}",
+            )
+            self.assertIn(
+                message,
+                result.messages,
+                f"expected {location}, found {result}",
+            )
 
     def test_schema_compatibility_missing_enum_symbols(self):
         incompatible_pairs = [
             # str(set) representation
             (ENUM1_AB_SCHEMA, ENUM1_ABC_SCHEMA, "{'C'}", "/symbols"),
             (ENUM1_BC_SCHEMA, ENUM1_ABC_SCHEMA, "{'A'}", "/symbols"),
-            (RECORD1_WITH_ENUM_AB, RECORD1_WITH_ENUM_ABC, "{'C'}", "/fields/0/type/symbols"),
+            (
+                RECORD1_WITH_ENUM_AB,
+                RECORD1_WITH_ENUM_ABC,
+                "{'C'}",
+                "/fields/0/type/symbols",
+            ),
         ]
         for (reader, writer, message, location) in incompatible_pairs:
             result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
@@ -623,30 +715,111 @@ class TestCompatibility(unittest.TestCase):
 
     def test_schema_compatibility_missing_union_branch(self):
         incompatible_pairs = [
-            (INT_UNION_SCHEMA, INT_STRING_UNION_SCHEMA, {"reader union lacking writer type: STRING"}, {"/1"}),
-            (STRING_UNION_SCHEMA, INT_STRING_UNION_SCHEMA, {"reader union lacking writer type: INT"}, {"/0"}),
-            (INT_UNION_SCHEMA, UNION_INT_RECORD1, {"reader union lacking writer type: RECORD"}, {"/1"}),
-            (INT_UNION_SCHEMA, UNION_INT_RECORD2, {"reader union lacking writer type: RECORD"}, {"/1"}),
-            (UNION_INT_RECORD1, UNION_INT_RECORD2, {"reader union lacking writer type: RECORD"}, {"/1"}),
-            (INT_UNION_SCHEMA, UNION_INT_ENUM1_AB, {"reader union lacking writer type: ENUM"}, {"/1"}),
-            (INT_UNION_SCHEMA, UNION_INT_FIXED_4_BYTES, {"reader union lacking writer type: FIXED"}, {"/1"}),
-            (INT_UNION_SCHEMA, UNION_INT_BOOLEAN, {"reader union lacking writer type: BOOLEAN"}, {"/1"}),
-            (INT_UNION_SCHEMA, LONG_UNION_SCHEMA, {"reader union lacking writer type: LONG"}, {"/0"}),
-            (INT_UNION_SCHEMA, FLOAT_UNION_SCHEMA, {"reader union lacking writer type: FLOAT"}, {"/0"}),
-            (INT_UNION_SCHEMA, DOUBLE_UNION_SCHEMA, {"reader union lacking writer type: DOUBLE"}, {"/0"}),
-            (INT_UNION_SCHEMA, BYTES_UNION_SCHEMA, {"reader union lacking writer type: BYTES"}, {"/0"}),
-            (INT_UNION_SCHEMA, UNION_INT_ARRAY_INT, {"reader union lacking writer type: ARRAY"}, {"/1"}),
-            (INT_UNION_SCHEMA, UNION_INT_MAP_INT, {"reader union lacking writer type: MAP"}, {"/1"}),
-            (INT_UNION_SCHEMA, UNION_INT_NULL, {"reader union lacking writer type: NULL"}, {"/1"}),
-            (
-                INT_UNION_SCHEMA, INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA, {
-                    "reader union lacking writer type: LONG", "reader union lacking writer type: FLOAT",
-                    "reader union lacking writer type: DOUBLE"
-                }, {"/1", "/2", "/3"}
-            ),
-            (
-                A_DINT_B_DINT_UNION_RECORD1, A_DINT_B_DINT_STRING_UNION_RECORD1, {"reader union lacking writer type: STRING"},
-                {"/fields/1/type/1"}
+            (
+                INT_UNION_SCHEMA,
+                INT_STRING_UNION_SCHEMA,
+                {"reader union lacking writer type: STRING"},
+                {"/1"},
+            ),
+            (
+                STRING_UNION_SCHEMA,
+                INT_STRING_UNION_SCHEMA,
+                {"reader union lacking writer type: INT"},
+                {"/0"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                UNION_INT_RECORD1,
+                {"reader union lacking writer type: RECORD"},
+                {"/1"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                UNION_INT_RECORD2,
+                {"reader union lacking writer type: RECORD"},
+                {"/1"},
+            ),
+            (
+                UNION_INT_RECORD1,
+                UNION_INT_RECORD2,
+                {"reader union lacking writer type: RECORD"},
+                {"/1"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                UNION_INT_ENUM1_AB,
+                {"reader union lacking writer type: ENUM"},
+                {"/1"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                UNION_INT_FIXED_4_BYTES,
+                {"reader union lacking writer type: FIXED"},
+                {"/1"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                UNION_INT_BOOLEAN,
+                {"reader union lacking writer type: BOOLEAN"},
+                {"/1"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                LONG_UNION_SCHEMA,
+                {"reader union lacking writer type: LONG"},
+                {"/0"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                FLOAT_UNION_SCHEMA,
+                {"reader union lacking writer type: FLOAT"},
+                {"/0"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                DOUBLE_UNION_SCHEMA,
+                {"reader union lacking writer type: DOUBLE"},
+                {"/0"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                BYTES_UNION_SCHEMA,
+                {"reader union lacking writer type: BYTES"},
+                {"/0"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                UNION_INT_ARRAY_INT,
+                {"reader union lacking writer type: ARRAY"},
+                {"/1"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                UNION_INT_MAP_INT,
+                {"reader union lacking writer type: MAP"},
+                {"/1"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                UNION_INT_NULL,
+                {"reader union lacking writer type: NULL"},
+                {"/1"},
+            ),
+            (
+                INT_UNION_SCHEMA,
+                INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA,
+                {
+                    "reader union lacking writer type: LONG",
+                    "reader union lacking writer type: FLOAT",
+                    "reader union lacking writer type: DOUBLE",
+                },
+                {"/1", "/2", "/3"},
+            ),
+            (
+                A_DINT_B_DINT_UNION_RECORD1,
+                A_DINT_B_DINT_STRING_UNION_RECORD1,
+                {"reader union lacking writer type: STRING"},
+                {"/fields/1/type/1"},
             ),
         ]
 
@@ -661,7 +834,13 @@ class TestCompatibility(unittest.TestCase):
             (ENUM1_AB_SCHEMA, ENUM2_AB_SCHEMA, "expected: Enum2", "/name"),
             (EMPTY_RECORD2, EMPTY_RECORD1, "expected: Record1", "/name"),
             (FIXED_4_BYTES, FIXED_4_ANOTHER_NAME, "expected: AnotherName", "/name"),
-            (A_DINT_B_DENUM_1_RECORD1, A_DINT_B_DENUM_2_RECORD1, "expected: Enum2", "/fields/1/type/name")]
+            (
+                A_DINT_B_DENUM_1_RECORD1,
+                A_DINT_B_DENUM_2_RECORD1,
+                "expected: Enum2",
+                "/fields/1/type/name",
+            ),
+        ]
 
         for (reader, writer, message, location) in incompatible_pairs:
             result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
@@ -684,38 +863,175 @@ class TestCompatibility(unittest.TestCase):
 
     def test_schema_compatibility_type_mismatch(self):
         incompatible_pairs = [
-            (NULL_SCHEMA, INT_SCHEMA, "reader type: NULL not compatible with writer type: INT", "/"),
-            (NULL_SCHEMA, LONG_SCHEMA, "reader type: NULL not compatible with writer type: LONG", "/"),
-            (BOOLEAN_SCHEMA, INT_SCHEMA, "reader type: BOOLEAN not compatible with writer type: INT", "/"),
-            (INT_SCHEMA, NULL_SCHEMA, "reader type: INT not compatible with writer type: NULL", "/"),
-            (INT_SCHEMA, BOOLEAN_SCHEMA, "reader type: INT not compatible with writer type: BOOLEAN", "/"),
-            (INT_SCHEMA, LONG_SCHEMA, "reader type: INT not compatible with writer type: LONG", "/"),
-            (INT_SCHEMA, FLOAT_SCHEMA, "reader type: INT not compatible with writer type: FLOAT", "/"),
-            (INT_SCHEMA, DOUBLE_SCHEMA, "reader type: INT not compatible with writer type: DOUBLE", "/"),
-            (LONG_SCHEMA, FLOAT_SCHEMA, "reader type: LONG not compatible with writer type: FLOAT", "/"),
-            (LONG_SCHEMA, DOUBLE_SCHEMA, "reader type: LONG not compatible with writer type: DOUBLE", "/"),
-            (FLOAT_SCHEMA, DOUBLE_SCHEMA, "reader type: FLOAT not compatible with writer type: DOUBLE", "/"),
-            (DOUBLE_SCHEMA, STRING_SCHEMA, "reader type: DOUBLE not compatible with writer type: STRING", "/"),
-            (FIXED_4_BYTES, STRING_SCHEMA, "reader type: FIXED not compatible with writer type: STRING", "/"),
-            (STRING_SCHEMA, BOOLEAN_SCHEMA, "reader type: STRING not compatible with writer type: BOOLEAN", "/"),
-            (STRING_SCHEMA, INT_SCHEMA, "reader type: STRING not compatible with writer type: INT", "/"),
-            (BYTES_SCHEMA, NULL_SCHEMA, "reader type: BYTES not compatible with writer type: NULL", "/"),
-            (BYTES_SCHEMA, INT_SCHEMA, "reader type: BYTES not compatible with writer type: INT", "/"),
-            (A_INT_RECORD1, INT_SCHEMA, "reader type: RECORD not compatible with writer type: INT", "/"),
-            (INT_ARRAY_SCHEMA, LONG_ARRAY_SCHEMA, "reader type: INT not compatible with writer type: LONG", "/items"),
-            (INT_MAP_SCHEMA, INT_ARRAY_SCHEMA, "reader type: MAP not compatible with writer type: ARRAY", "/"),
-            (INT_ARRAY_SCHEMA, INT_MAP_SCHEMA, "reader type: ARRAY not compatible with writer type: MAP", "/"),
-            (INT_MAP_SCHEMA, LONG_MAP_SCHEMA, "reader type: INT not compatible with writer type: LONG", "/values"),
-            (INT_SCHEMA, ENUM2_AB_SCHEMA, "reader type: INT not compatible with writer type: ENUM", "/"),
-            (ENUM2_AB_SCHEMA, INT_SCHEMA, "reader type: ENUM not compatible with writer type: INT", "/"),
-            (
-                FLOAT_SCHEMA, INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA, "reader type: FLOAT not compatible with writer type: DOUBLE",
-                "/"
-            ),
-            (LONG_SCHEMA, INT_FLOAT_UNION_SCHEMA, "reader type: LONG not compatible with writer type: FLOAT", "/"),
-            (INT_SCHEMA, INT_FLOAT_UNION_SCHEMA, "reader type: INT not compatible with writer type: FLOAT", "/"),
-            # (INT_LIST_RECORD, LONG_LIST_RECORD, "reader type: INT not compatible with writer type: LONG", "/fields/0/type"),
-            (NULL_SCHEMA, INT_SCHEMA, "reader type: NULL not compatible with writer type: INT", "/"),
+            (
+                NULL_SCHEMA,
+                INT_SCHEMA,
+                "reader type: NULL not compatible with writer type: INT",
+                "/",
+            ),
+            (
+                NULL_SCHEMA,
+                LONG_SCHEMA,
+                "reader type: NULL not compatible with writer type: LONG",
+                "/",
+            ),
+            (
+                BOOLEAN_SCHEMA,
+                INT_SCHEMA,
+                "reader type: BOOLEAN not compatible with writer type: INT",
+                "/",
+            ),
+            (
+                INT_SCHEMA,
+                NULL_SCHEMA,
+                "reader type: INT not compatible with writer type: NULL",
+                "/",
+            ),
+            (
+                INT_SCHEMA,
+                BOOLEAN_SCHEMA,
+                "reader type: INT not compatible with writer type: BOOLEAN",
+                "/",
+            ),
+            (
+                INT_SCHEMA,
+                LONG_SCHEMA,
+                "reader type: INT not compatible with writer type: LONG",
+                "/",
+            ),
+            (
+                INT_SCHEMA,
+                FLOAT_SCHEMA,
+                "reader type: INT not compatible with writer type: FLOAT",
+                "/",
+            ),
+            (
+                INT_SCHEMA,
+                DOUBLE_SCHEMA,
+                "reader type: INT not compatible with writer type: DOUBLE",
+                "/",
+            ),
+            (
+                LONG_SCHEMA,
+                FLOAT_SCHEMA,
+                "reader type: LONG not compatible with writer type: FLOAT",
+                "/",
+            ),
+            (
+                LONG_SCHEMA,
+                DOUBLE_SCHEMA,
+                "reader type: LONG not compatible with writer type: DOUBLE",
+                "/",
+            ),
+            (
+                FLOAT_SCHEMA,
+                DOUBLE_SCHEMA,
+                "reader type: FLOAT not compatible with writer type: DOUBLE",
+                "/",
+            ),
+            (
+                DOUBLE_SCHEMA,
+                STRING_SCHEMA,
+                "reader type: DOUBLE not compatible with writer type: STRING",
+                "/",
+            ),
+            (
+                FIXED_4_BYTES,
+                STRING_SCHEMA,
+                "reader type: FIXED not compatible with writer type: STRING",
+                "/",
+            ),
+            (
+                STRING_SCHEMA,
+                BOOLEAN_SCHEMA,
+                "reader type: STRING not compatible with writer type: BOOLEAN",
+                "/",
+            ),
+            (
+                STRING_SCHEMA,
+                INT_SCHEMA,
+                "reader type: STRING not compatible with writer type: INT",
+                "/",
+            ),
+            (
+                BYTES_SCHEMA,
+                NULL_SCHEMA,
+                "reader type: BYTES not compatible with writer type: NULL",
+                "/",
+            ),
+            (
+                BYTES_SCHEMA,
+                INT_SCHEMA,
+                "reader type: BYTES not compatible with writer type: INT",
+                "/",
+            ),
+            (
+                A_INT_RECORD1,
+                INT_SCHEMA,
+                "reader type: RECORD not compatible with writer type: INT",
+                "/",
+            ),
+            (
+                INT_ARRAY_SCHEMA,
+                LONG_ARRAY_SCHEMA,
+                "reader type: INT not compatible with writer type: LONG",
+                "/items",
+            ),
+            (
+                INT_MAP_SCHEMA,
+                INT_ARRAY_SCHEMA,
+                "reader type: MAP not compatible with writer type: ARRAY",
+                "/",
+            ),
+            (
+                INT_ARRAY_SCHEMA,
+                INT_MAP_SCHEMA,
+                "reader type: ARRAY not compatible with writer type: MAP",
+                "/",
+            ),
+            (
+                INT_MAP_SCHEMA,
+                LONG_MAP_SCHEMA,
+                "reader type: INT not compatible with writer type: LONG",
+                "/values",
+            ),
+            (
+                INT_SCHEMA,
+                ENUM2_AB_SCHEMA,
+                "reader type: INT not compatible with writer type: ENUM",
+                "/",
+            ),
+            (
+                ENUM2_AB_SCHEMA,
+                INT_SCHEMA,
+                "reader type: ENUM not compatible with writer type: INT",
+                "/",
+            ),
+            (
+                FLOAT_SCHEMA,
+                INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA,
+                "reader type: FLOAT not compatible with writer type: DOUBLE",
+                "/",
+            ),
+            (
+                LONG_SCHEMA,
+                INT_FLOAT_UNION_SCHEMA,
+                "reader type: LONG not compatible with writer type: FLOAT",
+                "/",
+            ),
+            (
+                INT_SCHEMA,
+                INT_FLOAT_UNION_SCHEMA,
+                "reader type: INT not compatible with writer type: FLOAT",
+                "/",
+            ),
+            # (INT_LIST_RECORD, LONG_LIST_RECORD, "reader type: INT not compatible with writer type: LONG", "/fields/0/type"),
+            (
+                NULL_SCHEMA,
+                INT_SCHEMA,
+                "reader type: NULL not compatible with writer type: INT",
+                "/",
+            ),
         ]
         for (reader, writer, message, location) in incompatible_pairs:
             result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
@@ -724,5 +1040,4 @@ class TestCompatibility(unittest.TestCase):
             self.assertIn(location, result.locations)
 
     def are_compatible(self, reader: Schema, writer: Schema) -> bool:
-        return ReaderWriterCompatibilityChecker(
-        ).get_compatibility(reader, writer).compatibility is SchemaCompatibilityType.compatible
+        return ReaderWriterCompatibilityChecker().get_compatibility(reader, writer).compatibility is SchemaCompatibilityType.compatible
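
As an aside for readers skimming the diff: the checker API exercised above is unchanged by this reformat. A minimal sketch of how it is driven, assuming the same names this test module imports (ReaderWriterCompatibilityChecker and SchemaCompatibilityType from avro.compatibility, parse from avro.schema):

```python
import avro.schema
from avro.compatibility import ReaderWriterCompatibilityChecker, SchemaCompatibilityType

# Per the incompatible pairs above, an "int" reader cannot resolve "long" data.
reader = avro.schema.parse('"int"')
writer = avro.schema.parse('"long"')

result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
print(result.compatibility is SchemaCompatibilityType.compatible)  # False
# result.messages should include
# "reader type: INT not compatible with writer type: LONG",
# and result.locations should include "/".
```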
diff --git a/lang/py/avro/test/test_datafile.py b/lang/py/avro/test/test_datafile.py
index e720eda..5bb0d28 100644
--- a/lang/py/avro/test/test_datafile.py
+++ b/lang/py/avro/test/test_datafile.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -31,28 +29,32 @@ import avro.io
 import avro.schema
 
 CODECS_TO_VALIDATE = avro.codecs.supported_codec_names()
-TEST_PAIRS = tuple((avro.schema.parse(schema), datum) for schema, datum in (
-    ('"null"', None),
-    ('"boolean"', True),
-    ('"string"', 'adsfasdf09809dsf-=adsf'),
-    ('"bytes"', b'12345abcd'),
-    ('"int"', 1234),
-    ('"long"', 1234),
-    ('"float"', 1234.0),
-    ('"double"', 1234.0),
-    ('{"type": "fixed", "name": "Test", "size": 1}', b'B'),
-    ('{"type": "enum", "name": "Test", "symbols": ["A", "B"]}', 'B'),
-    ('{"type": "array", "items": "long"}', [1, 3, 2]),
-    ('{"type": "map", "values": "long"}', {'a': 1,
-                                           'b': 3,
-                                           'c': 2}),
-    ('["string", "null", "long"]', None),
-    ("""\
+TEST_PAIRS = tuple(
+    (avro.schema.parse(schema), datum)
+    for schema, datum in (
+        ('"null"', None),
+        ('"boolean"', True),
+        ('"string"', "adsfasdf09809dsf-=adsf"),
+        ('"bytes"', b"12345abcd"),
+        ('"int"', 1234),
+        ('"long"', 1234),
+        ('"float"', 1234.0),
+        ('"double"', 1234.0),
+        ('{"type": "fixed", "name": "Test", "size": 1}', b"B"),
+        ('{"type": "enum", "name": "Test", "symbols": ["A", "B"]}', "B"),
+        ('{"type": "array", "items": "long"}', [1, 3, 2]),
+        ('{"type": "map", "values": "long"}', {"a": 1, "b": 3, "c": 2}),
+        ('["string", "null", "long"]', None),
+        (
+            """\
    {"type": "record",
     "name": "Test",
     "fields": [{"name": "f", "type": "long"}]}
-   """, {'f': 5}),
-    ("""\
+   """,
+            {"f": 5},
+        ),
+        (
+            """\
    {"type": "record",
     "name": "Lisp",
     "fields": [{"name": "value",
@@ -61,18 +63,21 @@ TEST_PAIRS = tuple((avro.schema.parse(schema), datum) for schema, datum in (
                           "name": "Cons",
                           "fields": [{"name": "car", "type": "Lisp"},
                                      {"name": "cdr", "type": "Lisp"}]}]}]}
-   """, {'value': {'car': {'value': 'head'}, 'cdr': {'value': None}}}),
-))
+   """,
+            {"value": {"car": {"value": "head"}, "cdr": {"value": None}}},
+        ),
+    )
+)
 
 
 @contextlib.contextmanager
-def writer(path, schema=None, codec=avro.datafile.NULL_CODEC, mode='wb'):
+def writer(path, schema=None, codec=avro.datafile.NULL_CODEC, mode="wb"):
     with avro.datafile.DataFileWriter(open(path, mode), avro.io.DatumWriter(), schema, codec) as dfw:
         yield dfw
 
 
 @contextlib.contextmanager
-def reader(path, mode='rb'):
+def reader(path, mode="rb"):
     with avro.datafile.DataFileReader(open(path, mode), avro.io.DatumReader()) as dfr:
         yield dfr
 
@@ -86,7 +91,7 @@ class TestDataFile(unittest.TestCase):
 
     def tempfile(self):
         """Generate a tempfile and register it for cleanup."""
-        with tempfile.NamedTemporaryFile(delete=False, suffix='.avro') as f:
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".avro") as f:
             pass
         self.files.append(f.name)
         return f.name
@@ -97,7 +102,7 @@ class TestDataFile(unittest.TestCase):
             os.unlink(f)
 
     def test_append(self):
-        '''A datafile can be written to, appended to, and read from.'''
+        """A datafile can be written to, appended to, and read from."""
         for codec in CODECS_TO_VALIDATE:
             for schema, datum in TEST_PAIRS:
                 # write data in binary to file once
@@ -107,7 +112,7 @@ class TestDataFile(unittest.TestCase):
 
                 # open file, write, and close nine times
                 for _ in range(9):
-                    with writer(path, mode='ab+') as dfw:
+                    with writer(path, mode="ab+") as dfw:
                         dfw.append(datum)
 
                 # read data in binary from file
@@ -118,7 +123,7 @@ class TestDataFile(unittest.TestCase):
                 self.assertEqual(data, [datum] * 10)
 
     def test_round_trip(self):
-        '''A datafile can be written to and read from.'''
+        """A datafile can be written to and read from."""
         for codec in CODECS_TO_VALIDATE:
             for schema, datum in TEST_PAIRS:
                 # write data in binary to file 10 times
@@ -135,7 +140,7 @@ class TestDataFile(unittest.TestCase):
                 self.assertEqual(data, [datum] * 10)
 
     def test_context_manager(self):
-        '''A datafile closes its buffer object when it exits a with block.'''
+        """A datafile closes its buffer object when it exits a with block."""
         path = self.tempfile()
         for schema, _ in TEST_PAIRS:
             with writer(path, schema) as dfw:
@@ -147,15 +152,15 @@ class TestDataFile(unittest.TestCase):
             self.assertTrue(dfr.reader.closed)
 
     def test_metadata(self):
-        '''Metadata can be written to a datafile, and read from it later.'''
+        """Metadata can be written to a datafile, and read from it later."""
         path = self.tempfile()
         for schema, _ in TEST_PAIRS:
             with writer(path, schema) as dfw:
-                dfw.set_meta('test.string', b'foo')
-                dfw.set_meta('test.number', b'1')
+                dfw.set_meta("test.string", b"foo")
+                dfw.set_meta("test.number", b"1")
             with reader(path) as dfr:
-                self.assertEqual(b'foo', dfr.get_meta('test.string'))
-                self.assertEqual(b'1', dfr.get_meta('test.number'))
+                self.assertEqual(b"foo", dfr.get_meta("test.string"))
+                self.assertEqual(b"1", dfr.get_meta("test.number"))
 
     def test_empty_datafile(self):
         """A reader should not fail to read a file consisting of a single empty block."""
diff --git a/lang/py/avro/test/test_datafile_interop.py b/lang/py/avro/test/test_datafile_interop.py
index ec92ebd..382520c 100644
--- a/lang/py/avro/test/test_datafile_interop.py
+++ b/lang/py/avro/test/test_datafile_interop.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -26,34 +24,31 @@ import avro
 import avro.datafile
 import avro.io
 
-_INTEROP_DATA_DIR = os.path.join(os.path.dirname(avro.__file__), 'test', 'interop', 'data')
+_INTEROP_DATA_DIR = os.path.join(os.path.dirname(avro.__file__), "test", "interop", "data")
 
 
-@unittest.skipUnless(os.path.exists(_INTEROP_DATA_DIR),
-                     "{} does not exist".format(_INTEROP_DATA_DIR))
+@unittest.skipUnless(os.path.exists(_INTEROP_DATA_DIR), f"{_INTEROP_DATA_DIR} does not exist")
 class TestDataFileInterop(unittest.TestCase):
     def test_interop(self):
         """Test Interop"""
         for f in os.listdir(_INTEROP_DATA_DIR):
             filename = os.path.join(_INTEROP_DATA_DIR, f)
             assert os.stat(filename).st_size > 0
-            base_ext = os.path.splitext(os.path.basename(f))[0].split('_', 1)
+            base_ext = os.path.splitext(os.path.basename(f))[0].split("_", 1)
             if len(base_ext) < 2 or base_ext[1] in avro.datafile.VALID_CODECS:
-                print('READING %s' % f)
-                print()
+                print(f"READING {f}\n")
 
                 # read data in binary from file
                 datum_reader = avro.io.DatumReader()
-                with open(filename, 'rb') as reader:
+                with open(filename, "rb") as reader:
                     dfr = avro.datafile.DataFileReader(reader, datum_reader)
                     i = 0
                     for i, datum in enumerate(dfr, 1):
                         assert datum is not None
                     assert i > 0
             else:
-                print('SKIPPING %s due to an unsupported codec' % f)
-                print()
+                print(f"SKIPPING {f} due to an unsupported codec\n")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
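
The two test modules above exercise the object container file round trip end to end. A minimal sketch of that round trip, using only the calls visible in the helpers above (the output path is a hypothetical temporary file):

```python
import avro.datafile
import avro.io
import avro.schema

schema = avro.schema.parse('{"type": "map", "values": "long"}')

# Write one datum into an Avro object container file.
with avro.datafile.DataFileWriter(open("/tmp/example.avro", "wb"), avro.io.DatumWriter(), schema, avro.datafile.NULL_CODEC) as dfw:
    dfw.append({"a": 1, "b": 3, "c": 2})

# Read every datum back, as the interop test does for each data file.
with avro.datafile.DataFileReader(open("/tmp/example.avro", "rb"), avro.io.DatumReader()) as dfr:
    for datum in dfr:
        print(datum)  # {'a': 1, 'b': 3, 'c': 2}
```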
diff --git a/lang/py/avro/test/test_init.py b/lang/py/avro/test/test_init.py
index 7151a52..edd14c8 100644
--- a/lang/py/avro/test/test_init.py
+++ b/lang/py/avro/test/test_init.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -25,11 +23,10 @@ import avro
 
 
 class TestVersion(unittest.TestCase):
-
     def test_import_version(self):
         # make sure we have __version__ attribute in avro module
-        self.assertTrue(hasattr(avro, '__version__'))
+        self.assertTrue(hasattr(avro, "__version__"))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/lang/py/avro/test/test_io.py b/lang/py/avro/test/test_io.py
index 9b39382..973f51d 100644
--- a/lang/py/avro/test/test_io.py
+++ b/lang/py/avro/test/test_io.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -97,9 +96,7 @@ SCHEMAS_TO_VALIDATE = tuple(
         ),
         (
             {"type": "long", "logicalType": "timestamp-millis"},
-            datetime.datetime(
-                9999, 12, 31, 23, 59, 59, 999000, tzinfo=avro.timezones.utc
-            ),
+            datetime.datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=avro.timezones.utc),
         ),
         (
             {"type": "long", "logicalType": "timestamp-millis"},
@@ -111,9 +108,7 @@ SCHEMAS_TO_VALIDATE = tuple(
         ),
         (
             {"type": "long", "logicalType": "timestamp-micros"},
-            datetime.datetime(
-                9999, 12, 31, 23, 59, 59, 999999, tzinfo=avro.timezones.utc
-            ),
+            datetime.datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=avro.timezones.utc),
         ),
         (
             {"type": "long", "logicalType": "timestamp-micros"},
@@ -285,9 +280,7 @@ class RoundTripTestCase(unittest.TestCase):
         A datum should be the same after being encoded and then decoded.
         """
         with warnings.catch_warnings(record=True) as actual_warnings:
-            writer, encoder, datum_writer = write_datum(
-                self.test_datum, self.test_schema
-            )
+            writer, encoder, datum_writer = write_datum(self.test_datum, self.test_schema)
             round_trip_datum = read_datum(writer, self.test_schema)
             expected, round_trip, message = (
                 (
@@ -327,9 +320,7 @@ class BinaryEncodingTestCase(unittest.TestCase):
 
     def check_binary_encoding(self):
         with warnings.catch_warnings(record=True) as actual_warnings:
-            writer, encoder, datum_writer = write_datum(
-                self.test_datum, self.writers_schema
-            )
+            writer, encoder, datum_writer = write_datum(self.test_datum, self.writers_schema)
             writer.seek(0)
             hex_val = avro_hexlify(writer)
             self.assertEqual(
@@ -342,9 +333,7 @@ class BinaryEncodingTestCase(unittest.TestCase):
         VALUE_TO_READ = 6253
         with warnings.catch_warnings(record=True) as actual_warnings:
             # write the value to skip and a known value
-            writer, encoder, datum_writer = write_datum(
-                self.test_datum, self.writers_schema
-            )
+            writer, encoder, datum_writer = write_datum(self.test_datum, self.writers_schema)
             datum_writer.write(VALUE_TO_READ, encoder)
 
             # skip the value
@@ -443,9 +432,7 @@ class TestMisc(unittest.TestCase):
                 }
             )
         )
-        self.assertRaises(
-            avro.errors.AvroOutOfScaleException, write_datum, datum, schema
-        )
+        self.assertRaises(avro.errors.AvroOutOfScaleException, write_datum, datum, schema)
 
     def test_decimal_fixed_small_scale(self):
         """Avro should raise an AvroTypeException when attempting to write a decimal with a larger exponent than the schema's scale."""
@@ -464,26 +451,18 @@ class TestMisc(unittest.TestCase):
                 }
             )
         )
-        self.assertRaises(
-            avro.errors.AvroOutOfScaleException, write_datum, datum, schema
-        )
+        self.assertRaises(avro.errors.AvroOutOfScaleException, write_datum, datum, schema)
 
     def test_unknown_symbol(self):
         datum_to_write = "FOO"
-        writers_schema = avro.schema.parse(
-            json.dumps({"type": "enum", "name": "Test", "symbols": ["FOO", "BAR"]})
-        )
-        readers_schema = avro.schema.parse(
-            json.dumps({"type": "enum", "name": "Test", "symbols": ["BAR", "BAZ"]})
-        )
+        writers_schema = avro.schema.parse(json.dumps({"type": "enum", "name": "Test", "symbols": ["FOO", "BAR"]}))
+        readers_schema = avro.schema.parse(json.dumps({"type": "enum", "name": "Test", "symbols": ["BAR", "BAZ"]}))
 
         writer, encoder, datum_writer = write_datum(datum_to_write, writers_schema)
         reader = io.BytesIO(writer.getvalue())
         decoder = avro.io.BinaryDecoder(reader)
         datum_reader = avro.io.DatumReader(writers_schema, readers_schema)
-        self.assertRaises(
-            avro.errors.SchemaResolutionException, datum_reader.read, decoder
-        )
+        self.assertRaises(avro.errors.SchemaResolutionException, datum_reader.read, decoder)
 
     def test_no_default_value(self):
         writers_schema = LONG_RECORD_SCHEMA
@@ -503,9 +482,7 @@ class TestMisc(unittest.TestCase):
         reader = io.BytesIO(writer.getvalue())
         decoder = avro.io.BinaryDecoder(reader)
         datum_reader = avro.io.DatumReader(writers_schema, readers_schema)
-        self.assertRaises(
-            avro.errors.SchemaResolutionException, datum_reader.read, decoder
-        )
+        self.assertRaises(avro.errors.SchemaResolutionException, datum_reader.read, decoder)
 
     def test_projection(self):
         writers_schema = LONG_RECORD_SCHEMA
@@ -565,39 +542,22 @@ class TestMisc(unittest.TestCase):
             )
         )
         datum_to_write = {"E": 5, "F": "Bad"}
-        self.assertRaises(
-            avro.errors.AvroTypeException, write_datum, datum_to_write, writers_schema
-        )
+        self.assertRaises(avro.errors.AvroTypeException, write_datum, datum_to_write, writers_schema)
 
 
 def load_tests(loader, default_tests, pattern):
     """Generate test cases across many test schema."""
     suite = unittest.TestSuite()
     suite.addTests(loader.loadTestsFromTestCase(TestMisc))
-    suite.addTests(
-        IoValidateTestCase(schema_str, datum)
-        for schema_str, datum in SCHEMAS_TO_VALIDATE
-    )
-    suite.addTests(
-        RoundTripTestCase(schema_str, datum)
-        for schema_str, datum in SCHEMAS_TO_VALIDATE
-    )
+    suite.addTests(IoValidateTestCase(schema_str, datum) for schema_str, datum in SCHEMAS_TO_VALIDATE)
+    suite.addTests(RoundTripTestCase(schema_str, datum) for schema_str, datum in SCHEMAS_TO_VALIDATE)
     for skip in False, True:
         for type_ in "int", "long":
-            suite.addTests(
-                BinaryEncodingTestCase(skip, type_, datum, hex_)
-                for datum, hex_ in BINARY_ENCODINGS
-            )
-    suite.addTests(
-        SchemaPromotionTestCase(write_type, read_type)
-        for write_type, read_type in itertools.combinations(
-            ("int", "long", "float", "double"), 2
-        )
-    )
+            suite.addTests(BinaryEncodingTestCase(skip, type_, datum, hex_) for datum, hex_ in BINARY_ENCODINGS)
     suite.addTests(
-        DefaultValueTestCase(field_type, default)
-        for field_type, default in DEFAULT_VALUE_EXAMPLES
+        SchemaPromotionTestCase(write_type, read_type) for write_type, read_type in itertools.combinations(("int", "long", "float", "double"), 2)
     )
+    suite.addTests(DefaultValueTestCase(field_type, default) for field_type, default in DEFAULT_VALUE_EXAMPLES)
     return suite
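
The load_tests function condensed above is the standard unittest hook for data-driven suites: cases are constructed directly with their data, bypassing the autoloader. A self-contained sketch of the pattern (the class and data here are made up, not part of this commit):

```python
import unittest


class SquareTestCase(unittest.TestCase):
    """Parameterized case: the datum is passed to __init__, as in the Avro tests."""

    def __init__(self, n):
        # Name a method that does not start with "test_" so the autoloader
        # ignores it; load_tests instantiates the case explicitly instead.
        super().__init__("check_square")
        self.n = n

    def check_square(self):
        self.assertEqual(self.n * self.n, self.n**2)


def load_tests(loader, default_tests, pattern):
    suite = unittest.TestSuite()
    suite.addTests(SquareTestCase(n) for n in range(5))
    return suite


if __name__ == "__main__":
    unittest.main()
```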
 
 
diff --git a/lang/py/avro/test/test_ipc.py b/lang/py/avro/test/test_ipc.py
index 0957d49..59006d7 100644
--- a/lang/py/avro/test/test_ipc.py
+++ b/lang/py/avro/test/test_ipc.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -34,12 +32,12 @@ class TestIPC(unittest.TestCase):
         pass
 
     def test_server_with_path(self):
-        client_with_custom_path = avro.ipc.HTTPTransceiver('apache.org', 80, '/service/article')
-        self.assertEqual('/service/article', client_with_custom_path.req_resource)
+        client_with_custom_path = avro.ipc.HTTPTransceiver("apache.org", 80, "/service/article")
+        self.assertEqual("/service/article", client_with_custom_path.req_resource)
 
-        client_with_default_path = avro.ipc.HTTPTransceiver('apache.org', 80)
-        self.assertEqual('/', client_with_default_path.req_resource)
+        client_with_default_path = avro.ipc.HTTPTransceiver("apache.org", 80)
+        self.assertEqual("/", client_with_default_path.req_resource)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
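
A sketch of the behavior asserted above: the optional third constructor argument overrides the default request resource of "/" (host and port simply mirror the test's placeholders):

```python
import avro.ipc

client = avro.ipc.HTTPTransceiver("apache.org", 80, "/service/article")
print(client.req_resource)  # /service/article
```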
diff --git a/lang/py/avro/test/test_protocol.py b/lang/py/avro/test/test_protocol.py
index 57118c3..392da6b 100644
--- a/lang/py/avro/test/test_protocol.py
+++ b/lang/py/avro/test/test_protocol.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -32,7 +30,7 @@ import avro.schema
 class TestProtocol:
     """A proxy for a protocol string that provides useful test metadata."""
 
-    def __init__(self, data, name='', comment=''):
+    def __init__(self, data, name="", comment=""):
         if not isinstance(data, str):
             data = json.dumps(data)
         self.data = data
@@ -48,211 +46,330 @@ class TestProtocol:
 
 class ValidTestProtocol(TestProtocol):
     """A proxy for a valid protocol string that provides useful test metadata."""
+
     valid = True
 
 
 class InvalidTestProtocol(TestProtocol):
     """A proxy for an invalid protocol string that provides useful test metadata."""
+
     valid = False
 
 
-HELLO_WORLD = ValidTestProtocol({
-    "namespace": "com.acme",
-    "protocol": "HelloWorld",
-    "types": [
-        {"name": "Greeting", "type": "record", "fields": [
-            {"name": "message", "type": "string"}]},
-        {"name": "Curse", "type": "error", "fields": [
-            {"name": "message", "type": "string"}]}
-    ],
-    "messages": {
-        "hello": {
-            "request": [{"name": "greeting", "type": "Greeting"}],
-            "response": "Greeting",
-            "errors": ["Curse"]
-        }
+HELLO_WORLD = ValidTestProtocol(
+    {
+        "namespace": "com.acme",
+        "protocol": "HelloWorld",
+        "types": [
+            {
+                "name": "Greeting",
+                "type": "record",
+                "fields": [{"name": "message", "type": "string"}],
+            },
+            {
+                "name": "Curse",
+                "type": "error",
+                "fields": [{"name": "message", "type": "string"}],
+            },
+        ],
+        "messages": {
+            "hello": {
+                "request": [{"name": "greeting", "type": "Greeting"}],
+                "response": "Greeting",
+                "errors": ["Curse"],
+            }
+        },
     }
-})
-EXAMPLES = [HELLO_WORLD, ValidTestProtocol({
-    "namespace": "org.apache.avro.test",
-    "protocol": "Simple",
-    "types": [
-        {"name": "Kind", "type": "enum", "symbols": ["FOO", "BAR", "BAZ"]},
-        {"name": "MD5", "type": "fixed", "size": 16},
-        {"name": "TestRecord", "type": "record", "fields": [
-            {"name": "name", "type": "string", "order": "ignore"},
-            {"name": "kind", "type": "Kind", "order": "descending"},
-            {"name": "hash", "type": "MD5"}
-        ]},
-        {"name": "TestError", "type": "error", "fields": [{"name": "message", "type": "string"}]}
-    ],
-    "messages": {
-        "hello": {
-            "request": [{"name": "greeting", "type": "string"}],
-            "response": "string"
-        }, "echo": {
-            "request": [{"name": "record", "type": "TestRecord"}],
-            "response": "TestRecord"
-        }, "add": {
-            "request": [{"name": "arg1", "type": "int"}, {"name": "arg2", "type": "int"}],
-            "response": "int"
-        }, "echoBytes": {
-            "request": [{"name": "data", "type": "bytes"}],
-            "response": "bytes"
-        }, "error": {
-            "request": [],
-            "response": "null",
-            "errors": ["TestError"]
+)
+EXAMPLES = [
+    HELLO_WORLD,
+    ValidTestProtocol(
+        {
+            "namespace": "org.apache.avro.test",
+            "protocol": "Simple",
+            "types": [
+                {"name": "Kind", "type": "enum", "symbols": ["FOO", "BAR", "BAZ"]},
+                {"name": "MD5", "type": "fixed", "size": 16},
+                {
+                    "name": "TestRecord",
+                    "type": "record",
+                    "fields": [
+                        {"name": "name", "type": "string", "order": "ignore"},
+                        {"name": "kind", "type": "Kind", "order": "descending"},
+                        {"name": "hash", "type": "MD5"},
+                    ],
+                },
+                {
+                    "name": "TestError",
+                    "type": "error",
+                    "fields": [{"name": "message", "type": "string"}],
+                },
+            ],
+            "messages": {
+                "hello": {
+                    "request": [{"name": "greeting", "type": "string"}],
+                    "response": "string",
+                },
+                "echo": {
+                    "request": [{"name": "record", "type": "TestRecord"}],
+                    "response": "TestRecord",
+                },
+                "add": {
+                    "request": [
+                        {"name": "arg1", "type": "int"},
+                        {"name": "arg2", "type": "int"},
+                    ],
+                    "response": "int",
+                },
+                "echoBytes": {
+                    "request": [{"name": "data", "type": "bytes"}],
+                    "response": "bytes",
+                },
+                "error": {"request": [], "response": "null", "errors": ["TestError"]},
+            },
         }
-    }
-}), ValidTestProtocol({
-    "namespace": "org.apache.avro.test.namespace",
-    "protocol": "TestNamespace",
-    "types": [
-        {"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
-        {"name": "TestRecord", "type": "record", "fields": [
-            {"name": "hash", "type": "org.apache.avro.test.util.MD5"}
-        ]},
-        {"name": "TestError", "namespace": "org.apache.avro.test.errors", "type": "error",
-         "fields": [{"name": "message", "type": "string"}]}
-    ],
-    "messages": {
-        "echo": {
-            "request": [{"name": "record", "type": "TestRecord"}],
-            "response": "TestRecord"
-        }, "error": {
-            "request": [],
-            "response": "null",
-            "errors": ["org.apache.avro.test.errors.TestError"]
+    ),
+    ValidTestProtocol(
+        {
+            "namespace": "org.apache.avro.test.namespace",
+            "protocol": "TestNamespace",
+            "types": [
+                {"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
+                {
+                    "name": "TestRecord",
+                    "type": "record",
+                    "fields": [{"name": "hash", "type": "org.apache.avro.test.util.MD5"}],
+                },
+                {
+                    "name": "TestError",
+                    "namespace": "org.apache.avro.test.errors",
+                    "type": "error",
+                    "fields": [{"name": "message", "type": "string"}],
+                },
+            ],
+            "messages": {
+                "echo": {
+                    "request": [{"name": "record", "type": "TestRecord"}],
+                    "response": "TestRecord",
+                },
+                "error": {
+                    "request": [],
+                    "response": "null",
+                    "errors": ["org.apache.avro.test.errors.TestError"],
+                },
+            },
         }
-    }
-}), ValidTestProtocol({
-    "namespace": "org.apache.avro.test.namespace",
-    "protocol": "TestImplicitNamespace",
-    "types": [
+    ),
+    ValidTestProtocol(
+        {
+            "namespace": "org.apache.avro.test.namespace",
+            "protocol": "TestImplicitNamespace",
+            "types": [
                 {"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
-                {"name": "ReferencedRecord", "type": "record",
-                 "fields": [{"name": "foo", "type": "string"}]},
-                {"name": "TestRecord", "type": "record",
-                 "fields": [{"name": "hash", "type": "org.apache.avro.test.util.MD5"},
-                            {"name": "unqualified", "type": "ReferencedRecord"}]
-                 },
-                {"name": "TestError", "type": "error", "fields": [{"name": "message", "type": "string"}]}
-    ],
-    "messages": {
-        "echo": {
-            "request": [{"name": "qualified", "type": "org.apache.avro.test.namespace.TestRecord"}],
-            "response": "TestRecord"
-        }, "error": {
-            "request": [],
-            "response": "null",
-            "errors": ["org.apache.avro.test.namespace.TestError"]
+                {
+                    "name": "ReferencedRecord",
+                    "type": "record",
+                    "fields": [{"name": "foo", "type": "string"}],
+                },
+                {
+                    "name": "TestRecord",
+                    "type": "record",
+                    "fields": [
+                        {"name": "hash", "type": "org.apache.avro.test.util.MD5"},
+                        {"name": "unqualified", "type": "ReferencedRecord"},
+                    ],
+                },
+                {
+                    "name": "TestError",
+                    "type": "error",
+                    "fields": [{"name": "message", "type": "string"}],
+                },
+            ],
+            "messages": {
+                "echo": {
+                    "request": [
+                        {
+                            "name": "qualified",
+                            "type": "org.apache.avro.test.namespace.TestRecord",
+                        }
+                    ],
+                    "response": "TestRecord",
+                },
+                "error": {
+                    "request": [],
+                    "response": "null",
+                    "errors": ["org.apache.avro.test.namespace.TestError"],
+                },
+            },
         }
-    }
-}), ValidTestProtocol({
-    "namespace": "org.apache.avro.test.namespace",
-    "protocol": "TestNamespaceTwo",
-    "types": [
-        {"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
-        {"name": "ReferencedRecord", "type": "record",
-         "namespace": "org.apache.avro.other.namespace",
-         "fields": [{"name": "foo", "type": "string"}]},
-        {"name": "TestRecord", "type": "record",
-         "fields": [{"name": "hash", "type": "org.apache.avro.test.util.MD5"},
-                    {"name": "qualified",
-                     "type": "org.apache.avro.other.namespace.ReferencedRecord"}]
-         },
-        {"name": "TestError",
-         "type": "error", "fields": [{"name": "message", "type": "string"}]}],
-    "messages": {
-        "echo": {
-            "request": [{"name": "qualified", "type": "org.apache.avro.test.namespace.TestRecord"}],
-            "response": "TestRecord"
-        }, "error": {
-            "request": [],
-            "response": "null",
-                        "errors": ["org.apache.avro.test.namespace.TestError"]
+    ),
+    ValidTestProtocol(
+        {
+            "namespace": "org.apache.avro.test.namespace",
+            "protocol": "TestNamespaceTwo",
+            "types": [
+                {"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
+                {
+                    "name": "ReferencedRecord",
+                    "type": "record",
+                    "namespace": "org.apache.avro.other.namespace",
+                    "fields": [{"name": "foo", "type": "string"}],
+                },
+                {
+                    "name": "TestRecord",
+                    "type": "record",
+                    "fields": [
+                        {"name": "hash", "type": "org.apache.avro.test.util.MD5"},
+                        {
+                            "name": "qualified",
+                            "type": "org.apache.avro.other.namespace.ReferencedRecord",
+                        },
+                    ],
+                },
+                {
+                    "name": "TestError",
+                    "type": "error",
+                    "fields": [{"name": "message", "type": "string"}],
+                },
+            ],
+            "messages": {
+                "echo": {
+                    "request": [
+                        {
+                            "name": "qualified",
+                            "type": "org.apache.avro.test.namespace.TestRecord",
+                        }
+                    ],
+                    "response": "TestRecord",
+                },
+                "error": {
+                    "request": [],
+                    "response": "null",
+                    "errors": ["org.apache.avro.test.namespace.TestError"],
+                },
+            },
         }
-    }
-}), ValidTestProtocol({
-    "namespace": "org.apache.avro.test.namespace",
-    "protocol": "TestValidRepeatedName",
-    "types": [
-        {"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
-        {"name": "ReferencedRecord", "type": "record",
-         "namespace": "org.apache.avro.other.namespace",
-         "fields": [{"name": "foo", "type": "string"}]},
-        {"name": "ReferencedRecord", "type": "record",
-         "fields": [{"name": "bar", "type": "double"}]},
-        {"name": "TestError",
-         "type": "error", "fields": [{"name": "message", "type": "string"}]}],
-    "messages": {
-        "echo": {
-            "request": [{"name": "qualified", "type": "ReferencedRecord"}],
-            "response": "org.apache.avro.other.namespace.ReferencedRecord"},
-        "error": {
-            "request": [],
-            "response": "null",
-            "errors": ["org.apache.avro.test.namespace.TestError"]}
-    }
-}), InvalidTestProtocol({
-    "namespace": "org.apache.avro.test.namespace",
-    "protocol": "TestInvalidRepeatedName",
-    "types": [
-        {"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
-        {"name": "ReferencedRecord", "type": "record",
-         "fields": [{"name": "foo", "type": "string"}]},
-        {"name": "ReferencedRecord", "type": "record",
-         "fields": [{"name": "bar", "type": "double"}]},
-        {"name": "TestError",
-         "type": "error", "fields": [{"name": "message", "type": "string"}]}],
-    "messages": {
-        "echo": {
-            "request": [{"name": "qualified", "type": "ReferencedRecord"}],
-            "response": "org.apache.avro.other.namespace.ReferencedRecord"
-        }, "error": {
-            "request": [],
-            "response": "null",
-            "errors": ["org.apache.avro.test.namespace.TestError"]
+    ),
+    ValidTestProtocol(
+        {
+            "namespace": "org.apache.avro.test.namespace",
+            "protocol": "TestValidRepeatedName",
+            "types": [
+                {"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
+                {
+                    "name": "ReferencedRecord",
+                    "type": "record",
+                    "namespace": "org.apache.avro.other.namespace",
+                    "fields": [{"name": "foo", "type": "string"}],
+                },
+                {
+                    "name": "ReferencedRecord",
+                    "type": "record",
+                    "fields": [{"name": "bar", "type": "double"}],
+                },
+                {
+                    "name": "TestError",
+                    "type": "error",
+                    "fields": [{"name": "message", "type": "string"}],
+                },
+            ],
+            "messages": {
+                "echo": {
+                    "request": [{"name": "qualified", "type": "ReferencedRecord"}],
+                    "response": "org.apache.avro.other.namespace.ReferencedRecord",
+                },
+                "error": {
+                    "request": [],
+                    "response": "null",
+                    "errors": ["org.apache.avro.test.namespace.TestError"],
+                },
+            },
         }
-    }
-}),
-    ValidTestProtocol({
-        "namespace": "org.apache.avro.test",
-        "protocol": "BulkData",
-        "types": [],
-        "messages": {
-            "read": {
-                "request": [],
-                "response": "bytes"
-            }, "write": {
-                "request": [{"name": "data", "type": "bytes"}],
-                "response": "null"
-            }
+    ),
+    InvalidTestProtocol(
+        {
+            "namespace": "org.apache.avro.test.namespace",
+            "protocol": "TestInvalidRepeatedName",
+            "types": [
+                {"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
+                {
+                    "name": "ReferencedRecord",
+                    "type": "record",
+                    "fields": [{"name": "foo", "type": "string"}],
+                },
+                {
+                    "name": "ReferencedRecord",
+                    "type": "record",
+                    "fields": [{"name": "bar", "type": "double"}],
+                },
+                {
+                    "name": "TestError",
+                    "type": "error",
+                    "fields": [{"name": "message", "type": "string"}],
+                },
+            ],
+            "messages": {
+                "echo": {
+                    "request": [{"name": "qualified", "type": "ReferencedRecord"}],
+                    "response": "org.apache.avro.other.namespace.ReferencedRecord",
+                },
+                "error": {
+                    "request": [],
+                    "response": "null",
+                    "errors": ["org.apache.avro.test.namespace.TestError"],
+                },
+            },
+        }
+    ),
+    ValidTestProtocol(
+        {
+            "namespace": "org.apache.avro.test",
+            "protocol": "BulkData",
+            "types": [],
+            "messages": {
+                "read": {"request": [], "response": "bytes"},
+                "write": {
+                    "request": [{"name": "data", "type": "bytes"}],
+                    "response": "null",
+                },
+            },
+        }
+    ),
+    ValidTestProtocol(
+        {
+            "protocol": "API",
+            "namespace": "xyz.api",
+            "types": [
+                {
+                    "type": "enum",
+                    "name": "Symbology",
+                    "namespace": "xyz.api.product",
+                    "symbols": ["OPRA", "CUSIP", "ISIN", "SEDOL"],
+                },
+                {
+                    "type": "record",
+                    "name": "Symbol",
+                    "namespace": "xyz.api.product",
+                    "fields": [
+                        {"name": "symbology", "type": "xyz.api.product.Symbology"},
+                        {"name": "symbol", "type": "string"},
+                    ],
+                },
+                {
+                    "type": "record",
+                    "name": "MultiSymbol",
+                    "namespace": "xyz.api.product",
+                    "fields": [
+                        {
+                            "name": "symbols",
+                            "type": {"type": "map", "values": "xyz.api.product.Symbol"},
+                        }
+                    ],
+                },
+            ],
+            "messages": {},
         }
-    }), ValidTestProtocol({
-        "protocol": "API",
-        "namespace": "xyz.api",
-        "types": [{
-            "type": "enum",
-            "name": "Symbology",
-            "namespace": "xyz.api.product",
-            "symbols": ["OPRA", "CUSIP", "ISIN", "SEDOL"]
-        }, {
-            "type": "record",
-            "name": "Symbol",
-            "namespace": "xyz.api.product",
-            "fields": [{"name": "symbology", "type": "xyz.api.product.Symbology"},
-                       {"name": "symbol", "type": "string"}]
-        }, {
-            "type": "record",
-            "name": "MultiSymbol",
-            "namespace": "xyz.api.product",
-            "fields": [{"name": "symbols",
-                        "type": {"type": "map", "values": "xyz.api.product.Symbol"}}]
-        }],
-        "messages": {}
-    }),
+    ),
 ]
 
 VALID_EXAMPLES = [e for e in EXAMPLES if getattr(e, "valid", False)]
@@ -260,22 +377,22 @@ VALID_EXAMPLES = [e for e in EXAMPLES if getattr(e, "valid", False)]
 
 class TestMisc(unittest.TestCase):
     def test_inner_namespace_set(self):
-        print('')
-        print('TEST INNER NAMESPACE')
-        print('===================')
-        print('')
+        print("")
+        print("TEST INNER NAMESPACE")
+        print("===================")
+        print("")
         proto = HELLO_WORLD.parse()
         self.assertEqual(proto.namespace, "com.acme")
         self.assertEqual(proto.fullname, "com.acme.HelloWorld")
-        greeting_type = proto.types_dict['Greeting']
-        self.assertEqual(greeting_type.namespace, 'com.acme')
+        greeting_type = proto.types_dict["Greeting"]
+        self.assertEqual(greeting_type.namespace, "com.acme")
 
     def test_inner_namespace_not_rendered(self):
         proto = HELLO_WORLD.parse()
-        self.assertEqual('com.acme.Greeting', proto.types[0].fullname)
-        self.assertEqual('Greeting', proto.types[0].name)
+        self.assertEqual("com.acme.Greeting", proto.types[0].fullname)
+        self.assertEqual("Greeting", proto.types[0].name)
         # but there shouldn't be 'namespace' rendered to json on the inner type
-        self.assertFalse('namespace' in proto.to_json()['types'][0])
+        self.assertFalse("namespace" in proto.to_json()["types"][0])
 
 
 class ProtocolParseTestCase(unittest.TestCase):
@@ -287,8 +404,7 @@ class ProtocolParseTestCase(unittest.TestCase):
         ignores this class. The autoloader will ignore this class as long as it has
         no methods starting with `test_`.
         """
-        super(ProtocolParseTestCase, self).__init__(
-            'parse_valid' if test_proto.valid else 'parse_invalid')
+        super().__init__("parse_valid" if test_proto.valid else "parse_invalid")
         self.test_proto = test_proto
 
     def parse_valid(self):
@@ -296,7 +412,7 @@ class ProtocolParseTestCase(unittest.TestCase):
         try:
             self.test_proto.parse()
         except avro.errors.ProtocolParseException:
-            self.fail("Valid protocol failed to parse: {!s}".format(self.test_proto))
+            self.fail(f"Valid protocol failed to parse: {self.test_proto!s}")
 
     def parse_invalid(self):
         """Parsing an invalid schema should error."""
@@ -305,7 +421,7 @@ class ProtocolParseTestCase(unittest.TestCase):
         except (avro.errors.ProtocolParseException, avro.errors.SchemaParseException):
             pass
         else:
-            self.fail("Invalid protocol should not have parsed: {!s}".format(self.test_proto))
+            self.fail(f"Invalid protocol should not have parsed: {self.test_proto!s}")
 
 
 class ErrorSchemaTestCase(unittest.TestCase):
@@ -317,15 +433,14 @@ class ErrorSchemaTestCase(unittest.TestCase):
         ignores this class. The autoloader will ignore this class as long as it has
         no methods starting with `test_`.
         """
-        super(ErrorSchemaTestCase, self).__init__('check_error_schema_exists')
+        super().__init__("check_error_schema_exists")
         self.test_proto = test_proto
 
     def check_error_schema_exists(self):
         """Protocol messages should always have at least a string error schema."""
         p = self.test_proto.parse()
         for k, m in p.messages.items():
-            self.assertIsNotNone(m.errors, "Message {} did not have the expected implicit "
-                                           "string error schema.".format(k))
+            self.assertIsNotNone(m.errors, f"Message {k} did not have the expected implicit string error schema.")
 
 
 class RoundTripParseTestCase(unittest.TestCase):
@@ -337,7 +452,7 @@ class RoundTripParseTestCase(unittest.TestCase):
         ignores this class. The autoloader will ignore this class as long as it has
         no methods starting with `test_`.
         """
-        super(RoundTripParseTestCase, self).__init__('parse_round_trip')
+        super().__init__("parse_round_trip")
         self.test_proto = test_proto
 
     def parse_round_trip(self):
@@ -356,5 +471,5 @@ def load_tests(loader, default_tests, pattern):
     return suite
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
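
For orientation, parsing a protocol string like the examples above and inspecting the result looks roughly like this; a sketch assuming avro.protocol.parse as the entry point (the Ping/Pong protocol is made up):

```python
import json

import avro.protocol

proto = avro.protocol.parse(
    json.dumps(
        {
            "namespace": "com.example",
            "protocol": "Ping",
            "types": [
                {
                    "name": "Pong",
                    "type": "record",
                    "fields": [{"name": "ok", "type": "boolean"}],
                }
            ],
            "messages": {"ping": {"request": [], "response": "Pong"}},
        }
    )
)
print(proto.fullname)                 # com.example.Ping
print(proto.types_dict["Pong"].name)  # Pong
# Every message carries at least the implicit "string" error schema:
print(proto.messages["ping"].errors is not None)  # True
```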
diff --git a/lang/py/avro/test/test_schema.py b/lang/py/avro/test/test_schema.py
index b3b95f4..52ecca5 100644
--- a/lang/py/avro/test/test_schema.py
+++ b/lang/py/avro/test/test_schema.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -32,7 +31,7 @@ import avro.schema
 class TestSchema:
     """A proxy for a schema string that provides useful test metadata."""
 
-    def __init__(self, data, name='', comment='', warnings=None):
+    def __init__(self, data, name="", comment="", warnings=None):
         if not isinstance(data, str):
             data = json.dumps(data)
         self.data = data
@@ -49,19 +48,21 @@ class TestSchema:
 
 class ValidTestSchema(TestSchema):
     """A proxy for a valid schema string that provides useful test metadata."""
+
     valid = True
 
 
 class InvalidTestSchema(TestSchema):
     """A proxy for an invalid schema string that provides useful test metadata."""
+
     valid = False
 
 
 PRIMITIVE_EXAMPLES = [InvalidTestSchema('"True"')]  # type: List[TestSchema]
-PRIMITIVE_EXAMPLES.append(InvalidTestSchema('True'))
+PRIMITIVE_EXAMPLES.append(InvalidTestSchema("True"))
 PRIMITIVE_EXAMPLES.append(InvalidTestSchema('{"no_type": "test"}'))
 PRIMITIVE_EXAMPLES.append(InvalidTestSchema('{"type": "panther"}'))
-PRIMITIVE_EXAMPLES.extend([ValidTestSchema('"{}"'.format(t)) for t in avro.schema.PRIMITIVE_TYPES])
+PRIMITIVE_EXAMPLES.extend([ValidTestSchema(f'"{t}"') for t in avro.schema.PRIMITIVE_TYPES])
 PRIMITIVE_EXAMPLES.extend([ValidTestSchema({"type": t}) for t in avro.schema.PRIMITIVE_TYPES])
 
 FIXED_EXAMPLES = [
@@ -74,23 +75,18 @@ FIXED_EXAMPLES = [
             "namespace": "org.apache.hadoop.avro",
         }
     ),
-    ValidTestSchema(
-        {"type": "fixed", "name": "NullNamespace", "namespace": None, "size": 1}
-    ),
-    ValidTestSchema(
-        {"type": "fixed", "name": "EmptyStringNamespace", "namespace": "", "size": 1}
-    ),
+    ValidTestSchema({"type": "fixed", "name": "NullNamespace", "namespace": None, "size": 1}),
+    ValidTestSchema({"type": "fixed", "name": "EmptyStringNamespace", "namespace": "", "size": 1}),
     InvalidTestSchema({"type": "fixed", "name": "Missing size"}),
     InvalidTestSchema({"type": "fixed", "size": 314}),
-    InvalidTestSchema({"type": "fixed", "size": 314, "name": "dr. spaceman"}, comment='AVRO-621'),
+    InvalidTestSchema({"type": "fixed", "size": 314, "name": "dr. spaceman"}, comment="AVRO-621"),
 ]
 
 ENUM_EXAMPLES = [
     ValidTestSchema({"type": "enum", "name": "Test", "symbols": ["A", "B"]}),
     ValidTestSchema({"type": "enum", "name": "AVRO2174", "symbols": ["nowhitespace"]}),
     InvalidTestSchema({"type": "enum", "name": "Status", "symbols": "Normal Caution Critical"}),
-    InvalidTestSchema({"type": "enum", "name": [0, 1, 1, 2, 3, 5, 8],
-                       "symbols": ["Golden", "Mean"]}),
+    InvalidTestSchema({"type": "enum", "name": [0, 1, 1, 2, 3, 5, 8], "symbols": ["Golden", "Mean"]}),
     InvalidTestSchema({"type": "enum", "symbols": ["I", "will", "fail", "no", "name"]}),
     InvalidTestSchema({"type": "enum", "name": "Test", "symbols": ["AA", "AA"]}),
     InvalidTestSchema({"type": "enum", "name": "AVRO2174", "symbols": ["white space"]}),
@@ -98,229 +94,415 @@ ENUM_EXAMPLES = [
 
 ARRAY_EXAMPLES = [
     ValidTestSchema({"type": "array", "items": "long"}),
-    ValidTestSchema({"type": "array",
-                     "items": {"type": "enum", "name": "Test", "symbols": ["A", "B"]}}),
+    ValidTestSchema(
+        {
+            "type": "array",
+            "items": {"type": "enum", "name": "Test", "symbols": ["A", "B"]},
+        }
+    ),
 ]
 
 MAP_EXAMPLES = [
     ValidTestSchema({"type": "map", "values": "long"}),
-    ValidTestSchema({"type": "map",
-                     "values": {"type": "enum", "name": "Test", "symbols": ["A", "B"]}}),
+    ValidTestSchema(
+        {
+            "type": "map",
+            "values": {"type": "enum", "name": "Test", "symbols": ["A", "B"]},
+        }
+    ),
 ]
 
 UNION_EXAMPLES = [
     ValidTestSchema(["string", "null", "long"]),
     InvalidTestSchema(["null", "null"]),
     InvalidTestSchema(["long", "long"]),
-    InvalidTestSchema([{"type": "array", "items": "long"},
-                       {"type": "array", "items": "string"}]),
+    InvalidTestSchema([{"type": "array", "items": "long"}, {"type": "array", "items": "string"}]),
 ]
 
 NAMED_IN_UNION_EXAMPLES = [
-  ValidTestSchema({
-    "namespace": "org.apache.avro.test",
-    "type": "record",
-    "name": "Test",
-    "fields": [
+    ValidTestSchema(
         {
-            "type": {
-                "symbols": ["one", "two"],
-                "type": "enum",
-                "name": "NamedEnum"
+            "namespace": "org.apache.avro.test",
+            "type": "record",
+            "name": "Test",
+            "fields": [
+                {
+                    "type": {
+                        "symbols": ["one", "two"],
+                        "type": "enum",
+                        "name": "NamedEnum",
+                    },
+                    "name": "thenamedenum",
                 },
-            "name": "thenamedenum"
-        },
-        {
-            "type": ["null", "NamedEnum"],
-            "name": "unionwithreftoenum"
+                {"type": ["null", "NamedEnum"], "name": "unionwithreftoenum"},
+            ],
         }
-    ]
-    })
+    )
 ]
 
 RECORD_EXAMPLES = [
     ValidTestSchema({"type": "record", "name": "Test", "fields": [{"name": "f", "type": "long"}]}),
     ValidTestSchema({"type": "error", "name": "Test", "fields": [{"name": "f", "type": "long"}]}),
-    ValidTestSchema({"type": "record", "name": "Node",
-                     "fields": [
-                         {"name": "label", "type": "string"},
-                         {"name": "children", "type": {"type": "array", "items": "Node"}}]}),
-    ValidTestSchema({"type": "record", "name": "Lisp",
-                     "fields": [{"name": "value",
-                                 "type": ["null", "string",
-                                          {"type": "record", "name": "Cons",
-                                           "fields": [{"name": "car", "type": "Lisp"},
-                                                      {"name": "cdr", "type": "Lisp"}]}]}]}),
-    ValidTestSchema({"type": "record", "name": "HandshakeRequest",
-                     "namespace": "org.apache.avro.ipc",
-                     "fields": [{"name": "clientHash",
-                                 "type": {"type": "fixed", "name": "MD5", "size": 16}},
-                                {"name": "clientProtocol", "type": ["null", "string"]},
-                                {"name": "serverHash", "type": "MD5"},
-                                {"name": "meta",
-                                 "type": ["null", {"type": "map", "values": "bytes"}]}]}),
-    ValidTestSchema({"type": "record", "name": "HandshakeResponse",
-                     "namespace": "org.apache.avro.ipc",
-                     "fields": [{"name": "match",
-                                 "type": {"type": "enum", "name": "HandshakeMatch",
-                                          "symbols": ["BOTH", "CLIENT", "NONE"]}},
-                                {"name": "serverProtocol", "type": ["null", "string"]},
-                                {"name": "serverHash",
-                                 "type": ["null", {"name": "MD5", "size": 16, "type": "fixed"}]},
-                                {"name": "meta",
-                                 "type": ["null", {"type": "map", "values": "bytes"}]}]}),
-    ValidTestSchema({"type": "record",
-                     "name": "Interop",
-                     "namespace": "org.apache.avro",
-                     "fields": [{"name": "intField", "type": "int"},
-                                {"name": "longField", "type": "long"},
-                                {"name": "stringField", "type": "string"},
-                                {"name": "boolField", "type": "boolean"},
-                                {"name": "floatField", "type": "float"},
-                                {"name": "doubleField", "type": "double"},
-                                {"name": "bytesField", "type": "bytes"},
-                                {"name": "nullField", "type": "null"},
-                                {"name": "arrayField", "type": {"type": "array", "items": "double"}},
-                                {"name": "mapField",
-                                 "type": {"type": "map",
-                                          "values": {"name": "Foo",
-                                                     "type": "record",
-                                                     "fields": [{"name": "label", "type": "string"}]}}},
-                                {"name": "unionField",
-                                 "type": ["boolean", "double", {"type": "array", "items": "bytes"}]},
-                                {"name": "enumField",
-                                 "type": {"type": "enum", "name": "Kind", "symbols": ["A", "B", "C"]}},
-                                {"name": "fixedField",
-                                 "type": {"type": "fixed", "name": "MD5", "size": 16}},
-                                {"name": "recordField",
-                                 "type": {"type": "record", "name": "Node",
-                                          "fields": [{"name": "label", "type": "string"},
-                                                     {"name": "children",
-                                                      "type": {"type": "array",
-                                                               "items": "Node"}}]}}]}),
-    ValidTestSchema({"type": "record", "name": "ipAddr",
-                     "fields": [{"name": "addr", "type": [{"name": "IPv6", "type": "fixed", "size": 16},
-                                                          {"name": "IPv4", "type": "fixed", "size": 4}]}]}),
-    InvalidTestSchema({"type": "record", "name": "Address",
-                       "fields": [{"type": "string"}, {"type": "string", "name": "City"}]}),
-    InvalidTestSchema({"type": "record", "name": "Event",
-                       "fields": [{"name": "Sponsor"}, {"name": "City", "type": "string"}]}),
-    InvalidTestSchema({"type": "record", "name": "Rainer",
-                       "fields": "His vision, from the constantly passing bars"}),
-    InvalidTestSchema({"name": ["Tom", "Jerry"], "type": "record",
-                       "fields": [{"name": "name", "type": "string"}]}),
+    ValidTestSchema(
+        {
+            "type": "record",
+            "name": "Node",
+            "fields": [
+                {"name": "label", "type": "string"},
+                {"name": "children", "type": {"type": "array", "items": "Node"}},
+            ],
+        }
+    ),
+    ValidTestSchema(
+        {
+            "type": "record",
+            "name": "Lisp",
+            "fields": [
+                {
+                    "name": "value",
+                    "type": [
+                        "null",
+                        "string",
+                        {
+                            "type": "record",
+                            "name": "Cons",
+                            "fields": [
+                                {"name": "car", "type": "Lisp"},
+                                {"name": "cdr", "type": "Lisp"},
+                            ],
+                        },
+                    ],
+                }
+            ],
+        }
+    ),
+    ValidTestSchema(
+        {
+            "type": "record",
+            "name": "HandshakeRequest",
+            "namespace": "org.apache.avro.ipc",
+            "fields": [
+                {
+                    "name": "clientHash",
+                    "type": {"type": "fixed", "name": "MD5", "size": 16},
+                },
+                {"name": "clientProtocol", "type": ["null", "string"]},
+                {"name": "serverHash", "type": "MD5"},
+                {"name": "meta", "type": ["null", {"type": "map", "values": "bytes"}]},
+            ],
+        }
+    ),
+    ValidTestSchema(
+        {
+            "type": "record",
+            "name": "HandshakeResponse",
+            "namespace": "org.apache.avro.ipc",
+            "fields": [
+                {
+                    "name": "match",
+                    "type": {
+                        "type": "enum",
+                        "name": "HandshakeMatch",
+                        "symbols": ["BOTH", "CLIENT", "NONE"],
+                    },
+                },
+                {"name": "serverProtocol", "type": ["null", "string"]},
+                {
+                    "name": "serverHash",
+                    "type": ["null", {"name": "MD5", "size": 16, "type": "fixed"}],
+                },
+                {"name": "meta", "type": ["null", {"type": "map", "values": "bytes"}]},
+            ],
+        }
+    ),
+    ValidTestSchema(
+        {
+            "type": "record",
+            "name": "Interop",
+            "namespace": "org.apache.avro",
+            "fields": [
+                {"name": "intField", "type": "int"},
+                {"name": "longField", "type": "long"},
+                {"name": "stringField", "type": "string"},
+                {"name": "boolField", "type": "boolean"},
+                {"name": "floatField", "type": "float"},
+                {"name": "doubleField", "type": "double"},
+                {"name": "bytesField", "type": "bytes"},
+                {"name": "nullField", "type": "null"},
+                {"name": "arrayField", "type": {"type": "array", "items": "double"}},
+                {
+                    "name": "mapField",
+                    "type": {
+                        "type": "map",
+                        "values": {
+                            "name": "Foo",
+                            "type": "record",
+                            "fields": [{"name": "label", "type": "string"}],
+                        },
+                    },
+                },
+                {
+                    "name": "unionField",
+                    "type": ["boolean", "double", {"type": "array", "items": "bytes"}],
+                },
+                {
+                    "name": "enumField",
+                    "type": {
+                        "type": "enum",
+                        "name": "Kind",
+                        "symbols": ["A", "B", "C"],
+                    },
+                },
+                {
+                    "name": "fixedField",
+                    "type": {"type": "fixed", "name": "MD5", "size": 16},
+                },
+                {
+                    "name": "recordField",
+                    "type": {
+                        "type": "record",
+                        "name": "Node",
+                        "fields": [
+                            {"name": "label", "type": "string"},
+                            {
+                                "name": "children",
+                                "type": {"type": "array", "items": "Node"},
+                            },
+                        ],
+                    },
+                },
+            ],
+        }
+    ),
+    ValidTestSchema(
+        {
+            "type": "record",
+            "name": "ipAddr",
+            "fields": [
+                {
+                    "name": "addr",
+                    "type": [
+                        {"name": "IPv6", "type": "fixed", "size": 16},
+                        {"name": "IPv4", "type": "fixed", "size": 4},
+                    ],
+                }
+            ],
+        }
+    ),
+    InvalidTestSchema(
+        {
+            "type": "record",
+            "name": "Address",
+            "fields": [{"type": "string"}, {"type": "string", "name": "City"}],
+        }
+    ),
+    InvalidTestSchema(
+        {
+            "type": "record",
+            "name": "Event",
+            "fields": [{"name": "Sponsor"}, {"name": "City", "type": "string"}],
+        }
+    ),
+    InvalidTestSchema(
+        {
+            "type": "record",
+            "name": "Rainer",
+            "fields": "His vision, from the constantly passing bars",
+        }
+    ),
+    InvalidTestSchema(
+        {
+            "name": ["Tom", "Jerry"],
+            "type": "record",
+            "fields": [{"name": "name", "type": "string"}],
+        }
+    ),
 ]
 
 DOC_EXAMPLES = [
-    ValidTestSchema({"type": "record", "name": "TestDoc", "doc": "Doc string",
-                     "fields": [{"name": "name", "type": "string", "doc": "Doc String"}]}),
+    ValidTestSchema(
+        {
+            "type": "record",
+            "name": "TestDoc",
+            "doc": "Doc string",
+            "fields": [{"name": "name", "type": "string", "doc": "Doc String"}],
+        }
+    ),
     ValidTestSchema({"type": "enum", "name": "Test", "symbols": ["A", "B"], "doc": "Doc String"}),
 ]
 
 OTHER_PROP_EXAMPLES = [
-    ValidTestSchema({"type": "record", "name": "TestRecord", "cp_string": "string",
-                     "cp_int": 1, "cp_array": [1, 2, 3, 4],
-                     "fields": [{"name": "f1", "type": "string", "cp_object": {"a": 1, "b": 2}},
-                                {"name": "f2", "type": "long", "cp_null": None}]}),
+    ValidTestSchema(
+        {
+            "type": "record",
+            "name": "TestRecord",
+            "cp_string": "string",
+            "cp_int": 1,
+            "cp_array": [1, 2, 3, 4],
+            "fields": [
+                {"name": "f1", "type": "string", "cp_object": {"a": 1, "b": 2}},
+                {"name": "f2", "type": "long", "cp_null": None},
+            ],
+        }
+    ),
     ValidTestSchema({"type": "map", "values": "long", "cp_boolean": True}),
-    ValidTestSchema({"type": "enum", "name": "TestEnum",
-                     "symbols": ["one", "two", "three"], "cp_float": 1.0}),
+    ValidTestSchema(
+        {
+            "type": "enum",
+            "name": "TestEnum",
+            "symbols": ["one", "two", "three"],
+            "cp_float": 1.0,
+        }
+    ),
 ]
 
 DECIMAL_LOGICAL_TYPE = [
-    ValidTestSchema({"type": "fixed", "logicalType": "decimal", "name": "TestDecimal", "precision": 4, "size": 10, "scale": 2}),
+    ValidTestSchema(
+        {
+            "type": "fixed",
+            "logicalType": "decimal",
+            "name": "TestDecimal",
+            "precision": 4,
+            "size": 10,
+            "scale": 2,
+        }
+    ),
     ValidTestSchema({"type": "bytes", "logicalType": "decimal", "precision": 4, "scale": 2}),
-    InvalidTestSchema({"type": "fixed", "logicalType": "decimal", "name": "TestDecimal2", "precision": 2, "scale": 2, "size": -2}),
+    InvalidTestSchema(
+        {
+            "type": "fixed",
+            "logicalType": "decimal",
+            "name": "TestDecimal2",
+            "precision": 2,
+            "scale": 2,
+            "size": -2,
+        }
+    ),
 ]
 
-DATE_LOGICAL_TYPE = [
-    ValidTestSchema({"type": "int", "logicalType": "date"})
-]
+DATE_LOGICAL_TYPE = [ValidTestSchema({"type": "int", "logicalType": "date"})]
 
-TIMEMILLIS_LOGICAL_TYPE = [
-    ValidTestSchema({"type": "int", "logicalType": "time-millis"})
-]
+TIMEMILLIS_LOGICAL_TYPE = [ValidTestSchema({"type": "int", "logicalType": "time-millis"})]
 
-TIMEMICROS_LOGICAL_TYPE = [
-    ValidTestSchema({"type": "long", "logicalType": "time-micros"})
-]
+TIMEMICROS_LOGICAL_TYPE = [ValidTestSchema({"type": "long", "logicalType": "time-micros"})]
 
-TIMESTAMPMILLIS_LOGICAL_TYPE = [
-    ValidTestSchema({"type": "long", "logicalType": "timestamp-millis"})
-]
+TIMESTAMPMILLIS_LOGICAL_TYPE = [ValidTestSchema({"type": "long", "logicalType": "timestamp-millis"})]
 
-TIMESTAMPMICROS_LOGICAL_TYPE = [
-    ValidTestSchema({"type": "long", "logicalType": "timestamp-micros"})
-]
+TIMESTAMPMICROS_LOGICAL_TYPE = [ValidTestSchema({"type": "long", "logicalType": "timestamp-micros"})]
 
-UUID_LOGICAL_TYPE = [
-    ValidTestSchema({"type": "string", "logicalType": "uuid"})
-]
+UUID_LOGICAL_TYPE = [ValidTestSchema({"type": "string", "logicalType": "uuid"})]
 
 IGNORED_LOGICAL_TYPE = [
     ValidTestSchema(
         {"type": "string", "logicalType": "unknown-logical-type"},
-        warnings=[avro.errors.IgnoredLogicalType('Unknown unknown-logical-type, using string.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Unknown unknown-logical-type, using string.")],
+    ),
     ValidTestSchema(
         {"type": "bytes", "logicalType": "decimal", "scale": 0},
-        warnings=[avro.errors.IgnoredLogicalType('Invalid decimal precision None. Must be a positive integer.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Invalid decimal precision None. Must be a positive integer.")],
+    ),
     ValidTestSchema(
         {"type": "bytes", "logicalType": "decimal", "precision": 2.4, "scale": 0},
-        warnings=[avro.errors.IgnoredLogicalType('Invalid decimal precision 2.4. Must be a positive integer.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Invalid decimal precision 2.4. Must be a positive integer.")],
+    ),
     ValidTestSchema(
         {"type": "bytes", "logicalType": "decimal", "precision": 2, "scale": -2},
-        warnings=[avro.errors.IgnoredLogicalType('Invalid decimal scale -2. Must be a positive integer.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Invalid decimal scale -2. Must be a positive integer.")],
+    ),
     ValidTestSchema(
         {"type": "bytes", "logicalType": "decimal", "precision": -2, "scale": 2},
-        warnings=[avro.errors.IgnoredLogicalType('Invalid decimal precision -2. Must be a positive integer.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Invalid decimal precision -2. Must be a positive integer.")],
+    ),
     ValidTestSchema(
         {"type": "bytes", "logicalType": "decimal", "precision": 2, "scale": 3},
-        warnings=[avro.errors.IgnoredLogicalType('Invalid decimal scale 3. Cannot be greater than precision 2.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Invalid decimal scale 3. Cannot be greater than precision 2.")],
+    ),
     ValidTestSchema(
-        {"type": "fixed", "logicalType": "decimal", "name": "TestIgnored", "precision": -10, "scale": 2, "size": 5},
-        warnings=[avro.errors.IgnoredLogicalType('Invalid decimal precision -10. Must be a positive integer.')]),
+        {
+            "type": "fixed",
+            "logicalType": "decimal",
+            "name": "TestIgnored",
+            "precision": -10,
+            "scale": 2,
+            "size": 5,
+        },
+        warnings=[avro.errors.IgnoredLogicalType("Invalid decimal precision -10. Must be a positive integer.")],
+    ),
     ValidTestSchema(
-        {"type": "fixed", "logicalType": "decimal", "name": "TestIgnored", "scale": 2, "size": 5},
-        warnings=[avro.errors.IgnoredLogicalType('Invalid decimal precision None. Must be a positive integer.')]),
+        {
+            "type": "fixed",
+            "logicalType": "decimal",
+            "name": "TestIgnored",
+            "scale": 2,
+            "size": 5,
+        },
+        warnings=[avro.errors.IgnoredLogicalType("Invalid decimal precision None. Must be a positive integer.")],
+    ),
     ValidTestSchema(
-        {"type": "fixed", "logicalType": "decimal", "name": "TestIgnored", "precision": 2, "scale": 3, "size": 2},
-        warnings=[avro.errors.IgnoredLogicalType('Invalid decimal scale 3. Cannot be greater than precision 2.')]),
+        {
+            "type": "fixed",
+            "logicalType": "decimal",
+            "name": "TestIgnored",
+            "precision": 2,
+            "scale": 3,
+            "size": 2,
+        },
+        warnings=[avro.errors.IgnoredLogicalType("Invalid decimal scale 3. Cannot be greater than precision 2.")],
+    ),
     ValidTestSchema(
-        {"type": "fixed", "logicalType": "decimal", "name": "TestIgnored", "precision": 311, "size": 129},
-        warnings=[avro.errors.IgnoredLogicalType('Invalid decimal precision 311. Max is 310.')]),
+        {
+            "type": "fixed",
+            "logicalType": "decimal",
+            "name": "TestIgnored",
+            "precision": 311,
+            "size": 129,
+        },
+        warnings=[avro.errors.IgnoredLogicalType("Invalid decimal precision 311. Max is 310.")],
+    ),
     ValidTestSchema(
         {"type": "float", "logicalType": "decimal", "precision": 2, "scale": 0},
-        warnings=[avro.errors.IgnoredLogicalType('Logical type decimal requires literal type bytes/fixed, not float.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Logical type decimal requires literal type bytes/fixed, not float.")],
+    ),
     ValidTestSchema(
         {"type": "int", "logicalType": "date1"},
-        warnings=[avro.errors.IgnoredLogicalType('Unknown date1, using int.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Unknown date1, using int.")],
+    ),
     ValidTestSchema(
         {"type": "long", "logicalType": "date"},
-        warnings=[avro.errors.IgnoredLogicalType('Logical type date requires literal type int, not long.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Logical type date requires literal type int, not long.")],
+    ),
     ValidTestSchema(
         {"type": "int", "logicalType": "time-milis"},
-        warnings=[avro.errors.IgnoredLogicalType('Unknown time-milis, using int.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Unknown time-milis, using int.")],
+    ),
     ValidTestSchema(
         {"type": "long", "logicalType": "time-millis"},
-        warnings=[avro.errors.IgnoredLogicalType('Logical type time-millis requires literal type int, not long.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Logical type time-millis requires literal type int, not long.")],
+    ),
     ValidTestSchema(
         {"type": "long", "logicalType": "time-micro"},
-        warnings=[avro.errors.IgnoredLogicalType('Unknown time-micro, using long.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Unknown time-micro, using long.")],
+    ),
     ValidTestSchema(
         {"type": "int", "logicalType": "time-micros"},
-        warnings=[avro.errors.IgnoredLogicalType('Logical type time-micros requires literal type long, not int.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Logical type time-micros requires literal type long, not int.")],
+    ),
     ValidTestSchema(
         {"type": "long", "logicalType": "timestamp-milis"},
-        warnings=[avro.errors.IgnoredLogicalType('Unknown timestamp-milis, using long.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Unknown timestamp-milis, using long.")],
+    ),
     ValidTestSchema(
         {"type": "int", "logicalType": "timestamp-millis"},
-        warnings=[avro.errors.IgnoredLogicalType('Logical type timestamp-millis requires literal type long, not int.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Logical type timestamp-millis requires literal type long, not int.")],
+    ),
     ValidTestSchema(
         {"type": "long", "logicalType": "timestamp-micro"},
-        warnings=[avro.errors.IgnoredLogicalType('Unknown timestamp-micro, using long.')]),
+        warnings=[avro.errors.IgnoredLogicalType("Unknown timestamp-micro, using long.")],
+    ),
     ValidTestSchema(
         {"type": "int", "logicalType": "timestamp-micros"},
-        warnings=[avro.errors.IgnoredLogicalType('Logical type timestamp-micros requires literal type long, not int.')])
+        warnings=[avro.errors.IgnoredLogicalType("Logical type timestamp-micros requires literal type long, not int.")],
+    ),
 ]
 
 EXAMPLES = PRIMITIVE_EXAMPLES
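
A note on the IGNORED_LOGICAL_TYPE fixtures above: an unusable `logicalType`
annotation is deliberately not a parse error. The parser keeps the literal type
and surfaces `avro.errors.IgnoredLogicalType` through the warnings machinery. A
sketch of observing that directly, reusing one fixture from above (scale 3
exceeds precision 2, so the decimal annotation is dropped):

```python
import json
import warnings

import avro.errors
import avro.schema

schema_json = json.dumps({"type": "bytes", "logicalType": "decimal", "precision": 2, "scale": 3})

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")  # never suppress repeats, as the test cases here do
    parsed = avro.schema.parse(schema_json)

assert parsed.type == "bytes"  # the literal type survives
assert any(isinstance(w.message, avro.errors.IgnoredLogicalType) for w in caught)
```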
@@ -350,7 +532,8 @@ class TestMisc(unittest.TestCase):
 
     def test_correct_recursive_extraction(self):
         """A recursive reference within a schema should be the same type every time."""
-        s = avro.schema.parse('''{
+        s = avro.schema.parse(
+            """{
             "type": "record",
             "name": "X",
             "fields": [{
@@ -360,7 +543,8 @@ class TestMisc(unittest.TestCase):
                     "name": "Y",
                     "fields": [{"name": "Z", "type": "X"}]}
             }]
-        }''')
+        }"""
+        )
         t = avro.schema.parse(str(s.fields[0].type))
         # If we've made it this far, the subschema was reasonably stringified; it could be reparsed.
         self.assertEqual("X", t.fields[0].type.name)
@@ -377,90 +561,110 @@ class TestMisc(unittest.TestCase):
     def test_name_space_specified(self):
         """Space combines with a name to become the fullname."""
         # name and namespace specified
-        fullname = avro.schema.Name('a', 'o.a.h', None).fullname
-        self.assertEqual(fullname, 'o.a.h.a')
+        fullname = avro.schema.Name("a", "o.a.h", None).fullname
+        self.assertEqual(fullname, "o.a.h.a")
 
     def test_fullname_space_specified(self):
         """When name contains dots, namespace should be ignored."""
-        fullname = avro.schema.Name('a.b.c.d', 'o.a.h', None).fullname
-        self.assertEqual(fullname, 'a.b.c.d')
+        fullname = avro.schema.Name("a.b.c.d", "o.a.h", None).fullname
+        self.assertEqual(fullname, "a.b.c.d")
 
     def test_name_default_specified(self):
         """Default space becomes the namespace when the namespace is None."""
-        fullname = avro.schema.Name('a', None, 'b.c.d').fullname
-        self.assertEqual(fullname, 'b.c.d.a')
+        fullname = avro.schema.Name("a", None, "b.c.d").fullname
+        self.assertEqual(fullname, "b.c.d.a")
 
     def test_fullname_default_specified(self):
         """When a name contains dots, default space should be ignored."""
-        fullname = avro.schema.Name('a.b.c.d', None, 'o.a.h').fullname
-        self.assertEqual(fullname, 'a.b.c.d')
+        fullname = avro.schema.Name("a.b.c.d", None, "o.a.h").fullname
+        self.assertEqual(fullname, "a.b.c.d")
 
     def test_fullname_space_default_specified(self):
         """When a name contains dots, namespace and default space should be ignored."""
-        fullname = avro.schema.Name('a.b.c.d', 'o.a.a', 'o.a.h').fullname
-        self.assertEqual(fullname, 'a.b.c.d')
+        fullname = avro.schema.Name("a.b.c.d", "o.a.a", "o.a.h").fullname
+        self.assertEqual(fullname, "a.b.c.d")
 
     def test_name_space_default_specified(self):
         """When name and space are specified, default space should be ignored."""
-        fullname = avro.schema.Name('a', 'o.a.a', 'o.a.h').fullname
-        self.assertEqual(fullname, 'o.a.a.a')
+        fullname = avro.schema.Name("a", "o.a.a", "o.a.h").fullname
+        self.assertEqual(fullname, "o.a.a.a")
 
     def test_equal_names(self):
         """Equality of names is defined on the fullname and is case-sensitive."""
-        self.assertEqual(avro.schema.Name('a.b.c.d', None, None), avro.schema.Name('d', 'a.b.c', None))
-        self.assertNotEqual(avro.schema.Name('C.d', None, None), avro.schema.Name('c.d', None, None))
+        self.assertEqual(
+            avro.schema.Name("a.b.c.d", None, None),
+            avro.schema.Name("d", "a.b.c", None),
+        )
+        self.assertNotEqual(avro.schema.Name("C.d", None, None), avro.schema.Name("c.d", None, None))
 
     def test_invalid_name(self):
         """The name portion of a fullname, record field names, and enum symbols must:
-           start with [A-Za-z_] and subsequently contain only [A-Za-z0-9_]"""
-        self.assertRaises(avro.errors.InvalidName, avro.schema.Name, 'an especially spacey cowboy', None, None)
-        self.assertRaises(avro.errors.InvalidName, avro.schema.Name, '99 problems but a name aint one', None, None)
+        start with [A-Za-z_] and subsequently contain only [A-Za-z0-9_]"""
+        self.assertRaises(
+            avro.errors.InvalidName,
+            avro.schema.Name,
+            "an especially spacey cowboy",
+            None,
+            None,
+        )
+        self.assertRaises(
+            avro.errors.InvalidName,
+            avro.schema.Name,
+            "99 problems but a name aint one",
+            None,
+            None,
+        )
 
     def test_null_namespace(self):
         """The empty string may be used as a namespace to indicate the null namespace."""
-        name = avro.schema.Name('name', "", None)
+        name = avro.schema.Name("name", "", None)
         self.assertEqual(name.fullname, "name")
         self.assertIsNone(name.space)
 
     def test_exception_is_not_swallowed_on_parse_error(self):
         """A specific exception message should appear on a json parse error."""
-        self.assertRaisesRegexp(avro.errors.SchemaParseException,
-                                r'Error parsing JSON: /not/a/real/file',
-                                avro.schema.parse,
-                                '/not/a/real/file')
+        self.assertRaisesRegexp(
+            avro.errors.SchemaParseException,
+            r"Error parsing JSON: /not/a/real/file",
+            avro.schema.parse,
+            "/not/a/real/file",
+        )
 
     def test_decimal_valid_type(self):
-        fixed_decimal_schema = ValidTestSchema({
-            "type": "fixed",
-            "logicalType": "decimal",
-            "name": "TestDecimal",
-            "precision": 4,
-            "scale": 2,
-            "size": 2})
+        fixed_decimal_schema = ValidTestSchema(
+            {
+                "type": "fixed",
+                "logicalType": "decimal",
+                "name": "TestDecimal",
+                "precision": 4,
+                "scale": 2,
+                "size": 2,
+            }
+        )
 
-        bytes_decimal_schema = ValidTestSchema({
-            "type": "bytes",
-            "logicalType": "decimal",
-            "precision": 4})
+        bytes_decimal_schema = ValidTestSchema({"type": "bytes", "logicalType": "decimal", "precision": 4})
 
         fixed_decimal = fixed_decimal_schema.parse()
-        self.assertEqual(4, fixed_decimal.get_prop('precision'))
-        self.assertEqual(2, fixed_decimal.get_prop('scale'))
-        self.assertEqual(2, fixed_decimal.get_prop('size'))
+        self.assertEqual(4, fixed_decimal.get_prop("precision"))
+        self.assertEqual(2, fixed_decimal.get_prop("scale"))
+        self.assertEqual(2, fixed_decimal.get_prop("size"))
 
         bytes_decimal = bytes_decimal_schema.parse()
-        self.assertEqual(4, bytes_decimal.get_prop('precision'))
-        self.assertEqual(0, bytes_decimal.get_prop('scale'))
+        self.assertEqual(4, bytes_decimal.get_prop("precision"))
+        self.assertEqual(0, bytes_decimal.get_prop("scale"))
 
     def test_fixed_decimal_valid_max_precision(self):
         # An 8 byte number can represent any 18 digit number.
-        fixed_decimal_schema = ValidTestSchema({
-            "type": "fixed",
-            "logicalType": "decimal",
-            "name": "TestDecimal",
-            "precision": 18,
-            "scale": 0,
-            "size": 8})
+        fixed_decimal_schema = ValidTestSchema(
+            {
+                "type": "fixed",
+                "logicalType": "decimal",
+                "name": "TestDecimal",
+                "precision": 18,
+                "scale": 0,
+                "size": 8,
+            }
+        )
 
         fixed_decimal = fixed_decimal_schema.parse()
         self.assertIsInstance(fixed_decimal, avro.schema.FixedSchema)
@@ -469,13 +673,16 @@ class TestMisc(unittest.TestCase):
     def test_fixed_decimal_invalid_max_precision(self):
         # An 8 byte number can't represent every 19 digit number, so the logical
         # type is not applied.
-        fixed_decimal_schema = ValidTestSchema({
-            "type": "fixed",
-            "logicalType": "decimal",
-            "name": "TestDecimal",
-            "precision": 19,
-            "scale": 0,
-            "size": 8})
+        fixed_decimal_schema = ValidTestSchema(
+            {
+                "type": "fixed",
+                "logicalType": "decimal",
+                "name": "TestDecimal",
+                "precision": 19,
+                "scale": 0,
+                "size": 8,
+            }
+        )
 
         fixed_decimal = fixed_decimal_schema.parse()
         self.assertIsInstance(fixed_decimal, avro.schema.FixedSchema)
@@ -483,22 +690,19 @@ class TestMisc(unittest.TestCase):
 
     def test_parse_invalid_symbol(self):
         """Disabling enumschema symbol validation should allow invalid symbols to pass."""
-        test_schema_string = json.dumps({
-            "type": "enum", "name": "AVRO2174", "symbols": ["white space"]})
+        test_schema_string = json.dumps({"type": "enum", "name": "AVRO2174", "symbols": ["white space"]})
 
         try:
             case = avro.schema.parse(test_schema_string, validate_enum_symbols=True)
         except avro.errors.InvalidName:
             pass
         else:
-            self.fail("When enum symbol validation is enabled, "
-                      "an invalid symbol should raise InvalidName.")
+            self.fail("When enum symbol validation is enabled, " "an invalid symbol should raise InvalidName.")
 
         try:
             case = avro.schema.parse(test_schema_string, validate_enum_symbols=False)
         except avro.errors.InvalidName:
-            self.fail("When enum symbol validation is disabled, "
-                      "an invalid symbol should not raise InvalidName.")
+            self.fail("When enum symbol validation is disabled, " "an invalid symbol should not raise InvalidName.")
 
 
 class SchemaParseTestCase(unittest.TestCase):
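
Stepping back to the `TestMisc` name tests above: together they pin down Avro's
name-resolution rules. An explicit namespace joins a bare name, the default
space applies only when no namespace is given, and a dotted name is already a
fullname, so both namespace and default space are ignored. Condensed into a
sketch that mirrors those assertions:

```python
import avro.schema

# avro.schema.Name(name, namespace, default_space).fullname
assert avro.schema.Name("a", "o.a.h", None).fullname == "o.a.h.a"  # namespace joins the bare name
assert avro.schema.Name("a", None, "b.c.d").fullname == "b.c.d.a"  # default space fills a missing namespace
assert avro.schema.Name("a", "o.a.a", "o.a.h").fullname == "o.a.a.a"  # explicit namespace beats the default
assert avro.schema.Name("a.b.c.d", "o.a.a", "o.a.h").fullname == "a.b.c.d"  # dotted names are already full
```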
@@ -510,8 +714,7 @@ class SchemaParseTestCase(unittest.TestCase):
         ignores this class. The autoloader will ignore this class as long as it has
         no methods starting with `test_`.
         """
-        super().__init__(
-            'parse_valid' if test_schema.valid else 'parse_invalid')
+        super().__init__("parse_valid" if test_schema.valid else "parse_invalid")
         self.test_schema = test_schema
         # Never hide repeated warnings when running this test case.
         warnings.simplefilter("always")
@@ -522,7 +725,7 @@ class SchemaParseTestCase(unittest.TestCase):
             try:
                 self.test_schema.parse()
             except (avro.errors.AvroException, avro.errors.SchemaParseException):
-                self.fail("Valid schema failed to parse: {!s}".format(self.test_schema))
+                self.fail(f"Valid schema failed to parse: {self.test_schema!s}")
             actual_messages = [str(wmsg.message) for wmsg in actual_warnings]
             if self.test_schema.warnings:
                 expected_messages = [str(w) for w in self.test_schema.warnings]
@@ -537,7 +740,7 @@ class SchemaParseTestCase(unittest.TestCase):
         except (avro.errors.AvroException, avro.errors.SchemaParseException):
             pass
         else:
-            self.fail("Invalid schema should not have parsed: {!s}".format(self.test_schema))
+            self.fail(f"Invalid schema should not have parsed: {self.test_schema!s}")
 
 
 class RoundTripParseTestCase(unittest.TestCase):
@@ -575,20 +778,24 @@ class DocAttributesTestCase(unittest.TestCase):
         ignores this class. The autoloader will ignore this class as long as it has
         no methods starting with `test_`.
         """
-        super(DocAttributesTestCase, self).__init__('check_doc_attributes')
+        super().__init__("check_doc_attributes")
         self.test_schema = test_schema
 
     def check_doc_attributes(self):
         """Documentation attributes should be preserved."""
         sch = self.test_schema.parse()
-        self.assertIsNotNone(sch.doc, "Failed to preserve 'doc' in schema: {!s}".format(self.test_schema))
-        if sch.type == 'record':
+        self.assertIsNotNone(sch.doc, f"Failed to preserve 'doc' in schema: {self.test_schema!s}")
+        if sch.type == "record":
             for f in sch.fields:
-                self.assertIsNotNone(f.doc, "Failed to preserve 'doc' in fields: {!s}".format(self.test_schema))
+                self.assertIsNotNone(
+                    f.doc,
+                    f"Failed to preserve 'doc' in fields: {self.test_schema!s}",
+                )
 
 
 class OtherAttributesTestCase(unittest.TestCase):
     """Enable generating attribute test cases over all the other-prop test schema."""
+
     _type_map = {
         "cp_array": list,
         "cp_boolean": bool,
@@ -605,7 +812,7 @@ class OtherAttributesTestCase(unittest.TestCase):
         ignores this class. The autoloader will ignore this class as long as it has
         no methods starting with `test_`.
         """
-        super(OtherAttributesTestCase, self).__init__('check_attributes')
+        super().__init__("check_attributes")
         self.test_schema = test_schema
 
     def _check_props(self, props):
@@ -620,9 +827,16 @@ class OtherAttributesTestCase(unittest.TestCase):
         except AttributeError:
             self.fail("Comparing a schema to a non-schema should be False, but not error.")
         round_trip = avro.schema.parse(str(sch))
-        self.assertEqual(sch, round_trip, "A schema should be equal to another schema parsed from the same json.")
-        self.assertEqual(sch.other_props, round_trip.other_props,
-                         "Properties were not preserved in a round-trip parse.")
+        self.assertEqual(
+            sch,
+            round_trip,
+            "A schema should be equal to another schema parsed from the same json.",
+        )
+        self.assertEqual(
+            sch.other_props,
+            round_trip.other_props,
+            "Properties were not preserved in a round-trip parse.",
+        )
         self._check_props(sch.other_props)
         if sch.type == "record":
             field_props = [f.other_props for f in sch.fields if f.other_props]
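
The round-trip assertions above hinge on `other_props`: attributes outside the
reserved schema keys are retained and exposed there. A small sketch using one
of the OTHER_PROP_EXAMPLES fixtures (the exact dict equality is an assumption
that `cp_boolean` is the only non-reserved attribute on this schema):

```python
import json

import avro.schema

sch = avro.schema.parse(json.dumps({"type": "map", "values": "long", "cp_boolean": True}))

# Non-reserved attributes are preserved and surfaced via other_props.
assert sch.other_props == {"cp_boolean": True}

# They also survive stringification and reparsing.
assert avro.schema.parse(str(sch)).other_props == {"cp_boolean": True}
```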
@@ -633,24 +847,24 @@ class OtherAttributesTestCase(unittest.TestCase):
 
 class CanonicalFormTestCase(unittest.TestCase):
     r"""Enable generating canonical-form test cases over the valid schema.
-        Transforming into Parsing Canonical Form
-        Assuming an input schema (in JSON form) that's already UTF-8 text for a valid Avro schema (including all
-        quotes as required by JSON), the following transformations will produce its Parsing Canonical Form:
-            - [PRIMITIVES] Convert primitive schemas to their simple form (e.g., int instead of {"type":"int"}).
-            - [FULLNAMES] Replace short names with fullnames, using applicable namespaces to do so. Then eliminate
-                namespace attributes, which are now redundant.
-            - [STRIP] Keep only attributes that are relevant to parsing data, which are: type, name, fields, symbols,
-                items, values, size. Strip all others (e.g., doc and aliases).
-            - [ORDER] Order the appearance of fields of JSON objects as follows: name, type, fields, symbols, items,
-                values, size. For example, if an object has type, name, and size fields, then the name field should
-                appear first, followed by the type and then the size fields.
-            - [STRINGS] For all JSON string literals in the schema text, replace any escaped characters
-                (e.g., \uXXXX escapes) with their UTF-8 equivalents.
-            - [INTEGERS] Eliminate quotes around and any leading zeros in front of JSON integer literals
-                (which appear in the size attributes of fixed schemas).
-            - [WHITESPACE] Eliminate all whitespace in JSON outside of string literals.
-        We depend on the Python json parser to properly handle the STRINGS and INTEGERS rules, so
-        we don't test them here.
+    Transforming into Parsing Canonical Form
+    Assuming an input schema (in JSON form) that's already UTF-8 text for a valid Avro schema (including all
+    quotes as required by JSON), the following transformations will produce its Parsing Canonical Form:
+        - [PRIMITIVES] Convert primitive schemas to their simple form (e.g., int instead of {"type":"int"}).
+        - [FULLNAMES] Replace short names with fullnames, using applicable namespaces to do so. Then eliminate
+            namespace attributes, which are now redundant.
+        - [STRIP] Keep only attributes that are relevant to parsing data, which are: type, name, fields, symbols,
+            items, values, size. Strip all others (e.g., doc and aliases).
+        - [ORDER] Order the appearance of fields of JSON objects as follows: name, type, fields, symbols, items,
+            values, size. For example, if an object has type, name, and size fields, then the name field should
+            appear first, followed by the type and then the size fields.
+        - [STRINGS] For all JSON string literals in the schema text, replace any escaped characters
+            (e.g., \uXXXX escapes) with their UTF-8 equivalents.
+        - [INTEGERS] Eliminate quotes around and any leading zeros in front of JSON integer literals
+            (which appear in the size attributes of fixed schemas).
+        - [WHITESPACE] Eliminate all whitespace in JSON outside of string literals.
+    We depend on the Python json parser to properly handle the STRINGS and INTEGERS rules, so
+    we don't test them here.
     """
 
     def compact_json_string(self, json_doc):
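
To make the docstring's rules concrete before the individual tests below, here
is a worked example (a sketch with an illustrative schema; the expected string
follows from the FULLNAMES, STRIP, ORDER, and WHITESPACE rules):

```python
import json

import avro.schema

# Short name plus namespace, a doc attribute, and pretty-printed JSON on input.
s = avro.schema.parse(
    json.dumps(
        {
            "namespace": "org.example",
            "name": "Suit",
            "type": "enum",
            "doc": "card suits",
            "symbols": ["SPADES", "HEARTS"],
        }
    )
)

# FULLNAMES folds the namespace into the name, STRIP drops doc,
# ORDER emits name before type before symbols, WHITESPACE compacts it.
assert s.canonical_form == '{"name":"org.example.Suit","type":"enum","symbols":["SPADES","HEARTS"]}'
```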
@@ -662,62 +876,62 @@ class CanonicalFormTestCase(unittest.TestCase):
         Returns:
             str: Compact-encoded, stringified JSON document
         """
-        return json.dumps(json_doc, separators=(',', ':'))
+        return json.dumps(json_doc, separators=(",", ":"))
 
     def test_primitive_int(self):
         """
         Convert primitive schemas to their simple form (e.g., int instead of {"type":"int"}).
         """
-        s = avro.schema.parse(json.dumps('int'))
+        s = avro.schema.parse(json.dumps("int"))
         self.assertEqual(s.canonical_form, '"int"')
 
         s = avro.schema.parse(json.dumps({"type": "int"}))
         self.assertEqual(s.canonical_form, '"int"')
 
     def test_primitive_float(self):
-        s = avro.schema.parse(json.dumps('float'))
+        s = avro.schema.parse(json.dumps("float"))
         self.assertEqual(s.canonical_form, '"float"')
 
         s = avro.schema.parse(json.dumps({"type": "float"}))
         self.assertEqual(s.canonical_form, '"float"')
 
     def test_primitive_double(self):
-        s = avro.schema.parse(json.dumps('double'))
+        s = avro.schema.parse(json.dumps("double"))
         self.assertEqual(s.canonical_form, '"double"')
 
         s = avro.schema.parse(json.dumps({"type": "double"}))
         self.assertEqual(s.canonical_form, '"double"')
 
     def test_primitive_null(self):
-        s = avro.schema.parse(json.dumps('null'))
+        s = avro.schema.parse(json.dumps("null"))
         self.assertEqual(s.canonical_form, '"null"')
 
         s = avro.schema.parse(json.dumps({"type": "null"}))
         self.assertEqual(s.canonical_form, '"null"')
 
     def test_primitive_bytes(self):
-        s = avro.schema.parse(json.dumps('bytes'))
+        s = avro.schema.parse(json.dumps("bytes"))
         self.assertEqual(s.canonical_form, '"bytes"')
 
         s = avro.schema.parse(json.dumps({"type": "bytes"}))
         self.assertEqual(s.canonical_form, '"bytes"')
 
     def test_primitive_long(self):
-        s = avro.schema.parse(json.dumps('long'))
+        s = avro.schema.parse(json.dumps("long"))
         self.assertEqual(s.canonical_form, '"long"')
 
         s = avro.schema.parse(json.dumps({"type": "long"}))
         self.assertEqual(s.canonical_form, '"long"')
 
     def test_primitive_boolean(self):
-        s = avro.schema.parse(json.dumps('boolean'))
+        s = avro.schema.parse(json.dumps("boolean"))
         self.assertEqual(s.canonical_form, '"boolean"')
 
         s = avro.schema.parse(json.dumps({"type": "boolean"}))
         self.assertEqual(s.canonical_form, '"boolean"')
 
     def test_primitive_string(self):
-        s = avro.schema.parse(json.dumps('string'))
+        s = avro.schema.parse(json.dumps("string"))
         self.assertEqual(s.canonical_form, '"string"')
 
         s = avro.schema.parse(json.dumps({"type": "string"}))
@@ -731,10 +945,8 @@ class CanonicalFormTestCase(unittest.TestCase):
         s = avro.schema.parse('{"name":"md5","type":"fixed","size":16}')
         self.assertEqual(
             s.canonical_form,
-            self.compact_json_string({
-                "name": "md5",
-                "type": "fixed",
-                "size": 16}))
+            self.compact_json_string({"name": "md5", "type": "fixed", "size": 16}),
+        )
 
     def test_string_with_escaped_characters(self):
         """
@@ -743,44 +955,48 @@ class CanonicalFormTestCase(unittest.TestCase):
         s = avro.schema.parse('{"name":"\u0041","type":"fixed","size":16}')
         self.assertEqual(
             s.canonical_form,
-            self.compact_json_string({
-                "name": "A",
-                "type": "fixed",
-                "size": 16}))
+            self.compact_json_string({"name": "A", "type": "fixed", "size": 16}),
+        )
 
     def test_fullname(self):
         """
         Replace short names with fullnames, using applicable namespaces to do so. Then eliminate namespace attributes, which are now redundant.
         """
-        s = avro.schema.parse(json.dumps({
-            "namespace": "avro",
-            "name": "example",
-            "type": "enum",
-            "symbols": ["a", "b"]}))
+        s = avro.schema.parse(
+            json.dumps(
+                {
+                    "namespace": "avro",
+                    "name": "example",
+                    "type": "enum",
+                    "symbols": ["a", "b"],
+                }
+            )
+        )
         self.assertEqual(
             s.canonical_form,
-            self.compact_json_string({
-                "name": "avro.example",
-                "type": "enum",
-                "symbols": ["a", "b"]}))
+            self.compact_json_string({"name": "avro.example", "type": "enum", "symbols": ["a", "b"]}),
+        )
 
     def test_strip(self):
         """
         Keep only attributes that are relevant to parsing data, which are: type, name, fields, symbols, items, values,
         size. Strip all others (e.g., doc and aliases).
         """
-        s = avro.schema.parse(json.dumps({
-            "name": "foo",
-            "type": "enum",
-            "doc": "test",
-            "aliases": ["bar"],
-            "symbols": ["a", "b"]}))
+        s = avro.schema.parse(
+            json.dumps(
+                {
+                    "name": "foo",
+                    "type": "enum",
+                    "doc": "test",
+                    "aliases": ["bar"],
+                    "symbols": ["a", "b"],
+                }
+            )
+        )
         self.assertEqual(
             s.canonical_form,
-            self.compact_json_string({
-                "name": "foo",
-                "type": "enum",
-                "symbols": ["a", "b"]}))
+            self.compact_json_string({"name": "foo", "type": "enum", "symbols": ["a", "b"]}),
+        )
 
     def test_order(self):
         """
@@ -788,114 +1004,102 @@ class CanonicalFormTestCase(unittest.TestCase):
         For example, if an object has type, name, and size fields, then the name field should appear first, followed
         by the type and then the size fields.
         """
-        s = avro.schema.parse(json.dumps({
-            "symbols": ["a", "b"],
-            "type": "enum",
-            "name": "example"}))
+        s = avro.schema.parse(json.dumps({"symbols": ["a", "b"], "type": "enum", "name": "example"}))
         self.assertEqual(
             s.canonical_form,
-            self.compact_json_string({
-                "name": "example",
-                "type": "enum",
-                "symbols": ["a", "b"]}))
+            self.compact_json_string({"name": "example", "type": "enum", "symbols": ["a", "b"]}),
+        )
 
     def test_whitespace(self):
         """
         Eliminate all whitespace in JSON outside of string literals.
         """
         s = avro.schema.parse(
-            '''{"type": "fixed",
+            """{"type": "fixed",
             "size": 16,
             "name": "md5"}
-                ''')
+                """
+        )
         self.assertEqual(
             s.canonical_form,
-            self.compact_json_string({
-                "name": "md5",
-                "type": "fixed",
-                "size": 16}))
+            self.compact_json_string({"name": "md5", "type": "fixed", "size": 16}),
+        )
 
     def test_record_field(self):
         """
         Ensure that record fields produce the correct parsing canonical form.
         """
-        s = avro.schema.parse(json.dumps({
-            "type": "record",
-            "name": "Test",
-            "doc": "This is a test schema",
-            "aliases": ["also", "known", "as"],
-            "fields": [
-                {
-                    "type": {
-                        "symbols": ["one", "two"],
-                        "type": "enum",
-                        "name": "NamedEnum"},
-                    "name": "thenamedenum",
-                    "doc": "This is a named enum"
-                },
-                {
-                    "type": ["null", "NamedEnum"],
-                    "name": "unionwithreftoenum"
-                }
-            ]
-        }))
-        expected = self.compact_json_string({
-            "name": "Test",
-            "type": "record",
-            "fields": [
-                {
-                    "name": "thenamedenum",
-                    "type": {
-                        "name": "NamedEnum",
-                        "type": "enum",
-                        "symbols": ["one", "two"]
-                    }
-                },
+        s = avro.schema.parse(
+            json.dumps(
                 {
-                    "name": "unionwithreftoenum",
-                    "type": ["null", "NamedEnum"]
+                    "type": "record",
+                    "name": "Test",
+                    "doc": "This is a test schema",
+                    "aliases": ["also", "known", "as"],
+                    "fields": [
+                        {
+                            "type": {
+                                "symbols": ["one", "two"],
+                                "type": "enum",
+                                "name": "NamedEnum",
+                            },
+                            "name": "thenamedenum",
+                            "doc": "This is a named enum",
+                        },
+                        {"type": ["null", "NamedEnum"], "name": "unionwithreftoenum"},
+                    ],
                 }
-            ]
-        })
+            )
+        )
+        expected = self.compact_json_string(
+            {
+                "name": "Test",
+                "type": "record",
+                "fields": [
+                    {
+                        "name": "thenamedenum",
+                        "type": {
+                            "name": "NamedEnum",
+                            "type": "enum",
+                            "symbols": ["one", "two"],
+                        },
+                    },
+                    {"name": "unionwithreftoenum", "type": ["null", "NamedEnum"]},
+                ],
+            }
+        )
         self.assertEqual(s.canonical_form, expected)
 
     def test_array(self):
         """
         Ensure that array schemas produce the correct parsing canonical form.
         """
-        s = avro.schema.parse(json.dumps({
-            "items": "long",
-            "type": "array"}))
+        s = avro.schema.parse(json.dumps({"items": "long", "type": "array"}))
         self.assertEqual(
             s.canonical_form,
-            self.compact_json_string({
-                "type": "array",
-                "items": "long"}))
+            self.compact_json_string({"type": "array", "items": "long"}),
+        )
 
     def test_map(self):
         """
         Ensure that map schemas produce the correct parsing canonical form.
         """
-        s = avro.schema.parse(json.dumps({
-            "values": "long",
-            "type": "map"}))
+        s = avro.schema.parse(json.dumps({"values": "long", "type": "map"}))
         self.assertEqual(
             s.canonical_form,
-            self.compact_json_string({
-                "type": "map",
-                "values": "long"}))
+            self.compact_json_string({"type": "map", "values": "long"}),
+        )
 
     def test_union(self):
         """
         Ensure that a union schema produces the correct parsing canonical form.
         """
         s = avro.schema.parse(json.dumps(["string", "null", "long"]))
-        self.assertEqual(
-            s.canonical_form,
-            '["string","null","long"]')
+        self.assertEqual(s.canonical_form, '["string","null","long"]')
 
     def test_large_record_handshake_request(self):
-        s = avro.schema.parse("""
+        s = avro.schema.parse(
+            """
             {
             "type": "record",
             "name": "HandshakeRequest",
@@ -913,17 +1117,22 @@ class CanonicalFormTestCase(unittest.TestCase):
                 }
             ]
             }
-            """)
+            """
+        )
         self.assertEqual(
             s.canonical_form,
-            ('{"name":"org.apache.avro.ipc.HandshakeRequest","type":"record",'
-             '"fields":[{"name":"clientHash","type":{"name":"org.apache.avro.ipc.MD5",'
-             '"type":"fixed","size":16}},{"name":"clientProtocol","type":["null","string"]},'
-             '{"name":"serverHash","type":{"name":"org.apache.avro.ipc.MD5","type":"fixed","size":16}},'
-             '{"name":"meta","type":["null",{"type":"map","values":"bytes"}]}]}'))
+            (
+                '{"name":"org.apache.avro.ipc.HandshakeRequest","type":"record",'
+                '"fields":[{"name":"clientHash","type":{"name":"org.apache.avro.ipc.MD5",'
+                '"type":"fixed","size":16}},{"name":"clientProtocol","type":["null","string"]},'
+                '{"name":"serverHash","type":{"name":"org.apache.avro.ipc.MD5","type":"fixed","size":16}},'
+                '{"name":"meta","type":["null",{"type":"map","values":"bytes"}]}]}'
+            ),
+        )
 
     def test_large_record_handshake_response(self):
-        s = avro.schema.parse("""
+        s = avro.schema.parse(
+            """
             {
             "type": "record",
             "name": "HandshakeResponse",
@@ -946,19 +1155,24 @@ class CanonicalFormTestCase(unittest.TestCase):
                 "name": "meta",
                 "type": ["null", {"type": "map", "values": "bytes"}]}]
                 }
-            """)
+            """
+        )
         self.assertEqual(
             s.canonical_form,
-            ('{"name":"org.apache.avro.ipc.HandshakeResponse","type":"rec'
-             'ord","fields":[{"name":"match","type":{"name":"org.apache.a'
-             'vro.ipc.HandshakeMatch","type":"enum","symbols":["BOTH","CL'
-             'IENT","NONE"]}},{"name":"serverProtocol","type":["null","st'
-             'ring"]},{"name":"serverHash","type":["null",{"name":"org.ap'
-             'ache.avro.ipc.MD5","type":"fixed","size":16}]},{"name":"met'
-             'a","type":["null",{"type":"map","values":"bytes"}]}]}'))
+            (
+                '{"name":"org.apache.avro.ipc.HandshakeResponse","type":"rec'
+                'ord","fields":[{"name":"match","type":{"name":"org.apache.a'
+                'vro.ipc.HandshakeMatch","type":"enum","symbols":["BOTH","CL'
+                'IENT","NONE"]}},{"name":"serverProtocol","type":["null","st'
+                'ring"]},{"name":"serverHash","type":["null",{"name":"org.ap'
+                'ache.avro.ipc.MD5","type":"fixed","size":16}]},{"name":"met'
+                'a","type":["null",{"type":"map","values":"bytes"}]}]}'
+            ),
+        )
 
     def test_large_record_interop(self):
-        s = avro.schema.parse("""
+        s = avro.schema.parse(
+            """
             {
             "type": "record",
             "name": "Interop",
@@ -1005,25 +1219,29 @@ class CanonicalFormTestCase(unittest.TestCase):
                 }
             ]
             }
-            """)
+            """
+        )
         self.assertEqual(
             s.canonical_form,
-            ('{"name":"org.apache.avro.Interop","type":"record","fields":[{"na'
-             'me":"intField","type":"int"},{"name":"longField","type":"long"},'
-             '{"name":"stringField","type":"string"},{"name":"boolField","type'
-             '":"boolean"},{"name":"floatField","type":"float"},{"name":"doubl'
-             'eField","type":"double"},{"name":"bytesField","type":"bytes"},{"'
-             'name":"nullField","type":"null"},{"name":"arrayField","type":{"t'
-             'ype":"array","items":"double"}},{"name":"mapField","type":{"type'
-             '":"map","values":{"name":"org.apache.avro.Foo","type":"record","'
-             'fields":[{"name":"label","type":"string"}]}}},{"name":"unionFiel'
-             'd","type":["boolean","double",{"type":"array","items":"bytes"}]}'
-             ',{"name":"enumField","type":{"name":"org.apache.avro.Kind","type'
-             '":"enum","symbols":["A","B","C"]}},{"name":"fixedField","type":{'
-             '"name":"org.apache.avro.MD5","type":"fixed","size":16}},{"name":'
-             '"recordField","type":{"name":"org.apache.avro.Node","type":"reco'
-             'rd","fields":[{"name":"label","type":"string"},{"name":"children'
-             '","type":{"type":"array","items":"org.apache.avro.Node"}}]}}]}'))
+            (
+                '{"name":"org.apache.avro.Interop","type":"record","fields":[{"na'
+                'me":"intField","type":"int"},{"name":"longField","type":"long"},'
+                '{"name":"stringField","type":"string"},{"name":"boolField","type'
+                '":"boolean"},{"name":"floatField","type":"float"},{"name":"doubl'
+                'eField","type":"double"},{"name":"bytesField","type":"bytes"},{"'
+                'name":"nullField","type":"null"},{"name":"arrayField","type":{"t'
+                'ype":"array","items":"double"}},{"name":"mapField","type":{"type'
+                '":"map","values":{"name":"org.apache.avro.Foo","type":"record","'
+                'fields":[{"name":"label","type":"string"}]}}},{"name":"unionFiel'
+                'd","type":["boolean","double",{"type":"array","items":"bytes"}]}'
+                ',{"name":"enumField","type":{"name":"org.apache.avro.Kind","type'
+                '":"enum","symbols":["A","B","C"]}},{"name":"fixedField","type":{'
+                '"name":"org.apache.avro.MD5","type":"fixed","size":16}},{"name":'
+                '"recordField","type":{"name":"org.apache.avro.Node","type":"reco'
+                'rd","fields":[{"name":"label","type":"string"},{"name":"children'
+                '","type":{"type":"array","items":"org.apache.avro.Node"}}]}}]}'
+            ),
+        )
 
 
 def load_tests(loader, default_tests, pattern):
@@ -1038,5 +1256,5 @@ def load_tests(loader, default_tests, pattern):
     return suite
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
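The canonical-form assertions above follow Avro's Parsing Canonical Form: non-normative attributes such as "doc" are stripped, and the attributes that remain are emitted compactly in a fixed order. A minimal sketch of that behavior, separate from the diff (the precise stripping and ordering rules live in the Avro specification):

```python
import json

import avro.schema

# "doc" is non-normative, so parsing canonical form drops it entirely.
s = avro.schema.parse(json.dumps({"type": "array", "items": "long", "doc": "dropped"}))
assert s.canonical_form == '{"type":"array","items":"long"}'
```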
diff --git a/lang/py/avro/test/test_script.py b/lang/py/avro/test/test_script.py
index 633cdaf..32437dc 100644
--- a/lang/py/avro/test/test_script.py
+++ b/lang/py/avro/test/test_script.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -37,7 +35,7 @@ import avro.schema
 NUM_RECORDS = 7
 
 
-SCHEMA = '''
+SCHEMA = """
 {
     "namespace": "test.avro",
         "name": "LooneyTunes",
@@ -48,7 +46,7 @@ SCHEMA = '''
             {"name": "type", "type": "string"}
         ]
 }
-'''
+"""
 
 LOONIES = (
     ("daffy", "duck", "duck"),
@@ -68,11 +66,11 @@ def looney_records():
 
 SCRIPT = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "scripts", "avro")
 
-_JSON_PRETTY = '''{
+_JSON_PRETTY = """{
     "first": "daffy",
     "last": "duck",
     "type": "duck"
-}'''
+}"""
 
 
 def gen_avro(filename):
@@ -149,22 +147,20 @@ class TestCat(unittest.TestCase):
 
     def test_fields(self):
         # One field selection (no comma)
-        out = self._run('--fields', 'last')
-        assert json.loads(out[0]) == {'last': 'duck'}
+        out = self._run("--fields", "last")
+        assert json.loads(out[0]) == {"last": "duck"}
 
         # Field selection (with comma and space)
-        out = self._run('--fields', 'first, last')
-        assert json.loads(out[0]) == {'first': 'daffy', 'last': 'duck'}
+        out = self._run("--fields", "first, last")
+        assert json.loads(out[0]) == {"first": "daffy", "last": "duck"}
 
         # Empty fields should get all
-        out = self._run('--fields', '')
-        assert json.loads(out[0]) == \
-            {'first': 'daffy', 'last': 'duck',
-             'type': 'duck'}
+        out = self._run("--fields", "")
+        assert json.loads(out[0]) == {"first": "daffy", "last": "duck", "type": "duck"}
 
         # Non-existent fields are ignored
-        out = self._run('--fields', 'first,last,age')
-        assert json.loads(out[0]) == {'first': 'daffy', 'last': 'duck'}
+        out = self._run("--fields", "first,last,age")
+        assert json.loads(out[0]) == {"first": "daffy", "last": "duck"}
 
 
 class TestWrite(unittest.TestCase):
@@ -234,7 +230,7 @@ class TestWrite(unittest.TestCase):
 
     def test_multi_file(self):
         tmp = _tempfile()
-        with open(tmp, 'wb') as o:
+        with open(tmp, "wb") as o:
             self._run(self.json_file, self.json_file, stdout=o)
         assert len(self.load_avro(tmp)) == 2 * NUM_RECORDS
         os.unlink(tmp)
diff --git a/lang/py/avro/test/test_tether_task.py b/lang/py/avro/test/test_tether_task.py
index 40aec03..ca50247 100644
--- a/lang/py/avro/test/test_tether_task.py
+++ b/lang/py/avro/test/test_tether_task.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -52,7 +50,7 @@ class TestTetherTask(unittest.TestCase):
             # launch the server in a separate process
             proc = subprocess.Popen([sys.executable, pyfile, "start_server", str(server_port)])
 
-            print("Mock server started process pid={}".format(proc.pid))
+            print(f"Mock server started process pid={proc.pid}")
 
             # Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
             # so we give the subprocess time to start up
@@ -67,7 +65,7 @@ class TestTetherTask(unittest.TestCase):
             task.configure(
                 avro.tether.tether_task.TaskType.MAP,
                 str(task.inschema),
-                str(task.midschema)
+                str(task.midschema),
             )
 
             # Serialize some data so we can send it to the input function
@@ -87,7 +85,7 @@ class TestTetherTask(unittest.TestCase):
             task.configure(
                 avro.tether.tether_task.TaskType.REDUCE,
                 str(task.midschema),
-                str(task.outschema)
+                str(task.outschema),
             )
 
             # Serialize some data so we can send it to the input function
@@ -109,9 +107,9 @@ class TestTetherTask(unittest.TestCase):
             task.status("Status message")
         finally:
             # close the process
-            if not(proc is None):
+            if not (proc is None):
                 proc.kill()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
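Black deliberately leaves expressions untouched, which is why `not(proc is None)` above merely gains a space to become `not (proc is None)`. A hypothetical follow-up cleanup, not part of this commit, would use the idiomatic comparison instead:

```python
# Equivalent, idiomatic spelling of the guard in the finally block:
if proc is not None:
    proc.kill()
```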
diff --git a/lang/py/avro/test/test_tether_task_runner.py b/lang/py/avro/test/test_tether_task_runner.py
index 16b5b99..25b7cd2 100644
--- a/lang/py/avro/test/test_tether_task_runner.py
+++ b/lang/py/avro/test/test_tether_task_runner.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -46,14 +44,14 @@ class TestTetherTaskRunner(unittest.TestCase):
         try:
             # launch the server in a separate process
             env = dict()
-            env["PYTHONPATH"] = ':'.join(sys.path)
+            env["PYTHONPATH"] = ":".join(sys.path)
             parent_port = avro.tether.util.find_port()
 
             pyfile = avro.test.mock_tether_parent.__file__
-            proc = subprocess.Popen([sys.executable, pyfile, "start_server", "{0}".format(parent_port)])
+            proc = subprocess.Popen([sys.executable, pyfile, "start_server", f"{parent_port}"])
             input_port = avro.tether.util.find_port()
 
-            print("Mock server started process pid={0}".format(proc.pid))
+            print(f"Mock server started process pid={proc.pid}")
             # Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
             # so we give the subprocess time to start up
             time.sleep(1)
@@ -70,17 +68,23 @@ class TestTetherTaskRunner(unittest.TestCase):
 
             # Test sending various messages to the server and ensuring they are processed correctly
             requestor = avro.tether.tether_task.HTTPRequestor(
-                "localhost", runner.server.server_address[1], avro.tether.tether_task.inputProtocol)
+                "localhost",
+                runner.server.server_address[1],
+                avro.tether.tether_task.inputProtocol,
+            )
 
             # TODO: We should validate that open worked by grabbing the STDOUT of the subprocess
             # and ensuring that it outputted the correct message.
 
             # Test the mapper
-            requestor.request("configure", {
-                "taskType": avro.tether.tether_task.TaskType.MAP,
-                "inSchema": str(runner.task.inschema),
-                "outSchema": str(runner.task.midschema)
-            })
+            requestor.request(
+                "configure",
+                {
+                    "taskType": avro.tether.tether_task.TaskType.MAP,
+                    "inSchema": str(runner.task.inschema),
+                    "outSchema": str(runner.task.midschema),
+                },
+            )
 
             # Serialize some data so we can send it to the input function
             datum = "This is a line of text"
@@ -96,10 +100,13 @@ class TestTetherTaskRunner(unittest.TestCase):
             requestor.request("input", {"data": data, "count": 1})
 
             # Test the reducer
-            requestor.request("configure", {
-                "taskType": avro.tether.tether_task.TaskType.REDUCE,
-                "inSchema": str(runner.task.midschema),
-                "outSchema": str(runner.task.outschema)}
+            requestor.request(
+                "configure",
+                {
+                    "taskType": avro.tether.tether_task.TaskType.REDUCE,
+                    "inSchema": str(runner.task.midschema),
+                    "outSchema": str(runner.task.outschema),
+                },
             )
 
             # Serialize some data so we can send it to the input function
@@ -137,7 +144,7 @@ class TestTetherTaskRunner(unittest.TestCase):
             raise
         finally:
             # close the process
-            if not(proc is None):
+            if not (proc is None):
                 proc.kill()
 
     def test2(self):
@@ -152,11 +159,11 @@ class TestTetherTaskRunner(unittest.TestCase):
         try:
             # launch the server in a separate process
             env = dict()
-            env["PYTHONPATH"] = ':'.join(sys.path)
+            env["PYTHONPATH"] = ":".join(sys.path)
             parent_port = avro.tether.util.find_port()
 
             pyfile = avro.test.mock_tether_parent.__file__
-            proc = subprocess.Popen([sys.executable, pyfile, "start_server", "{0}".format(parent_port)])
+            proc = subprocess.Popen([sys.executable, pyfile, "start_server", f"{parent_port}"])
 
             # Possible race condition? When we start tether_task_runner, its open call
             # tries to connect to the subprocess before the subprocess is fully started
@@ -164,16 +171,22 @@ class TestTetherTaskRunner(unittest.TestCase):
             time.sleep(1)
 
             # start the tether_task_runner in a separate process
-            env = {"AVRO_TETHER_OUTPUT_PORT": "{0}".format(parent_port)}
-            env["PYTHONPATH"] = ':'.join(sys.path)
-
-            runnerproc = subprocess.Popen([sys.executable, avro.tether.tether_task_runner.__file__,
-                                          "avro.test.word_count_task.WordCountTask"], env=env)
+            env = {"AVRO_TETHER_OUTPUT_PORT": f"{parent_port}"}
+            env["PYTHONPATH"] = ":".join(sys.path)
+
+            runnerproc = subprocess.Popen(
+                [
+                    sys.executable,
+                    avro.tether.tether_task_runner.__file__,
+                    "avro.test.word_count_task.WordCountTask",
+                ],
+                env=env,
+            )
 
             # Possible race condition: wait for the process to start
             time.sleep(1)
 
-            print("Mock server started process pid={0}".format(proc.pid))
+            print(f"Mock server started process pid={proc.pid}")
             # Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
             # so we give the subprocess time to start up
             time.sleep(1)
@@ -182,10 +195,10 @@ class TestTetherTaskRunner(unittest.TestCase):
             raise
         finally:
             # close the process
-            if not(runnerproc is None):
+            if not (runnerproc is None):
                 runnerproc.kill()
 
-            if not(proc is None):
+            if not (proc is None):
                 proc.kill()
 
 
diff --git a/lang/py/avro/test/test_tether_word_count.py b/lang/py/avro/test/test_tether_word_count.py
index eaf0ff5..abcac02 100644
--- a/lang/py/avro/test/test_tether_word_count.py
+++ b/lang/py/avro/test/test_tether_word_count.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -39,18 +37,25 @@ _AVRO_DIR = os.path.abspath(os.path.dirname(avro.__file__))
 
 
 def _version():
-    with open(os.path.join(_AVRO_DIR, 'VERSION.txt')) as v:
+    with open(os.path.join(_AVRO_DIR, "VERSION.txt")) as v:
         # Convert it back to the java version
-        return v.read().strip().replace('+', '-')
+        return v.read().strip().replace("+", "-")
 
 
 _AVRO_VERSION = _version()
-_JAR_PATH = os.path.join(os.path.dirname(os.path.dirname(_AVRO_DIR)),
-                         "java", "tools", "target", "avro-tools-{}.jar".format(_AVRO_VERSION))
-
-_LINES = ("the quick brown fox jumps over the lazy dog",
-          "the cow jumps over the moon",
-          "the rain in spain falls mainly on the plains")
+_JAR_PATH = os.path.join(
+    os.path.dirname(os.path.dirname(_AVRO_DIR)),
+    "java",
+    "tools",
+    "target",
+    f"avro-tools-{_AVRO_VERSION}.jar",
+)
+
+_LINES = (
+    "the quick brown fox jumps over the lazy dog",
+    "the cow jumps over the moon",
+    "the rain in spain falls mainly on the plains",
+)
 _IN_SCHEMA = '"string"'
 
 # The schema for the output of the mapper and reducer
@@ -62,8 +67,7 @@ _OUT_SCHEMA = """{
              {"name": "value", "type": "long", "order": "ignore"}]
 }"""
 
-_PYTHON_PATH = os.pathsep.join([os.path.dirname(os.path.dirname(avro.__file__)),
-                                os.path.dirname(__file__)])
+_PYTHON_PATH = os.pathsep.join([os.path.dirname(os.path.dirname(avro.__file__)), os.path.dirname(__file__)])
 
 
 def _has_java():
@@ -81,12 +85,12 @@ def _has_java():
             output = subprocess.check_output("/usr/libexec/java_home", stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as e:
             output = e.output
-        return (b"No Java runtime present" not in output)
+        return b"No Java runtime present" not in output
     return bool(distutils.spawn.find_executable("java"))
 
 
 @unittest.skipUnless(_has_java(), "No Java runtime present")
-@unittest.skipUnless(os.path.exists(_JAR_PATH), "{} not found".format(_JAR_PATH))
+@unittest.skipUnless(os.path.exists(_JAR_PATH), f"{_JAR_PATH} not found")
 class TestTetherWordCount(unittest.TestCase):
     """unittest for a python tethered map-reduce job."""
 
@@ -107,11 +111,11 @@ class TestTetherWordCount(unittest.TestCase):
             os.makedirs(self._input_path)
         infile = os.path.join(self._input_path, "lines.avro")
         self._write_lines(_LINES, infile)
-        self.assertTrue(os.path.exists(infile), "Missing the input file {}".format(infile))
+        self.assertTrue(os.path.exists(infile), f"Missing the input file {infile}")
 
         # ...and the output schema...
         self._output_schema_path = os.path.join(self._base_dir, "output.avsc")
-        with open(self._output_schema_path, 'w') as output_schema_handle:
+        with open(self._output_schema_path, "w") as output_schema_handle:
             output_schema_handle.write(_OUT_SCHEMA)
         self.assertTrue(os.path.exists(self._output_schema_path), "Missing the schema file")
 
@@ -136,28 +140,39 @@ class TestTetherWordCount(unittest.TestCase):
         """
         datum_writer = avro.io.DatumWriter(_IN_SCHEMA)
         writers_schema = avro.schema.parse(_IN_SCHEMA)
-        with avro.datafile.DataFileWriter(open(fname, 'wb'), datum_writer, writers_schema) as writer:
+        with avro.datafile.DataFileWriter(open(fname, "wb"), datum_writer, writers_schema) as writer:
             for datum in lines:
                 writer.append(datum)
 
     def test_tether_word_count(self):
         """Check that a tethered map-reduce job produces the output expected locally."""
         # Run the job...
-        args = ("java", "-jar", _JAR_PATH, "tether",
-                "--protocol", "http",
-                "--in", self._input_path,
-                "--out", self._output_path,
-                "--outschema", self._output_schema_path,
-                "--program", sys.executable,
-                "--exec_args", "-m avro.tether.tether_task_runner word_count_task.WordCountTask")
-        print("Command:\n\t{0}".format(" ".join(args)))
+        args = (
+            "java",
+            "-jar",
+            _JAR_PATH,
+            "tether",
+            "--protocol",
+            "http",
+            "--in",
+            self._input_path,
+            "--out",
+            self._output_path,
+            "--outschema",
+            self._output_schema_path,
+            "--program",
+            sys.executable,
+            "--exec_args",
+            "-m avro.tether.tether_task_runner word_count_task.WordCountTask",
+        )
+        print(f"Command:\n\t{' '.join(args)}")
         subprocess.check_call(args, env={"PYTHONPATH": _PYTHON_PATH, "PATH": os.environ["PATH"]})
 
         # ...and test the results.
         datum_reader = avro.io.DatumReader()
         outfile = os.path.join(self._output_path, "part-00000.avro")
-        expected_counts = collections.Counter(' '.join(_LINES).split())
-        with avro.datafile.DataFileReader(open(outfile, 'rb'), datum_reader) as reader:
+        expected_counts = collections.Counter(" ".join(_LINES).split())
+        with avro.datafile.DataFileReader(open(outfile, "rb"), datum_reader) as reader:
             actual_counts = {r["key"]: r["value"] for r in reader}
         self.assertDictEqual(actual_counts, expected_counts)
 
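The reformatted test keeps its oracle as a collections.Counter built from the input lines, which makes the expected word counts easy to check by hand. A standalone sketch of the same computation, independent of the diff:

```python
import collections

lines = ("the cow jumps over the moon",)
expected = collections.Counter(" ".join(lines).split())
assert expected["the"] == 2  # "the" appears twice in the sample line
assert expected["moon"] == 1
```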
diff --git a/lang/py/avro/test/word_count_task.py b/lang/py/avro/test/word_count_task.py
index 3e3d79e..1592738 100644
--- a/lang/py/avro/test/word_count_task.py
+++ b/lang/py/avro/test/word_count_task.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -34,8 +32,7 @@ class WordCountTask(avro.tether.tether_task.TetherTask):
     """
 
     def __init__(self):
-        """
-        """
+        """ """
 
         inschema = """{"type":"string"}"""
         midschema = """{"type":"record", "name":"Pair","namespace":"org.apache.avro.mapred","fields":[
@@ -60,7 +57,7 @@ class WordCountTask(avro.tether.tether_task.TetherTask):
         words = record.split()
 
         for w in words:
-            logging.info("WordCountTask.Map: word={0}".format(w))
+            logging.info("WordCountTask.Map: word=%s", w)
             collector.collect({"key": w, "value": 1})
 
     def reduce(self, record, collector):
@@ -90,7 +87,7 @@ class WordCountTask(avro.tether.tether_task.TetherTask):
         """
 
         # collect the current record
-        logging.info("WordCountTask.reduceFlush key={0} value={1}".format(record["key"], self.psum))
+        logging.info("WordCountTask.reduceFlush key=%s value=%s", record["key"], self.psum)
 
         collector.collect({"key": record["key"], "value": self.psum})
 
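The logging changes in this file go beyond formatting: replacing str.format calls with %-style placeholder arguments defers interpolation to the logging framework, so the message string is only built when a handler actually emits the record. A small sketch of the difference, not part of the commit:

```python
import logging

logging.basicConfig(level=logging.WARNING)
word = "duck"

# Eager: the argument string is built even though INFO records are filtered.
logging.info("WordCountTask.Map: word={}".format(word))

# Lazy: interpolation happens only if the record passes the level check.
logging.info("WordCountTask.Map: word=%s", word)
```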
diff --git a/lang/py/avro/tether/__init__.py b/lang/py/avro/tether/__init__.py
index 603f1bf..4875581 100644
--- a/lang/py/avro/tether/__init__.py
+++ b/lang/py/avro/tether/__init__.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -20,6 +18,12 @@
 # specific language governing permissions and limitations
 # under the License.
 
-from avro.tether.tether_task import HTTPRequestor, TaskType, TetherTask, inputProtocol, outputProtocol
+from avro.tether.tether_task import (
+    HTTPRequestor,
+    TaskType,
+    TetherTask,
+    inputProtocol,
+    outputProtocol,
+)
 from avro.tether.tether_task_runner import TaskRunner
 from avro.tether.util import find_port
diff --git a/lang/py/avro/tether/tether_task.py b/lang/py/avro/tether/tether_task.py
index 0a7bb70..c6bed4c 100644
--- a/lang/py/avro/tether/tether_task.py
+++ b/lang/py/avro/tether/tether_task.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -42,7 +40,7 @@ __all__ = ["TetherTask", "TaskType", "inputProtocol", "outputProtocol", "HTTPReq
 
 TaskType = None
 pfile = os.path.split(__file__)[0] + os.sep + "InputProtocol.avpr"
-with open(pfile, 'r') as hf:
+with open(pfile) as hf:
     prototxt = hf.read()
 
 inputProtocol = avro.protocol.parse(prototxt)
@@ -56,7 +54,7 @@ TaskType = _ttype(*taskschema.symbols)
 
 pfile = os.path.split(__file__)[0] + os.sep + "OutputProtocol.avpr"
 
-with open(pfile, 'r') as hf:
+with open(pfile) as hf:
     prototxt = hf.read()
 
 outputProtocol = avro.protocol.parse(prototxt)
@@ -117,7 +115,7 @@ def keys_are_equal(rec1, rec2, fkeys):
     """
 
     for f in fkeys:
-        if not(rec1[f] == rec2[f]):
+        if not (rec1[f] == rec2[f]):
             return False
 
     return True
@@ -249,7 +247,7 @@ class TetherTask(abc.ABC):
         if clientPort == 0:
             raise avro.errors.UsageError("AVRO_TETHER_OUTPUT_PORT env var is not set")
 
-        self.log.info("TetherTask.open: Opening connection to parent server on port={0}".format(clientPort))
+        self.log.info("TetherTask.open: Opening connection to parent server on port=%d", clientPort)
 
         # self.outputClient =  avro.ipc.Requestor(outputProtocol, self.clientTransceiver)
         # since HTTP is stateless, a new transceiver
@@ -261,7 +259,7 @@ class TetherTask(abc.ABC):
         self.outputClient = HTTPRequestor("127.0.0.1", clientPort, outputProtocol)
 
         try:
-            self.outputClient.request('configure', {"port": inputport})
+            self.outputClient.request("configure", {"port": inputport})
         except Exception:
             estr = traceback.format_exc()
             self.fail(estr)
@@ -288,17 +286,17 @@ class TetherTask(abc.ABC):
             inSchema = avro.schema.parse(inSchemaText)
             outSchema = avro.schema.parse(outSchemaText)
 
-            if (taskType == TaskType.MAP):
+            if taskType == TaskType.MAP:
                 self.inReader = avro.io.DatumReader(writers_schema=inSchema, readers_schema=self.inschema)
                 self.midCollector = Collector(outSchemaText, self.outputClient)
 
-            elif(taskType == TaskType.REDUCE):
+            elif taskType == TaskType.REDUCE:
                 self.midReader = avro.io.DatumReader(writers_schema=inSchema, readers_schema=self.midschema)
                 # this.outCollector = new Collector<OUT>(outSchema);
                 self.outCollector = Collector(outSchemaText, self.outputClient)
 
                 # determine which fields in the input record are they keys for the reducer
-                self._red_fkeys = [f.name for f in self.midschema.fields if not(f.order == 'ignore')]
+                self._red_fkeys = [f.name for f in self.midschema.fields if not (f.order == "ignore")]
 
         except Exception as e:
 
@@ -315,7 +313,7 @@ class TetherTask(abc.ABC):
         self._partitions = npartitions
 
     def input(self, data, count):
-        """ Recieve input from the server
+        """Recieve input from the server
 
         Parameters
         ------------------------------------------------------
@@ -329,20 +327,20 @@ class TetherTask(abc.ABC):
             decoder = avro.io.BinaryDecoder(bdata)
 
             for i in range(count):
-                if (self.taskType == TaskType.MAP):
+                if self.taskType == TaskType.MAP:
                     inRecord = self.inReader.read(decoder)
 
                     # Do we need to pass midCollector if it's declared as an instance variable
                     self.map(inRecord, self.midCollector)
 
-                elif (self.taskType == TaskType.REDUCE):
+                elif self.taskType == TaskType.REDUCE:
 
                     # store the previous record
                     prev = self.midRecord
 
                     # read the new record
                     self.midRecord = self.midReader.read(decoder)
-                    if (prev is not None and not(keys_are_equal(self.midRecord, prev, self._red_fkeys))):
+                    if prev is not None and not (keys_are_equal(self.midRecord, prev, self._red_fkeys)):
                         # since the key has changed we need to finalize the processing
                         # for this group of key,value pairs
                         self.reduceFlush(prev, self.outCollector)
@@ -350,19 +348,19 @@ class TetherTask(abc.ABC):
 
         except Exception as e:
             estr = traceback.format_exc()
-            self.log.warning("failing: " + estr)
+            self.log.warning("failing: %s", estr)
             self.fail(estr)
 
     def complete(self):
         """
         Process the complete request
         """
-        if ((self.taskType == TaskType.REDUCE) and not(self.midRecord is None)):
+        if (self.taskType == TaskType.REDUCE) and not (self.midRecord is None):
             try:
                 self.reduceFlush(self.midRecord, self.outCollector)
             except Exception as e:
                 estr = traceback.format_exc()
-                self.log.warning("failing: " + estr)
+                self.log.warning("failing: %s", estr)
                 self.fail(estr)
 
         self.outputClient.request("complete", dict())
@@ -382,7 +380,7 @@ class TetherTask(abc.ABC):
 
     @abc.abstractmethod
     def reduce(self, record, collector):
-        """ Called with input values to generate reducer output. Inputs are sorted by the mapper
+        """Called with input values to generate reducer output. Inputs are sorted by the mapper
         key.
 
         The reduce function is invoked once for each value belonging to a given key outputted
@@ -425,7 +423,7 @@ class TetherTask(abc.ABC):
         """
         Call to fail the task.
         """
-        self.log.error("TetherTask.fail: failure occured message follows:\n{0}".format(message))
+        self.log.error("TetherTask.fail: failure occured message follows:\n%s", message)
         try:
             message = message.decode()
         except AttributeError:
@@ -440,7 +438,7 @@ class TetherTask(abc.ABC):
 
     def close(self):
         self.log.info("TetherTask.close: closing")
-        if not(self.clienTransciever is None):
+        if not (self.clienTransciever is None):
             try:
                 self.clienTransciever.close()
 
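Among the pyupgrade changes above, open(pfile, 'r') becomes open(pfile); the two are equivalent because open defaults to read-only text mode. A trivial sketch:

```python
# mode defaults to "r" (read-only, text), so no argument is needed.
with open(__file__) as fh:
    first_line = fh.readline()
```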
diff --git a/lang/py/avro/tether/tether_task_runner.py b/lang/py/avro/tether/tether_task_runner.py
index 98f694e..ccd5a02 100644
--- a/lang/py/avro/tether/tether_task_runner.py
+++ b/lang/py/avro/tether/tether_task_runner.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -60,32 +58,32 @@ class TaskRunnerResponder(avro.ipc.Responder):
 
     def invoke(self, message, request):
         try:
-            if message.name == 'configure':
+            if message.name == "configure":
                 self.log.info("TetherTaskRunner: Received configure")
                 self.task.configure(request["taskType"], request["inSchema"], request["outSchema"])
-            elif message.name == 'partitions':
+            elif message.name == "partitions":
                 self.log.info("TetherTaskRunner: Received partitions")
                 try:
                     self.task.partitions = request["partitions"]
                 except Exception as e:
-                    self.log.error("Exception occured while processing the partitions message: Message:\n" + traceback.format_exc())
+                    self.log.error("Exception occured while processing the partitions message: Message:\n%s", traceback.format_exc())
                     raise
-            elif message.name == 'input':
+            elif message.name == "input":
                 self.log.info("TetherTaskRunner: Received input")
                 self.task.input(request["data"], request["count"])
-            elif message.name == 'abort':
+            elif message.name == "abort":
                 self.log.info("TetherTaskRunner: Received abort")
                 self.runner.close()
-            elif message.name == 'complete':
+            elif message.name == "complete":
                 self.log.info("TetherTaskRunner: Received complete")
                 self.task.complete()
                 self.task.close()
                 self.runner.close()
             else:
-                self.log.warning("TetherTaskRunner: Received unknown message {0}".format(message.name))
+                self.log.warning("TetherTaskRunner: Received unknown message %s", message.name)
 
         except Exception as e:
-            self.log.error("Error occured while processing message: {0}".format(message.name))
+            self.log.error("Error occured while processing message: %s", message.name)
             e = traceback.format_exc()
             self.task.fail(e)
 
@@ -102,20 +100,18 @@ def HTTPHandlerGen(runner):
     runner - instance of the task runner
     """
 
-    if not(isinstance(runner, weakref.ProxyType)):
+    if not (isinstance(runner, weakref.ProxyType)):
         runnerref = weakref.proxy(runner)
     else:
         runnerref = runner
 
     class TaskRunnerHTTPHandler(http.server.BaseHTTPRequestHandler):
-        """Create a handler for the parent.
-        """
+        """Create a handler for the parent."""
 
         runner = runnerref
 
         def __init__(self, *args, **param):
-            """
-            """
+            """ """
             http.server.BaseHTTPRequestHandler.__init__(self, *args, **param)
 
         def do_POST(self):
@@ -124,7 +120,7 @@ def HTTPHandlerGen(runner):
             call_request = call_request_reader.read_framed_message()
             resp_body = self.responder.respond(call_request)
             self.send_response(200)
-            self.send_header('Content-Type', 'avro/binary')
+            self.send_header("Content-Type", "avro/binary")
             self.end_headers()
             resp_writer = avro.ipc.FramedWriter(self.wfile)
             resp_writer.write_framed_message(resp_body)
@@ -187,7 +183,7 @@ class TaskRunner:
         self.task.open(port, clientPort=outputport)
 
         # wait for the other thread to finish
-        if (join):
+        if join:
             self.task.ready_for_shutdown.wait()
             self.server.shutdown()
 
@@ -204,7 +200,7 @@ class TaskRunner:
         self.task.close()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     # TODO: Make the logging level a parameter we can set
     # logging.basicConfig(level=logging.INFO,filename='/tmp/log',filemode='w')
     logging.basicConfig(level=logging.INFO)
@@ -216,7 +212,7 @@ if __name__ == '__main__':
 
     mod, cname = fullcls.rsplit(".", 1)
 
-    logging.info("tether_task_runner.__main__: Task: {0}".format(fullcls))
+    logging.info(f"tether_task_runner.__main__: Task: {fullcls}")
 
     modobj = __import__(mod, fromlist=cname)
 
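For context on the weakref branch reformatted above: HTTPHandlerGen stores only a weak proxy to the runner so the generated handler class does not keep the runner alive. A minimal sketch of the same pattern, assuming any ordinary object in place of the real task runner:

```python
import weakref

class Runner:
    closed = False

runner = Runner()
# Reuse an existing proxy; otherwise wrap the object in one.
ref = runner if isinstance(runner, weakref.ProxyType) else weakref.proxy(runner)
assert ref.closed is False  # attribute access is forwarded to the live runner
```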
diff --git a/lang/py/avro/tether/util.py b/lang/py/avro/tether/util.py
index 5bc3410..05454d1 100644
--- a/lang/py/avro/tether/util.py
+++ b/lang/py/avro/tether/util.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
diff --git a/lang/py/avro/timezones.py b/lang/py/avro/timezones.py
index aeef181..2d7667d 100644
--- a/lang/py/avro/timezones.py
+++ b/lang/py/avro/timezones.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
diff --git a/lang/py/avro/tool.py b/lang/py/avro/tool.py
index 8c206c9..a6e3fa8 100644
--- a/lang/py/avro/tool.py
+++ b/lang/py/avro/tool.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-# -*- mode: python -*-
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -40,14 +38,14 @@ import avro.protocol
 
 class GenericResponder(avro.ipc.Responder):
     def __init__(self, proto, msg, datum):
-        proto_json = open(proto, 'rb').read()
+        proto_json = open(proto, "rb").read()
         avro.ipc.Responder.__init__(self, avro.protocol.parse(proto_json))
         self.msg = msg
         self.datum = datum
 
     def invoke(self, message, request):
         if message.name == self.msg:
-            print("Message: %s Datum: %s" % (message.name, self.datum), file=sys.stderr)
+            print(f"Message: {message.name} Datum: {self.datum}", file=sys.stderr)
             # server will shut down after processing a single Avro request
             global server_should_shutdown
             server_should_shutdown = True
@@ -61,7 +59,7 @@ class GenericHandler(http.server.BaseHTTPRequestHandler):
         call_request = call_request_reader.read_framed_message()
         resp_body = self.responder.respond(call_request)
         self.send_response(200)
-        self.send_header('Content-Type', 'avro/binary')
+        self.send_header("Content-Type", "avro/binary")
         self.end_headers()
         resp_writer = avro.ipc.FramedWriter(self.wfile)
         resp_writer.write_framed_message(resp_body)
@@ -80,7 +78,7 @@ def run_server(uri, proto, msg, datum):
     server_should_shutdown = False
     responder = GenericResponder(proto, msg, datum)
     server = http.server.HTTPServer(server_addr, GenericHandler)
-    print("Port: %s" % server.server_port)
+    print(f"Port: {server.server_port}")
     sys.stdout.flush()
     server.allow_reuse_address = True
     print("Starting server.", file=sys.stderr)
@@ -90,32 +88,32 @@ def run_server(uri, proto, msg, datum):
 def send_message(uri, proto, msg, datum):
     url_obj = urllib.parse.urlparse(uri)
     client = avro.ipc.HTTPTransceiver(url_obj.hostname, url_obj.port)
-    proto_json = open(proto, 'rb').read()
+    proto_json = open(proto, "rb").read()
     requestor = avro.ipc.Requestor(avro.protocol.parse(proto_json), client)
     print(requestor.request(msg, datum))
 
+
 ##
 # TODO: Replace this with fileinput()
 
 
 def file_or_stdin(f):
-    return sys.stdin if f == '-' else open(f, 'rb')
+    return sys.stdin if f == "-" else open(f, "rb")
 
 
 def main(args=sys.argv):
     if len(args) == 1:
-        print("Usage: %s [dump|rpcreceive|rpcsend]" % args[0])
+        print(f"Usage: {args[0]} [dump|rpcreceive|rpcsend]")
         return 1
 
     if args[1] == "dump":
         if len(args) != 3:
-            print("Usage: %s dump input_file" % args[0])
+            print(f"Usage: {args[0]} dump input_file")
             return 1
         for d in avro.datafile.DataFileReader(file_or_stdin(args[2]), avro.io.DatumReader()):
             print(repr(d))
     elif args[1] == "rpcreceive":
-        usage_str = "Usage: %s rpcreceive uri protocol_file " % args[0]
-        usage_str += "message_name (-data d | -file f)"
+        usage_str = f"Usage: {args[0]} rpcreceive uri protocol_file message_name (-data d | -file f)"
         if len(args) not in [5, 7]:
             print(usage_str)
             return 1
@@ -123,7 +121,7 @@ def main(args=sys.argv):
         datum = None
         if len(args) > 5:
             if args[5] == "-file":
-                reader = open(args[6], 'rb')
+                reader = open(args[6], "rb")
                 datum_reader = avro.io.DatumReader()
                 dfr = avro.datafile.DataFileReader(reader, datum_reader)
                 datum = next(dfr)
@@ -135,8 +133,7 @@ def main(args=sys.argv):
                 return 1
         run_server(uri, proto, msg, datum)
     elif args[1] == "rpcsend":
-        usage_str = "Usage: %s rpcsend uri protocol_file " % args[0]
-        usage_str += "message_name (-data d | -file f)"
+        usage_str = f"Usage: {args[0]} rpcsend uri protocol_file message_name (-data d | -file f)"
         if len(args) not in [5, 7]:
             print(usage_str)
             return 1
@@ -144,7 +141,7 @@ def main(args=sys.argv):
         datum = None
         if len(args) > 5:
             if args[5] == "-file":
-                reader = open(args[6], 'rb')
+                reader = open(args[6], "rb")
                 datum_reader = avro.io.DatumReader()
                 dfr = avro.datafile.DataFileReader(reader, datum_reader)
                 datum = next(dfr)
@@ -160,7 +157,9 @@ def main(args=sys.argv):
 
 if __name__ == "__main__":
     if os.path.dirname(avro.io.__file__) in sys.path:
-        warnings.warn("Invoking avro/tool.py directly is likely to lead to a name collision "
-                      "with the python io module. Try doing `python -m avro.tool` instead.")
+        warnings.warn(
+            "Invoking avro/tool.py directly is likely to lead to a name collision "
+            "with the python io module. Try doing `python -m avro.tool` instead."
+        )
 
     sys.exit(main(sys.argv))
diff --git a/lang/py/pyproject.toml b/lang/py/pyproject.toml
new file mode 100644
index 0000000..74c70fc
--- /dev/null
+++ b/lang/py/pyproject.toml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[tool.black]
+line-length = 150
+
+[tool.isort]
+profile = 'black'
diff --git a/lang/py/scripts/avro b/lang/py/scripts/avro
index ad07fd7..ffb8a00 100755
--- a/lang/py/scripts/avro
+++ b/lang/py/scripts/avro
@@ -216,7 +216,7 @@ def write(opts, files):
             schema = avro.schema.parse(schema_file.read())
         out = _open(opts.output, "wb")
     except (IOError, OSError) as e:
-        raise avro.errors.UsageError("Can't open file - %s" % e)
+        raise avro.errors.UsageError(f"Can't open file - {e}")
 
     writer = avro.datafile.DataFileWriter(getattr(out, 'buffer', out), avro.io.DatumWriter(), schema)
 
@@ -271,7 +271,7 @@ def main(argv):
             "write": write,
         }[command_name]
     except KeyError:
-        raise avro.errors.UsageError("Unknown command - {!s}".format(command_name))
+        raise avro.errors.UsageError(f"Unknown command - {command_name!s}")
     command(opts, args)
 
 
diff --git a/lang/py/setup.py b/lang/py/setup.py
index e8565c2..d8d0fa3 100755
--- a/lang/py/setup.py
+++ b/lang/py/setup.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -19,15 +18,16 @@
 # limitations under the License.
 
 
-import setuptools  # type: ignore
 import distutils.errors
 import glob
 import os
 import subprocess
 
+import setuptools  # type: ignore
+
 _HERE = os.path.dirname(os.path.abspath(__file__))
-_AVRO_DIR = os.path.join(_HERE, 'avro')
-_VERSION_FILE_NAME = 'VERSION.txt'
+_AVRO_DIR = os.path.join(_HERE, "avro")
+_VERSION_FILE_NAME = "VERSION.txt"
 
 
 def _is_distribution():
@@ -39,7 +39,7 @@ def _is_distribution():
     """
     # If a file PKG-INFO exists as a sibling of setup.py,
     # assume we are running as source distribution:
-    return os.path.exists(os.path.join(_HERE, 'PKG-INFO'))
+    return os.path.exists(os.path.join(_HERE, "PKG-INFO"))
 
 
 def _generate_package_data():
@@ -48,30 +48,30 @@ def _generate_package_data():
     This data will already exist in a distribution package,
     so this function only runs for local version control work tree.
     """
-    distutils.log.info('Generating package data')
+    distutils.log.info("Generating package data")
 
     # Avro top-level source directory:
     root_dir = os.path.dirname(os.path.dirname(_HERE))
-    share_dir = os.path.join(root_dir, 'share')
+    share_dir = os.path.join(root_dir, "share")
 
     # Create a PEP440 compliant version file.
     version_file_path = os.path.join(share_dir, _VERSION_FILE_NAME)
-    with open(version_file_path, 'rb') as vin:
-        version = vin.read().replace(b'-', b'+')
-    with open(os.path.join(_AVRO_DIR, _VERSION_FILE_NAME), 'wb') as vout:
+    with open(version_file_path, "rb") as vin:
+        version = vin.read().replace(b"-", b"+")
+    with open(os.path.join(_AVRO_DIR, _VERSION_FILE_NAME), "wb") as vout:
         vout.write(version)
 
-    avro_schemas_dir = os.path.join(share_dir, 'schemas', 'org', 'apache', 'avro')
-    ipc_dir = os.path.join(avro_schemas_dir, 'ipc')
-    tether_dir = os.path.join(avro_schemas_dir, 'mapred', 'tether')
+    avro_schemas_dir = os.path.join(share_dir, "schemas", "org", "apache", "avro")
+    ipc_dir = os.path.join(avro_schemas_dir, "ipc")
+    tether_dir = os.path.join(avro_schemas_dir, "mapred", "tether")
 
     # Copy necessary avsc files:
     avsc_files = (
-        ((share_dir, 'test', 'schemas', 'interop.avsc'), ('',)),
-        ((ipc_dir, 'HandshakeRequest.avsc'), ('',)),
-        ((ipc_dir, 'HandshakeResponse.avsc'), ('',)),
-        ((tether_dir, 'InputProtocol.avpr'), ('tether',)),
-        ((tether_dir, 'OutputProtocol.avpr'), ('tether',)),
+        ((share_dir, "test", "schemas", "interop.avsc"), ("",)),
+        ((ipc_dir, "HandshakeRequest.avsc"), ("",)),
+        ((ipc_dir, "HandshakeResponse.avsc"), ("",)),
+        ((tether_dir, "InputProtocol.avpr"), ("tether",)),
+        ((tether_dir, "OutputProtocol.avpr"), ("tether",)),
     )
 
     for src, dst in avsc_files:
@@ -84,13 +84,13 @@ class GenerateInteropDataCommand(setuptools.Command):
     """A command to generate Avro files for data interop test."""
 
     user_options = [
-        ('schema-file=', None, 'path to input Avro schema file'),
-        ('output-path=', None, 'path to output Avro data files'),
+        ("schema-file=", None, "path to input Avro schema file"),
+        ("output-path=", None, "path to output Avro data files"),
     ]
 
     def initialize_options(self):
-        self.schema_file = os.path.join(_AVRO_DIR, 'interop.avsc')
-        self.output_path = os.path.join(_AVRO_DIR, 'test', 'interop', 'data')
+        self.schema_file = os.path.join(_AVRO_DIR, "interop.avsc")
+        self.output_path = os.path.join(_AVRO_DIR, "test", "interop", "data")
 
     def finalize_options(self):
         pass
@@ -99,10 +99,10 @@ class GenerateInteropDataCommand(setuptools.Command):
         # Late import -- this can only be run when avro is on the pythonpath,
         # more or less after install.
         import avro.test.gen_interop_data
+
         if not os.path.exists(self.output_path):
             os.makedirs(self.output_path)
-        avro.test.gen_interop_data.generate(self.schema_file,
-                                            os.path.join(self.output_path, 'py.avro'))
+        avro.test.gen_interop_data.generate(self.schema_file, os.path.join(self.output_path, "py.avro"))
 
 
 def _get_version():
@@ -123,10 +123,12 @@ def main():
     if not _is_distribution():
         _generate_package_data()
 
-    setuptools.setup(cmdclass={
-        "generate_interop_data": GenerateInteropDataCommand,
-    })
+    setuptools.setup(
+        cmdclass={
+            "generate_interop_data": GenerateInteropDataCommand,
+        }
+    )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/lang/py/tox.ini b/lang/py/tox.ini
index 1495788..569846c 100644
--- a/lang/py/tox.ini
+++ b/lang/py/tox.ini
@@ -67,12 +67,12 @@ commands_post =
 
 [testenv:lint]
 deps =
+    black
     isort
-    pycodestyle
 commands_pre =
 commands =
+    black --check .
     isort --check-only .
-    pycodestyle
 commands_post =
 
 [testenv:typechecks]
@@ -83,13 +83,3 @@ extras =
     mypy
 commands =
     mypy
-
-[tool:isort]
-extra_standard_library = setuptools
-force_to_top = setuptools
-line_length = 150
-
-[pycodestyle]
-exclude = .eggs,.tox,build
-max-line-length = 150
-statistics = True