Posted to commits@beam.apache.org by bh...@apache.org on 2022/06/14 21:44:17 UTC

[beam] branch master updated: Update references to Jira to GH for the Python SDK (#21831)

This is an automated email from the ASF dual-hosted git repository.

bhulette pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/beam.git


The following commit(s) were added to refs/heads/master by this push:
     new 8a1aad68243 Update references to Jira to GH for the Python SDK (#21831)
8a1aad68243 is described below

commit 8a1aad6824319abf662570de36e2580db8e9450e
Author: Danny McCormick <da...@google.com>
AuthorDate: Tue Jun 14 17:44:10 2022 -0400

    Update references to Jira to GH for the Python SDK (#21831)
    
    * Update references to Jira to GH for the Python SDK
    
    * Fix a few breakages
    
    * One more fix
    
    * line length linting
    
    * Fix string
    
    * A few more replacements
    
    * Use links instead of Issue #
    
    * Formatting
    
    * Formatting
    
    * Formatting
    
    * Formatting
    
    * Formatting
    
    * Formatting
    
    * Extraneous space
    
    * One more long line
---
 sdks/python/.pylintrc                              |   4 +-
 sdks/python/apache_beam/coders/coder_impl.py       |  26 ++---
 sdks/python/apache_beam/coders/coders.py           |   7 +-
 sdks/python/apache_beam/coders/row_coder.py        |   3 +-
 sdks/python/apache_beam/coders/row_coder_test.py   |   7 +-
 sdks/python/apache_beam/coders/typecoders_test.py  |   2 +-
 sdks/python/apache_beam/dataframe/frame_base.py    |   8 +-
 sdks/python/apache_beam/dataframe/frames.py        | 113 ++++++++++++---------
 sdks/python/apache_beam/dataframe/frames_test.py   |  72 +++++++------
 sdks/python/apache_beam/dataframe/io.py            |   4 +-
 sdks/python/apache_beam/dataframe/io_test.py       |   4 +-
 .../apache_beam/dataframe/pandas_doctests_test.py  |  23 +++--
 sdks/python/apache_beam/dataframe/schemas_test.py  |   5 +-
 .../examples/complete/game/game_stats_it_test.py   |   4 +-
 .../examples/complete/game/leader_board_it_test.py |   4 +-
 .../examples/complete/juliaset/setup.py            |   6 +-
 .../examples/dataframe/taxiride_test.py            |   3 +-
 .../transforms/aggregation/combineglobally_test.py |   4 +-
 .../examples/streaming_wordcount_debugging.py      |   3 +-
 .../streaming_wordcount_debugging_it_test.py       |   3 +-
 sdks/python/apache_beam/io/external/gcp/pubsub.py  |   4 +-
 .../io/external/xlang_jdbcio_it_test.py            |   6 +-
 .../io/external/xlang_kinesisio_it_test.py         |   6 +-
 sdks/python/apache_beam/io/fileio.py               |   2 +-
 sdks/python/apache_beam/io/fileio_test.py          |   9 +-
 .../io/gcp/big_query_query_to_table_it_test.py     |   5 +-
 sdks/python/apache_beam/io/gcp/bigquery.py         |  17 ++--
 .../apache_beam/io/gcp/bigquery_file_loads.py      |   7 +-
 .../apache_beam/io/gcp/bigquery_read_it_test.py    |   3 +-
 sdks/python/apache_beam/io/gcp/bigtableio.py       |   4 +-
 sdks/python/apache_beam/io/gcp/bigtableio_test.py  |   3 +-
 .../apache_beam/io/gcp/gcsio_integration_test.py   |   6 +-
 sdks/python/apache_beam/io/gcp/pubsub.py           |   4 +-
 .../apache_beam/io/gcp/pubsub_integration_test.py  |   7 +-
 sdks/python/apache_beam/io/gcp/pubsub_test.py      |   3 +-
 sdks/python/apache_beam/io/watermark_estimators.py |   4 +-
 sdks/python/apache_beam/ml/inference/base.py       |  17 ++--
 .../ml/inference/sklearn_inference_test.py         |  20 +++-
 .../python/apache_beam/options/pipeline_options.py |   5 +-
 .../apache_beam/options/pipeline_options_test.py   |   9 +-
 .../apache_beam/options/value_provider_test.py     |   6 +-
 sdks/python/apache_beam/pipeline.py                |  17 ++--
 sdks/python/apache_beam/runners/common.py          |  36 ++++---
 .../runners/dataflow/dataflow_metrics.py           |   7 +-
 .../runners/dataflow/dataflow_metrics_test.py      |   4 +-
 .../runners/dataflow/dataflow_runner.py            |  17 ++--
 .../runners/dataflow/dataflow_runner_test.py       |  10 +-
 .../runners/dataflow/internal/apiclient.py         |   5 +-
 .../runners/dataflow/test_dataflow_runner.py       |   4 +-
 .../apache_beam/runners/direct/direct_runner.py    |   3 +-
 .../runners/direct/direct_runner_test.py           |   4 +-
 .../runners/direct/transform_evaluator.py          |   5 +-
 .../runners/interactive/augmented_pipeline.py      |   9 +-
 .../runners/interactive/caching/streaming_cache.py |   6 +-
 .../dataproc/dataproc_cluster_manager.py           |   5 +-
 .../runners/interactive/interactive_beam.py        |   5 +-
 .../runners/interactive/interactive_runner.py      |   3 +-
 .../runners/interactive/recording_manager.py       |   4 +-
 .../apache_beam/runners/interactive/utils.py       |   4 +-
 .../python/apache_beam/runners/pipeline_context.py |   6 +-
 .../runners/portability/flink_runner_test.py       |  17 ++--
 .../portability/fn_api_runner/fn_runner_test.py    |  16 +--
 .../portability/fn_api_runner/translations.py      |   6 +-
 .../portability/fn_api_runner/worker_handlers.py   |   3 +-
 .../runners/portability/local_job_service.py       |   4 +-
 .../runners/portability/portable_runner.py         |   7 +-
 .../runners/portability/portable_runner_test.py    |  10 +-
 .../runners/portability/samza_runner_test.py       |  16 +--
 .../runners/portability/spark_runner_test.py       |  20 ++--
 .../portability/spark_uber_jar_job_server.py       |   4 +-
 .../apache_beam/runners/portability/stager_test.py |  11 +-
 .../apache_beam/runners/worker/bundle_processor.py |   3 +-
 .../apache_beam/runners/worker/data_plane.py       |   6 +-
 .../apache_beam/runners/worker/operations.py       |   4 +-
 .../apache_beam/runners/worker/sdk_worker_main.py  |   3 +-
 .../testing/benchmarks/nexmark/queries/query10.py  |   5 +-
 .../testing/benchmarks/nexmark/queries/query4.py   |   3 +-
 .../testing/benchmarks/nexmark/queries/query5.py   |   3 +-
 .../python/apache_beam/testing/test_stream_test.py |  21 ++--
 .../apache_beam/tools/coders_microbenchmark.py     |   3 +-
 .../transforms/combinefn_lifecycle_test.py         |   2 +-
 sdks/python/apache_beam/transforms/combiners.py    |  10 +-
 sdks/python/apache_beam/transforms/core.py         |   9 +-
 sdks/python/apache_beam/transforms/ptransform.py   |   2 +-
 .../apache_beam/transforms/ptransform_test.py      |   3 +-
 .../apache_beam/transforms/sideinputs_test.py      |  12 +--
 sdks/python/apache_beam/transforms/stats.py        |   3 +-
 sdks/python/apache_beam/transforms/trigger.py      |  14 +--
 sdks/python/apache_beam/transforms/trigger_test.py |   8 +-
 .../apache_beam/transforms/userstate_test.py       |   4 +-
 sdks/python/apache_beam/transforms/util.py         |   6 +-
 sdks/python/apache_beam/typehints/batch.py         |   5 +-
 .../typehints/native_type_compatibility.py         |   8 +-
 .../typehints/native_type_compatibility_test.py    |   4 +-
 .../apache_beam/typehints/typed_pipeline_test.py   |  21 ++--
 sdks/python/apache_beam/typehints/typehints.py     |   9 +-
 sdks/python/apache_beam/utils/counters.py          |   3 +-
 sdks/python/apache_beam/utils/retry.py             |   4 +-
 sdks/python/apache_beam/utils/timestamp.py         |  12 ++-
 sdks/python/apache_beam/utils/timestamp_test.py    |   6 +-
 sdks/python/apache_beam/utils/urns.py              |   6 +-
 sdks/python/build-requirements.txt                 |   2 +-
 sdks/python/mypy.ini                               |   2 +-
 sdks/python/scripts/run_integration_test.sh        |   2 +-
 sdks/python/test-suites/dataflow/common.gradle     |   2 +-
 sdks/python/test-suites/direct/xlang/build.gradle  |   2 +-
 sdks/python/test-suites/portable/common.gradle     |   2 +-
 sdks/python/test-suites/tox/py37/build.gradle      |   2 +-
 sdks/python/test-suites/tox/py38/build.gradle      |   2 +-
 sdks/python/test-suites/tox/py39/build.gradle      |   2 +-
 sdks/python/test-suites/tox/pycommon/build.gradle  |   2 +-
 sdks/python/tox.ini                                |   6 +-
 112 files changed, 550 insertions(+), 398 deletions(-)

diff --git a/sdks/python/.pylintrc b/sdks/python/.pylintrc
index de241c03936..eedd234ed7e 100644
--- a/sdks/python/.pylintrc
+++ b/sdks/python/.pylintrc
@@ -122,7 +122,7 @@ disable =
   not-callable,
   pointless-statement,
   protected-access,
-  raise-missing-from, #TODO(BEAM-12991) Enable and fix warnings
+  raise-missing-from, #TODO(https://github.com/apache/beam/issues/21169) Enable and fix warnings
   raising-format-tuple,
   redefined-builtin,
   redefined-outer-name,
@@ -142,7 +142,7 @@ disable =
   unnecessary-pass,
   unneeded-not,
   unsubscriptable-object,
-  unspecified-encoding, #TODO(BEAM-12992) Enable explicit encoding
+  unspecified-encoding, #TODO(https://github.com/apache/beam/issues/21236) Enable explicit encoding
   unused-argument,
   unused-wildcard-import,
   useless-object-inheritance,
diff --git a/sdks/python/apache_beam/coders/coder_impl.py b/sdks/python/apache_beam/coders/coder_impl.py
index dd59b08c44b..c8936e71be3 100644
--- a/sdks/python/apache_beam/coders/coder_impl.py
+++ b/sdks/python/apache_beam/coders/coder_impl.py
@@ -1222,11 +1222,12 @@ class SequenceCoderImpl(StreamCoderImpl):
               elem, nested=True))
       estimated_size += child_size
       observables += child_observables
-    # TODO: (BEAM-1537) Update to use an accurate count depending on size and
-    # count, currently we are underestimating the size by up to 10 bytes
-    # per block of data since we are not including the count prefix which
-    # occurs at most once per 64k of data and is upto 10 bytes long. The upper
-    # bound of the underestimate is 10 / 65536 ~= 0.0153% of the actual size.
+    # TODO: (https://github.com/apache/beam/issues/18169) Update to use an
+    # accurate count depending on size and count, currently we are
+    # underestimating the size by up to 10 bytes per block of data since we are
+    # not including the count prefix which occurs at most once per 64k of data
+    # and is upto 10 bytes long. The upper bound of the underestimate is
+    # 10 / 65536 ~= 0.0153% of the actual size.
     # TODO: More efficient size estimation in the case of state-backed
     # iterables.
     return estimated_size, observables
@@ -1265,7 +1266,8 @@ class _AbstractIterable(object):
 
 FastPrimitivesCoderImpl.register_iterable_like_type(_AbstractIterable)
 
-# TODO(BEAM-13066): Enable using abstract iterables permanently
+# TODO(https://github.com/apache/beam/issues/21167): Enable using abstract
+# iterables permanently
 _iterable_coder_uses_abstract_iterable_by_default = False
 
 
@@ -1386,8 +1388,8 @@ class WindowedValueCoderImpl(StreamCoderImpl):
 
   # Ensure that lexicographic ordering of the bytes corresponds to
   # chronological order of timestamps.
-  # TODO(BEAM-1524): Clean this up once we have a BEAM wide consensus on
-  # byte representation of timestamps.
+  # TODO(https://github.com/apache/beam/issues/18190): Clean this up once we
+  # have a BEAM wide consensus on byte representation of timestamps.
   def _to_normal_time(self, value):
     """Convert "lexicographically ordered unsigned" to signed."""
     return value - _TIME_SHIFT
@@ -1412,8 +1414,8 @@ class WindowedValueCoderImpl(StreamCoderImpl):
         # Convert to postive number and divide, since python rounds off to the
         # lower negative number. For ex: -3 / 2 = -2, but we expect it to be -1,
         # to be consistent across SDKs.
-        # TODO(BEAM-1524): Clean this up once we have a BEAM wide consensus on
-        # precision of timestamps.
+        # TODO(https://github.com/apache/beam/issues/18190): Clean this up once
+        # we have a BEAM wide consensus on precision of timestamps.
         self._from_normal_time(
             restore_sign * (
                 abs(
@@ -1431,8 +1433,8 @@ class WindowedValueCoderImpl(StreamCoderImpl):
     # of precision while converting to millis.
     # Note: This is only a best effort here as there is no way to know if these
     # were indeed MIN/MAX timestamps.
-    # TODO(BEAM-1524): Clean this up once we have a BEAM wide consensus on
-    # precision of timestamps.
+    # TODO(https://github.com/apache/beam/issues/18190): Clean this up once we
+    # have a BEAM wide consensus on precision of timestamps.
     if timestamp <= -(abs(MIN_TIMESTAMP_micros) // 1000):
       timestamp = MIN_TIMESTAMP_micros
     elif timestamp >= MAX_TIMESTAMP_micros // 1000:
diff --git a/sdks/python/apache_beam/coders/coders.py b/sdks/python/apache_beam/coders/coders.py
index 317add98af0..0d9bc536aa3 100644
--- a/sdks/python/apache_beam/coders/coders.py
+++ b/sdks/python/apache_beam/coders/coders.py
@@ -238,7 +238,9 @@ class Coder(object):
     return self.__dict__
 
   def to_type_hint(self):
-    raise NotImplementedError('BEAM-2717: %s' % self.__class__.__name__)
+    raise NotImplementedError(
+        'https://github.com/apache/beam/issues/18490: %s' %
+        self.__class__.__name__)
 
   @classmethod
   def from_type_hint(cls, unused_typehint, unused_registry):
@@ -1129,7 +1131,8 @@ class AvroGenericCoder(FastCoder):
     return coder_impl.AvroCoderImpl(self.schema)
 
   def is_deterministic(self):
-    # TODO(BEAM-7903): need to confirm if it's deterministic
+    # TODO(https://github.com/apache/beam/issues/19628): need to confirm if
+    # it's deterministic
     return False
 
   def __eq__(self, other):
diff --git a/sdks/python/apache_beam/coders/row_coder.py b/sdks/python/apache_beam/coders/row_coder.py
index bf15764b5aa..8f3421ca70b 100644
--- a/sdks/python/apache_beam/coders/row_coder.py
+++ b/sdks/python/apache_beam/coders/row_coder.py
@@ -112,7 +112,8 @@ class RowCoder(FastCoder):
 
   @classmethod
   def from_type_hint(cls, type_hint, registry):
-    # TODO(BEAM-14250): Remove once all runners are portable.
+    # TODO(https://github.com/apache/beam/issues/21541): Remove once all
+    # runners are portable.
     if isinstance(type_hint, str):
       import importlib
       main_module = importlib.import_module('__main__')
diff --git a/sdks/python/apache_beam/coders/row_coder_test.py b/sdks/python/apache_beam/coders/row_coder_test.py
index 43ce0403b01..67ee06dad26 100644
--- a/sdks/python/apache_beam/coders/row_coder_test.py
+++ b/sdks/python/apache_beam/coders/row_coder_test.py
@@ -183,13 +183,14 @@ class RowCoderTest(unittest.TestCase):
       self.assertEqual(test_case, coder.decode(coder.encode(test_case)))
 
   @unittest.skip(
-      "BEAM-8030 - Overflow behavior in VarIntCoder is currently inconsistent")
+      "https://github.com/apache/beam/issues/19696 - Overflow behavior in "
+      "VarIntCoder is currently inconsistent")
   def test_overflows(self):
     IntTester = typing.NamedTuple(
         'IntTester',
         [
-            # TODO(BEAM-7996): Test int8 and int16 here as well when those
-            # types are supported
+            # TODO(https://github.com/apache/beam/issues/19815): Test int8 and
+            # int16 here as well when those types are supported
             # ('i8', typing.Optional[np.int8]),
             # ('i16', typing.Optional[np.int16]),
             ('i32', typing.Optional[np.int32]),
diff --git a/sdks/python/apache_beam/coders/typecoders_test.py b/sdks/python/apache_beam/coders/typecoders_test.py
index 442aa24a8b0..3adc8255409 100644
--- a/sdks/python/apache_beam/coders/typecoders_test.py
+++ b/sdks/python/apache_beam/coders/typecoders_test.py
@@ -121,7 +121,7 @@ class TypeCodersTest(unittest.TestCase):
     self.assertEqual(expected_coder, real_coder)
     self.assertEqual(real_coder.encode(values), expected_coder.encode(values))
 
-  @unittest.skip('BEAM-14411')
+  @unittest.skip('https://github.com/apache/beam/issues/21658')
   def test_list_coder(self):
     real_coder = typecoders.registry.get_coder(typehints.List[bytes])
     expected_coder = coders.IterableCoder(coders.BytesCoder())
diff --git a/sdks/python/apache_beam/dataframe/frame_base.py b/sdks/python/apache_beam/dataframe/frame_base.py
index 145172e3b36..24497f1de06 100644
--- a/sdks/python/apache_beam/dataframe/frame_base.py
+++ b/sdks/python/apache_beam/dataframe/frame_base.py
@@ -415,11 +415,13 @@ def wont_implement_method(base_type, name, reason=None, explanation=None):
   return wrapper
 
 
-def not_implemented_method(op, jira='BEAM-9547', base_type=None):
+def not_implemented_method(op, issue='20318', base_type=None):
   """Generate a stub method for ``op`` that simply raises a NotImplementedError.
 
   For internal use only. No backwards compatibility guarantees."""
   assert base_type is not None, "base_type must be specified"
+  issue_url = f"https://issues.apache.org/jira/{issue}." if issue.startswith(
+      "BEAM-") else f"https://github.com/apache/beam/issues/{issue}"
 
   def wrapper(*args, **kwargs):
     raise NotImplementedError(
@@ -427,7 +429,7 @@ def not_implemented_method(op, jira='BEAM-9547', base_type=None):
         f"If support for {op!r} is important to you, please let the Beam "
         "community know by writing to user@beam.apache.org "
         "(see https://beam.apache.org/community/contact-us/) or commenting on "
-        f"https://issues.apache.org/jira/{jira}.")
+        f"{issue_url}")
 
   wrapper.__name__ = op
   wrapper.__doc__ = (
@@ -436,7 +438,7 @@ def not_implemented_method(op, jira='BEAM-9547', base_type=None):
       f"If support for {op!r} is important to you, please let the Beam "
       "community know by `writing to user@beam.apache.org "
       "<https://beam.apache.org/community/contact-us/>`_ or commenting on "
-      f"`{jira} <https://issues.apache.org/jira/{jira}>`_.")
+      f"`{issue} <{issue_url}>`_.")
 
   return wrapper
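
    # Illustrative sketch only (not the committed code verbatim): the frame_base.py
    # change above replaces the old `jira='BEAM-9547'` parameter with an `issue`
    # number and builds the link at call time. Legacy "BEAM-" identifiers still
    # resolve to Jira; bare numbers resolve to GitHub issues.
    def _issue_url(issue: str) -> str:
        # Keep supporting old-style Jira ids while defaulting to GitHub issues.
        if issue.startswith("BEAM-"):
            return f"https://issues.apache.org/jira/{issue}"
        return f"https://github.com/apache/beam/issues/{issue}"

    assert _issue_url("20318") == "https://github.com/apache/beam/issues/20318"
    assert _issue_url("BEAM-9547") == "https://issues.apache.org/jira/BEAM-9547"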
 
diff --git a/sdks/python/apache_beam/dataframe/frames.py b/sdks/python/apache_beam/dataframe/frames.py
index ec8a664041e..1349573c9f5 100644
--- a/sdks/python/apache_beam/dataframe/frames.py
+++ b/sdks/python/apache_beam/dataframe/frames.py
@@ -358,7 +358,7 @@ class DeferredDataFrameOrSeries(frame_base.DeferredFrame):
 
     Aggregations grouping by a categorical column with ``observed=False`` set
     are not currently parallelizable
-    (`BEAM-11190 <https://issues.apache.org/jira/browse/BEAM-11190>`_).
+    (`Issue 21827 <https://github.com/apache/beam/issues/21827>`_).
     """
     if not as_index:
       raise NotImplementedError('groupby(as_index=False)')
@@ -519,8 +519,9 @@ class DeferredDataFrameOrSeries(frame_base.DeferredFrame):
         to_group = self.set_index(by)._expr
 
       if grouping_columns:
-        # TODO(BEAM-11711): It should be possible to do this without creating an
-        # expression manually, by using DeferredDataFrame.set_index, i.e.:
+        # TODO(https://github.com/apache/beam/issues/20759):
+        # It should be possible to do this without creating
+        # an expression manually, by using DeferredDataFrame.set_index, i.e.:
         #   to_group_with_index = self.set_index([self.index] +
         #                                        grouping_columns)._expr
         to_group_with_index = expressions.ComputedExpression(
@@ -575,12 +576,13 @@ class DeferredDataFrameOrSeries(frame_base.DeferredFrame):
     if level is not None and not isinstance(level, (tuple, list)):
       level = [level]
     if level is None or len(level) == self._expr.proxy().index.nlevels:
-      # TODO(BEAM-12182): Could do distributed re-index with offsets.
+      # TODO(https://github.com/apache/beam/issues/20859):
+      # Could do distributed re-index with offsets.
       requires_partition_by = partitionings.Singleton(
           reason=(
               f"reset_index(level={level!r}) drops the entire index and "
               "creates a new one, so it cannot currently be parallelized "
-              "(BEAM-12182)."))
+              "(https://github.com/apache/beam/issues/20859)."))
     else:
       requires_partition_by = partitionings.Arbitrary()
     return frame_base.DeferredFrame.wrap(
@@ -1175,7 +1177,7 @@ class DeferredDataFrameOrSeries(frame_base.DeferredFrame):
 
   sparse = property(
       frame_base.not_implemented_method(
-          'sparse', 'BEAM-12425', base_type=pd.DataFrame))
+          'sparse', '20902', base_type=pd.DataFrame))
 
   transform = frame_base._elementwise_method('transform', base=pd.DataFrame)
 
@@ -1534,16 +1536,17 @@ class DeferredSeries(DeferredDataFrameOrSeries):
   @frame_base.populate_defaults(pd.Series)
   def quantile(self, q, **kwargs):
     """quantile is not parallelizable. See
-    `BEAM-12167 <https://github.com/apache/beam/issues/20933>`_ tracking
+    `Issue 20933 <https://github.com/apache/beam/issues/20933>`_ tracking
     the possible addition of an approximate, parallelizable implementation of
     quantile."""
-    # TODO(BEAM-12167): Provide an option for approximate distributed
-    # quantiles
+    # TODO(https://github.com/apache/beam/issues/20933): Provide an option for
+    #  approximate distributed quantiles
     requires = partitionings.Singleton(
         reason=(
             "Computing quantiles across index cannot currently be "
-            "parallelized. See BEAM-12167 tracking the possible addition of an "
-            "approximate, parallelizable implementation of quantile."))
+            "parallelized. See https://github.com/apache/beam/issues/20933 "
+            "tracking the possible addition of an approximate, parallelizable "
+            "implementation of quantile."))
 
     return frame_base.DeferredFrame.wrap(
         expressions.ComputedExpression(
@@ -1561,8 +1564,9 @@ class DeferredSeries(DeferredDataFrameOrSeries):
   @frame_base.args_to_kwargs(pd.Series)
   @frame_base.populate_defaults(pd.Series)
   def var(self, axis, skipna, level, ddof, **kwargs):
-    """Per-level aggregation is not yet supported (BEAM-11777). Only the
-    default, ``level=None``, is allowed."""
+    """Per-level aggregation is not yet supported
+    (https://github.com/apache/beam/issues/21829). Only the default,
+    ``level=None``, is allowed."""
     if level is not None:
       raise NotImplementedError("per-level aggregation")
     if skipna is None or skipna:
@@ -1920,7 +1924,7 @@ class DeferredSeries(DeferredDataFrameOrSeries):
   def sample(self, **kwargs):
     """Only ``n`` and/or ``weights`` may be specified.  ``frac``,
     ``random_state``, and ``replace=True`` are not yet supported.
-    See `BEAM-12476 <https://issues.apache.org/jira/BEAM-12476>`_.
+    See `Issue 21010 <https://github.com/apache/beam/issues/21010>`_.
 
     Note that pandas will raise an error if ``n`` is larger than the length
     of the dataset, while the Beam DataFrame API will simply return the full
@@ -2273,7 +2277,7 @@ class DeferredSeries(DeferredDataFrameOrSeries):
     preserved.
 
     When ``bin`` is specified this operation is not parallelizable. See
-    [BEAM-12441](https://github.com/apache/beam/issues/20903) tracking the
+    [Issue 20903](https://github.com/apache/beam/issues/20903) tracking the
     possible addition of a distributed implementation."""
 
     if sort:
@@ -2340,24 +2344,26 @@ class DeferredSeries(DeferredDataFrameOrSeries):
   def mode(self, *args, **kwargs):
     """mode is not currently parallelizable. An approximate,
     parallelizable implementation of mode may be added in the future
-    (`BEAM-12181 <https://issues.apache.org/jira/BEAM-12181>`_)."""
+    (`Issue 20946 <https://github.com/apache/beam/issues/20946>`_)."""
     return frame_base.DeferredFrame.wrap(
         expressions.ComputedExpression(
             'mode',
             lambda df: df.mode(*args, **kwargs),
             [self._expr],
-            #TODO(BEAM-12181): Can we add an approximate implementation?
+            #TODO(https://github.com/apache/beam/issues/20946):
+            # Can we add an approximate implementation?
             requires_partition_by=partitionings.Singleton(
                 reason=(
                     "mode cannot currently be parallelized. See "
-                    "BEAM-12181 tracking the possble addition of "
-                    "an approximate, parallelizable implementation of mode.")),
+                    "https://github.com/apache/beam/issues/20946 tracking the "
+                    "possble addition of an approximate, parallelizable "
+                    "implementation of mode.")),
             preserves_partition_by=partitionings.Singleton()))
 
   apply = frame_base._elementwise_method('apply', base=pd.Series)
   map = frame_base._elementwise_method('map', base=pd.Series)
-  # TODO(BEAM-11636): Implement transform using type inference to determine the
-  # proxy
+  # TODO(https://github.com/apache/beam/issues/20764): Implement transform
+  # using type inference to determine the proxy
   #transform = frame_base._elementwise_method('transform', base=pd.Series)
 
   @frame_base.with_docs_from(pd.Series)
@@ -2601,7 +2607,7 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
   @frame_base.maybe_inplace
   def set_index(self, keys, **kwargs):
     """``keys`` must be a ``str`` or ``List[str]``. Passing an Index or Series
-    is not yet supported (`BEAM-11711
+    is not yet supported (`Issue 20759
     <https://github.com/apache/beam/issues/20759>`_)."""
     if isinstance(keys, str):
       keys = [keys]
@@ -2609,7 +2615,9 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
     if any(isinstance(k, (_DeferredIndex, frame_base.DeferredFrame))
            for k in keys):
       raise NotImplementedError("set_index with Index or Series instances is "
-                                "not yet supported (BEAM-11711).")
+                                "not yet supported "
+                                "(https://github.com/apache/beam/issues/20759)"
+                                ".")
 
     return frame_base.DeferredFrame.wrap(
       expressions.ComputedExpression(
@@ -3120,7 +3128,7 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
   def sample(self, n, frac, replace, weights, random_state, axis):
     """When ``axis='index'``, only ``n`` and/or ``weights`` may be specified.
     ``frac``, ``random_state``, and ``replace=True`` are not yet supported.
-    See `BEAM-12476 <https://issues.apache.org/jira/BEAM-12476>`_.
+    See `Issue 21010 <https://github.com/apache/beam/issues/21010>`_.
 
     Note that pandas will raise an error if ``n`` is larger than the length
     of the dataset, while the Beam DataFrame API will simply return the full
@@ -3143,7 +3151,8 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
           f"When axis={axis!r}, only n and/or weights may be specified. "
           "frac, random_state, and replace=True are not yet supported "
           f"(got frac={frac!r}, random_state={random_state!r}, "
-          f"replace={replace!r}). See BEAM-12476.")
+          f"replace={replace!r}). See "
+          "https://github.com/apache/beam/issues/21010.")
 
     if n is None:
       n = 1
@@ -3222,7 +3231,7 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
 
     mode with axis="index" is not currently parallelizable. An approximate,
     parallelizable implementation of mode may be added in the future
-    (`BEAM-12181 <https://issues.apache.org/jira/BEAM-12181>`_)."""
+    (`Issue 20946 <https://github.com/apache/beam/issues/20946>`_)."""
 
     if axis == 1 or axis == 'columns':
       # Number of columns is max(number mode values for each row), so we can't
@@ -3236,11 +3245,13 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
             'mode',
             lambda df: df.mode(*args, **kwargs),
             [self._expr],
-            #TODO(BEAM-12181): Can we add an approximate implementation?
+            #TODO(https://github.com/apache/beam/issues/20946):
+            # Can we add an approximate implementation?
             requires_partition_by=partitionings.Singleton(reason=(
                 "mode(axis='index') cannot currently be parallelized. See "
-                "BEAM-12181 tracking the possble addition of an approximate, "
-                "parallelizable implementation of mode."
+                "https://github.com/apache/beam/issues/20946 tracking the "
+                "possble addition of an approximate, parallelizable "
+                "implementation of mode."
             )),
             preserves_partition_by=partitionings.Singleton()))
 
@@ -3275,7 +3286,8 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
     # look for '@<py identifier>'
     if re.search(r'\@[^\d\W]\w*', expr, re.UNICODE):
       raise NotImplementedError("Accessing locals with @ is not yet supported "
-                                "(BEAM-11202)")
+                                "(https://github.com/apache/beam/issues/20626)"
+                                )
 
     result_expr = expressions.ComputedExpression(
         name,
@@ -3295,7 +3307,7 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
   @frame_base.populate_defaults(pd.DataFrame)
   def eval(self, expr, inplace, **kwargs):
     """Accessing local variables with ``@<varname>`` is not yet supported
-    (`BEAM-11202 <https://github.com/apache/beam/issues/20626>`_).
+    (`Issue 20626 <https://github.com/apache/beam/issues/20626>`_).
 
     Arguments ``local_dict``, ``global_dict``, ``level``, ``target``, and
     ``resolvers`` are not yet supported."""
@@ -3306,7 +3318,7 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
   @frame_base.populate_defaults(pd.DataFrame)
   def query(self, expr, inplace, **kwargs):
     """Accessing local variables with ``@<varname>`` is not yet supported
-    (`BEAM-11202 <https://github.com/apache/beam/issues/20626>`_).
+    (`Issue 20626 <https://github.com/apache/beam/issues/20626>`_).
 
     Arguments ``local_dict``, ``global_dict``, ``level``, ``target``, and
     ``resolvers`` are not yet supported."""
@@ -3420,7 +3432,9 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
         right_index=right_index,
         **kwargs)
     if kwargs.get('how', None) == 'cross':
-      raise NotImplementedError("cross join is not yet implemented (BEAM-9547)")
+      raise NotImplementedError(
+        "cross join is not yet implemented "
+        "(https://github.com/apache/beam/issues/20318)")
     if not any([on, left_on, right_on, left_index, right_index]):
       on = [col for col in self_proxy.columns if col in right_proxy.columns]
     if not left_on:
@@ -3554,7 +3568,7 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
   @frame_base.populate_defaults(pd.DataFrame)
   def quantile(self, q, axis, **kwargs):
     """``quantile(axis="index")`` is not parallelizable. See
-    `BEAM-12167 <https://github.com/apache/beam/issues/20933>`_ tracking
+    `Issue 20933 <https://github.com/apache/beam/issues/20933>`_ tracking
     the possible addition of an approximate, parallelizable implementation of
     quantile.
 
@@ -3571,12 +3585,13 @@ class DeferredDataFrame(DeferredDataFrameOrSeries):
       else:
         requires = partitionings.Arbitrary()
     else: # axis='index'
-      # TODO(BEAM-12167): Provide an option for approximate distributed
-      # quantiles
+      # TODO(https://github.com/apache/beam/issues/20933): Provide an option
+      # for approximate distributed quantiles
       requires = partitionings.Singleton(reason=(
           "Computing quantiles across index cannot currently be parallelized. "
-          "See BEAM-12167 tracking the possible addition of an approximate, "
-          "parallelizable implementation of quantile."
+          "See https://github.com/apache/beam/issues/20933 tracking the "
+          "possible addition of an approximate, parallelizable implementation "
+          "of quantile."
       ))
 
     return frame_base.DeferredFrame.wrap(
@@ -4116,7 +4131,8 @@ class DeferredGroupBy(frame_base.DeferredFrame):
         self._ungrouped.proxy().index.nlevels > 1):
       raise NotImplementedError(
           "dropna=False does not work as intended in the Beam DataFrame API "
-          "when grouping on multiple columns or indexes (See BEAM-12495).")
+          "when grouping on multiple columns or indexes (See "
+          "https://github.com/apache/beam/issues/21014).")
 
   def __getattr__(self, name):
     return DeferredGroupBy(
@@ -4462,7 +4478,8 @@ class DeferredGroupBy(frame_base.DeferredFrame):
   ohlc = frame_base.wont_implement_method(DataFrameGroupBy, 'ohlc',
                                           reason='order-sensitive')
 
-  # TODO(BEAM-12169): Consider allowing this for categorical keys.
+  # TODO(https://github.com/apache/beam/issues/20958): Consider allowing this
+  # for categorical keys.
   __len__ = frame_base.wont_implement_method(
       DataFrameGroupBy, '__len__', reason="non-deferred-result")
   groups = property(frame_base.wont_implement_method(
@@ -4538,7 +4555,7 @@ def _liftable_agg(meth, postagg_meth=None):
         [pre_agg],
         requires_partition_by=(partitionings.Singleton(reason=(
             "Aggregations grouped by a categorical column are not currently "
-            "parallelizable (BEAM-11190)."
+            "parallelizable (https://github.com/apache/beam/issues/21827)."
         ))
                                if is_categorical_grouping
                                else partitionings.Index()),
@@ -4570,7 +4587,7 @@ def _unliftable_agg(meth):
         [self._ungrouped],
         requires_partition_by=(partitionings.Singleton(reason=(
             "Aggregations grouped by a categorical column are not currently "
-            "parallelizable (BEAM-11190)."
+            "parallelizable (https://github.com/apache/beam/issues/21827)."
         ))
                                if is_categorical_grouping
                                else partitionings.Index()),
@@ -4887,9 +4904,9 @@ class _DeferredStringMethods(frame_base.DeferredBase):
               'repeat',
               lambda series: series.str.repeat(repeats),
               [self._expr],
-              # TODO(BEAM-11155): Defer to pandas to compute this proxy.
-              # Currently it incorrectly infers dtype bool, may require upstream
-              # fix.
+              # TODO(https://github.com/apache/beam/issues/20573): Defer to
+              # pandas to compute this proxy. Currently it incorrectly infers
+              # dtype bool, may require upstream fix.
               proxy=self._expr.proxy(),
               requires_partition_by=partitionings.Arbitrary(),
               preserves_partition_by=partitionings.Arbitrary()))
@@ -4899,9 +4916,9 @@ class _DeferredStringMethods(frame_base.DeferredBase):
               'repeat',
               lambda series, repeats_series: series.str.repeat(repeats_series),
               [self._expr, repeats._expr],
-              # TODO(BEAM-11155): Defer to pandas to compute this proxy.
-              # Currently it incorrectly infers dtype bool, may require upstream
-              # fix.
+              # TODO(https://github.com/apache/beam/issues/20573): Defer to
+              # pandas to compute this proxy. Currently it incorrectly infers
+              # dtype bool, may require upstream fix.
               proxy=self._expr.proxy(),
               requires_partition_by=partitionings.Index(),
               preserves_partition_by=partitionings.Arbitrary()))
diff --git a/sdks/python/apache_beam/dataframe/frames_test.py b/sdks/python/apache_beam/dataframe/frames_test.py
index 986396da00b..f3ce6b402d3 100644
--- a/sdks/python/apache_beam/dataframe/frames_test.py
+++ b/sdks/python/apache_beam/dataframe/frames_test.py
@@ -138,7 +138,8 @@ class _AbstractFrameTest(unittest.TestCase):
             This option should NOT be set to False in tests added for new
             operations if at all possible. Instead make sure the new operation
             produces the correct proxy. This flag only exists as an escape hatch
-            until existing failures can be addressed (BEAM-12379).
+            until existing failures can be addressed
+            (https://github.com/apache/beam/issues/20926).
         lenient_dtype_check (bool): Whether or not to check that numeric columns
             are still numeric between actual and proxy. i.e. verify that they
             are at least int64 or float64, and not necessarily have the exact
@@ -278,7 +279,8 @@ class DeferredFrameTest(_AbstractFrameTest):
         'first_name': ['John', 'Anne', 'John', 'Beth'],
         'middle_name': ['Smith', pd.NA, pd.NA, 'Louise']
     })
-    # TODO(BEAM-12495): Remove the assertRaises this when the underlying bug in
+    # TODO(https://github.com/apache/beam/issues/21014): Remove the
+    # assertRaises this when the underlying bug in
     # https://github.com/pandas-dev/pandas/issues/36470 is fixed.
     with self.assertRaises(NotImplementedError):
       self._run_test(lambda df: df.value_counts(dropna=False), df)
@@ -684,8 +686,9 @@ class DeferredFrameTest(_AbstractFrameTest):
 
     if PD_VERSION >= (1, 3):
       # dropna=False is new in pandas 1.3
-      # TODO(BEAM-12495): Remove the assertRaises this when the underlying bug
-      # in https://github.com/pandas-dev/pandas/issues/36470 is fixed.
+      # TODO(https://github.com/apache/beam/issues/21014): Remove the
+      # assertRaises this when the underlying bug in
+      # https://github.com/pandas-dev/pandas/issues/36470 is fixed.
       with self.assertRaises(NotImplementedError):
         self._run_test(lambda df: df.value_counts(dropna=False), df)
 
@@ -774,8 +777,8 @@ class DeferredFrameTest(_AbstractFrameTest):
 
   def test_loc(self):
     dates = pd.date_range('1/1/2000', periods=8)
-    # TODO(BEAM-11757): We do not preserve the freq attribute on a DateTime
-    # index
+    # TODO(https://github.com/apache/beam/issues/20765):
+    # We do not preserve the freq attribute on a DateTime index
     dates.freq = None
     df = pd.DataFrame(
         np.arange(32).reshape((8, 4)),
@@ -1612,8 +1615,8 @@ class GroupByTest(_AbstractFrameTest):
   def test_groupby_agg(self, agg_type):
     if agg_type == 'describe' and PD_VERSION < (1, 2):
       self.skipTest(
-          "BEAM-12366: proxy generation of DataFrameGroupBy.describe "
-          "fails in pandas < 1.2")
+          "https://github.com/apache/beam/issues/20967: proxy generation of "
+          "DataFrameGroupBy.describe fails in pandas < 1.2")
     self._run_test(
         lambda df: df.groupby('group').agg(agg_type),
         GROUPBY_DF,
@@ -1623,8 +1626,8 @@ class GroupByTest(_AbstractFrameTest):
   def test_groupby_with_filter(self, agg_type):
     if agg_type == 'describe' and PD_VERSION < (1, 2):
       self.skipTest(
-          "BEAM-12366: proxy generation of DataFrameGroupBy.describe "
-          "fails in pandas < 1.2")
+          "https://github.com/apache/beam/issues/20967: proxy generation of "
+          "DataFrameGroupBy.describe fails in pandas < 1.2")
     self._run_test(
         lambda df: getattr(df[df.foo > 30].groupby('group'), agg_type)(),
         GROUPBY_DF,
@@ -1634,8 +1637,8 @@ class GroupByTest(_AbstractFrameTest):
   def test_groupby(self, agg_type):
     if agg_type == 'describe' and PD_VERSION < (1, 2):
       self.skipTest(
-          "BEAM-12366: proxy generation of DataFrameGroupBy.describe "
-          "fails in pandas < 1.2")
+          "https://github.com/apache/beam/issues/20967: proxy generation of "
+          "DataFrameGroupBy.describe fails in pandas < 1.2")
 
     self._run_test(
         lambda df: getattr(df.groupby('group'), agg_type)(),
@@ -1646,8 +1649,8 @@ class GroupByTest(_AbstractFrameTest):
   def test_groupby_series(self, agg_type):
     if agg_type == 'describe' and PD_VERSION < (1, 2):
       self.skipTest(
-          "BEAM-12366: proxy generation of DataFrameGroupBy.describe "
-          "fails in pandas < 1.2")
+          "https://github.com/apache/beam/issues/20967: proxy generation of "
+          "DataFrameGroupBy.describe fails in pandas < 1.2")
 
     self._run_test(
         lambda df: getattr(df[df.foo > 40].groupby(df.group), agg_type)(),
@@ -1674,12 +1677,12 @@ class GroupByTest(_AbstractFrameTest):
 
     if agg_type == 'describe':
       self.skipTest(
-          "BEAM-12366: proxy generation of SeriesGroupBy.describe "
-          "fails")
+          "https://github.com/apache/beam/issues/20967: proxy generation of "
+          "SeriesGroupBy.describe fails")
     if agg_type in ('corr', 'cov'):
       self.skipTest(
-          "BEAM-12367: SeriesGroupBy.{corr, cov} do not raise the "
-          "expected error.")
+          "https://github.com/apache/beam/issues/20895: "
+          "SeriesGroupBy.{corr, cov} do not raise the expected error.")
 
     self._run_test(lambda df: getattr(df.groupby('group').foo, agg_type)(), df)
     self._run_test(lambda df: getattr(df.groupby('group').bar, agg_type)(), df)
@@ -1692,8 +1695,8 @@ class GroupByTest(_AbstractFrameTest):
   def test_groupby_project_dataframe(self, agg_type):
     if agg_type == 'describe' and PD_VERSION < (1, 2):
       self.skipTest(
-          "BEAM-12366: proxy generation of DataFrameGroupBy.describe "
-          "fails in pandas < 1.2")
+          "https://github.com/apache/beam/issues/20967: proxy generation of "
+          "DataFrameGroupBy.describe fails in pandas < 1.2")
     self._run_test(
         lambda df: getattr(df.groupby('group')[['bar', 'baz']], agg_type)(),
         GROUPBY_DF,
@@ -1801,7 +1804,7 @@ class GroupByTest(_AbstractFrameTest):
             lambda x: x[x.foo > x.foo.median()]),
         df)
 
-  @unittest.skip('BEAM-11710')
+  @unittest.skip('https://github.com/apache/beam/issues/20762')
   def test_groupby_aggregate_grouped_column(self):
     df = pd.DataFrame({
         'group': ['a' if i % 5 == 0 or i % 3 == 0 else 'b' for i in range(100)],
@@ -1862,8 +1865,8 @@ class GroupByTest(_AbstractFrameTest):
   def test_dataframe_groupby_series(self, agg_type):
     if agg_type == 'describe' and PD_VERSION < (1, 2):
       self.skipTest(
-          "BEAM-12366: proxy generation of DataFrameGroupBy.describe "
-          "fails in pandas < 1.2")
+          "https://github.com/apache/beam/issues/20967: proxy generation of "
+          "DataFrameGroupBy.describe fails in pandas < 1.2")
     self._run_test(
         lambda df: df[df.foo > 40].groupby(df.group).agg(agg_type),
         GROUPBY_DF,
@@ -1877,12 +1880,12 @@ class GroupByTest(_AbstractFrameTest):
   def test_series_groupby_series(self, agg_type):
     if agg_type == 'describe':
       self.skipTest(
-          "BEAM-12366: proxy generation of SeriesGroupBy.describe "
-          "fails")
+          "https://github.com/apache/beam/issues/20967: proxy generation of "
+          "SeriesGroupBy.describe fails")
     if agg_type in ('corr', 'cov'):
       self.skipTest(
-          "BEAM-12367: SeriesGroupBy.{corr, cov} do not raise the "
-          "expected error.")
+          "https://github.com/apache/beam/issues/20895: "
+          "SeriesGroupBy.{corr, cov} do not raise the expected error.")
     self._run_test(
         lambda df: df[df.foo < 40].bar.groupby(df.group).agg(agg_type),
         GROUPBY_DF)
@@ -1906,7 +1909,8 @@ class GroupByTest(_AbstractFrameTest):
   def test_groupby_multiindex_keep_nans(self):
     # Due to https://github.com/pandas-dev/pandas/issues/36470
     # groupby(dropna=False) doesn't work with multiple columns
-    with self.assertRaisesRegex(NotImplementedError, "BEAM-12495"):
+    with self.assertRaisesRegex(NotImplementedError,
+                                "https://github.com/apache/beam/issues/21014"):
       self._run_test(
           lambda df: df.groupby(['foo', 'bar'], dropna=False).sum(), GROUPBY_DF)
 
@@ -1923,7 +1927,8 @@ class AggregationTest(_AbstractFrameTest):
     nonparallel = agg_method in (
         'quantile', 'mean', 'describe', 'median', 'sem', 'mad')
 
-    # TODO(BEAM-12379): max and min produce the wrong proxy
+    # TODO(https://github.com/apache/beam/issues/20926): max and min produce
+    # the wrong proxy
     check_proxy = agg_method not in ('max', 'min')
 
     self._run_test(
@@ -1942,7 +1947,8 @@ class AggregationTest(_AbstractFrameTest):
     nonparallel = agg_method in (
         'quantile', 'mean', 'describe', 'median', 'sem', 'mad')
 
-    # TODO(BEAM-12379): max and min produce the wrong proxy
+    # TODO(https://github.com/apache/beam/issues/20926): max and min produce
+    # the wrong proxy
     check_proxy = agg_method not in ('max', 'min')
 
     self._run_test(
@@ -1958,7 +1964,8 @@ class AggregationTest(_AbstractFrameTest):
     nonparallel = agg_method in (
         'quantile', 'mean', 'describe', 'median', 'sem', 'mad')
 
-    # TODO(BEAM-12379): max and min produce the wrong proxy
+    # TODO(https://github.com/apache/beam/issues/20926): max and min produce
+    # the wrong proxy
     check_proxy = agg_method not in ('max', 'min')
 
     self._run_test(
@@ -1975,7 +1982,8 @@ class AggregationTest(_AbstractFrameTest):
     nonparallel = agg_method in (
         'quantile', 'mean', 'describe', 'median', 'sem', 'mad')
 
-    # TODO(BEAM-12379): max and min produce the wrong proxy
+    # TODO(https://github.com/apache/beam/issues/20926): max and min produce
+    # the wrong proxy
     check_proxy = agg_method not in ('max', 'min')
 
     self._run_test(
diff --git a/sdks/python/apache_beam/dataframe/io.py b/sdks/python/apache_beam/dataframe/io.py
index 0c0baf4331b..4b0593be314 100644
--- a/sdks/python/apache_beam/dataframe/io.py
+++ b/sdks/python/apache_beam/dataframe/io.py
@@ -246,8 +246,8 @@ class _ReadFromPandas(beam.PTransform):
     paths_pcoll = root | beam.Create([self.path])
     match = io.filesystems.FileSystems.match([self.path], limits=[1])[0]
     if not match.metadata_list:
-      # TODO(BEAM-12031): This should be allowed for streaming pipelines if
-      # user provides an explicit schema.
+      # TODO(https://github.com/apache/beam/issues/20858): This should be
+      # allowed for streaming pipelines if user provides an explicit schema.
       raise FileNotFoundError(f"Found no files that match {self.path!r}")
     first_path = match.metadata_list[0].path
     with io.filesystems.FileSystems.open(first_path) as handle:
diff --git a/sdks/python/apache_beam/dataframe/io_test.py b/sdks/python/apache_beam/dataframe/io_test.py
index e235174b946..747a3ab7fe0 100644
--- a/sdks/python/apache_beam/dataframe/io_test.py
+++ b/sdks/python/apache_beam/dataframe/io_test.py
@@ -55,7 +55,9 @@ class MyRow(typing.NamedTuple):
   value: int
 
 
-@unittest.skipIf(platform.system() == 'Windows', 'BEAM-10929')
+@unittest.skipIf(
+    platform.system() == 'Windows',
+    'https://github.com/apache/beam/issues/20642')
 class IOTest(unittest.TestCase):
   def setUp(self):
     self._temp_roots = []
diff --git a/sdks/python/apache_beam/dataframe/pandas_doctests_test.py b/sdks/python/apache_beam/dataframe/pandas_doctests_test.py
index 15cd7bc3848..dd2ce978335 100644
--- a/sdks/python/apache_beam/dataframe/pandas_doctests_test.py
+++ b/sdks/python/apache_beam/dataframe/pandas_doctests_test.py
@@ -24,7 +24,8 @@ from apache_beam.dataframe.frames import PD_VERSION
 from apache_beam.dataframe.pandas_top_level_functions import _is_top_level_function
 
 
-@unittest.skipIf(sys.platform == 'win32', '[BEAM-10626]')
+@unittest.skipIf(
+    sys.platform == 'win32', '[https://github.com/apache/beam/issues/20361]')
 class DoctestTest(unittest.TestCase):
   def test_ndframe_tests(self):
     # IO methods are tested in io_test.py
@@ -285,7 +286,7 @@ class DoctestTest(unittest.TestCase):
                 "df1.merge(df2, how='cross')"
             ],
 
-            # TODO(BEAM-11711)
+            # TODO(https://github.com/apache/beam/issues/20759)
             'pandas.core.frame.DataFrame.set_index': [
                 "df.set_index([s, s**2])",
             ],
@@ -294,7 +295,7 @@ class DoctestTest(unittest.TestCase):
                 "df.set_axis(range(0,2), axis='index')",
             ],
 
-            # TODO(BEAM-12495)
+            # TODO(https://github.com/apache/beam/issues/21014)
             'pandas.core.frame.DataFrame.value_counts': [
               'df.value_counts(dropna=False)'
             ],
@@ -332,8 +333,9 @@ class DoctestTest(unittest.TestCase):
                 'df.rename(index=str).index',
             ],
             'pandas.core.frame.DataFrame.set_index': [
-                # TODO(BEAM-11711): This could pass in the index as
-                # a DeferredIndex, and we should fail it as order-sensitive.
+                # TODO(https://github.com/apache/beam/issues/20759): This could
+                # pass in the index as a DeferredIndex, and we should fail it
+                # as order-sensitive.
                 "df.set_index([pd.Index([1, 2, 3, 4]), 'year'])",
             ],
             'pandas.core.frame.DataFrame.set_axis': [
@@ -517,10 +519,10 @@ class DoctestTest(unittest.TestCase):
             # Relies on setting values with iloc
             'pandas.core.series.Series': ['ser', 'r'],
             'pandas.core.series.Series.groupby': [
-                # TODO(BEAM-11393): This example requires aligning two series
-                # with non-unique indexes. It only works in pandas because
-                # pandas can recognize the indexes are identical and elide the
-                # alignment.
+                # TODO(https://github.com/apache/beam/issues/20643): This
+                # example requires aligning two series with non-unique indexes.
+                # It only works in pandas because pandas can recognize the
+                # indexes are identical and elide the alignment.
                 'ser.groupby(ser > 100).mean()',
             ],
             'pandas.core.series.Series.asfreq': ['*'],
@@ -642,7 +644,8 @@ class DoctestTest(unittest.TestCase):
                 'seconds_series.dt.seconds'
             ],
 
-            # TODO(BEAM-12530): Test data creation fails for these
+            # TODO(https://github.com/apache/beam/issues/21013): Test data
+            # creation fails for these
             #   s = pd.Series(pd.to_timedelta(np.arange(5), unit="d"))
             # pylint: disable=line-too-long
             'pandas.core.indexes.accessors.DatetimeProperties.to_pydatetime': [
diff --git a/sdks/python/apache_beam/dataframe/schemas_test.py b/sdks/python/apache_beam/dataframe/schemas_test.py
index 3fbb2d834b5..ed604bcd972 100644
--- a/sdks/python/apache_beam/dataframe/schemas_test.py
+++ b/sdks/python/apache_beam/dataframe/schemas_test.py
@@ -240,8 +240,9 @@ class SchemasTest(unittest.TestCase):
           | schemas.UnbatchPandas(proxy))
 
       # Verify that the unbatched PCollection has the expected typehint
-      # TODO(BEAM-8538): typehints should support NamedTuple so we can use
-      # typehints.is_consistent_with here instead
+      # TODO(https://github.com/apache/beam/issues/19923): typehints should
+      # support NamedTuple so we can use typehints.is_consistent_with here
+      # instead
       self.assert_typehints_equal(res.element_type, beam_type)
 
       assert_that(res, equal_to(rows))
diff --git a/sdks/python/apache_beam/examples/complete/game/game_stats_it_test.py b/sdks/python/apache_beam/examples/complete/game/game_stats_it_test.py
index c18eb40c006..2be361eb3c6 100644
--- a/sdks/python/apache_beam/examples/complete/game/game_stats_it_test.py
+++ b/sdks/python/apache_beam/examples/complete/game/game_stats_it_test.py
@@ -106,8 +106,8 @@ class GameStatsIT(unittest.TestCase):
 
   @pytest.mark.it_postcommit
   @pytest.mark.examples_postcommit
-  # TODO(BEAM-13613) This example only works in Dataflow,
-  #  remove mark to enable for other runners when fixed
+  # TODO(https://github.com/apache/beam/issues/21300) This example only works in
+  # Dataflow, remove mark to enable for other runners when fixed
   @pytest.mark.sickbay_direct
   @pytest.mark.sickbay_spark
   @pytest.mark.sickbay_flink
diff --git a/sdks/python/apache_beam/examples/complete/game/leader_board_it_test.py b/sdks/python/apache_beam/examples/complete/game/leader_board_it_test.py
index df686fc00ab..4cc13171fe9 100644
--- a/sdks/python/apache_beam/examples/complete/game/leader_board_it_test.py
+++ b/sdks/python/apache_beam/examples/complete/game/leader_board_it_test.py
@@ -107,8 +107,8 @@ class LeaderBoardIT(unittest.TestCase):
 
   @pytest.mark.it_postcommit
   @pytest.mark.examples_postcommit
-  # TODO(BEAM-13613) This example only works in Dataflow,
-  #  remove mark to enable for other runners when fixed
+  # TODO(https://github.com/apache/beam/issues/21300) This example only works
+  # in Dataflow, remove mark to enable for other runners when fixed
   @pytest.mark.sickbay_direct
   @pytest.mark.sickbay_spark
   @pytest.mark.sickbay_flink
diff --git a/sdks/python/apache_beam/examples/complete/juliaset/setup.py b/sdks/python/apache_beam/examples/complete/juliaset/setup.py
index 79e24b95c85..c4dcbe12113 100644
--- a/sdks/python/apache_beam/examples/complete/juliaset/setup.py
+++ b/sdks/python/apache_beam/examples/complete/juliaset/setup.py
@@ -69,9 +69,9 @@ class build(_build):  # pylint: disable=invalid-name
 #
 #     ['pip', 'install', 'my_package'],
 #
-# TODO(BEAM-3237): Output from the custom commands are missing from the logs.
-# The output of custom commands (including failures) will be logged in the
-# worker-startup log.
+# TODO(https://github.com/apache/beam/issues/18568): Output from the custom
+# commands are missing from the logs. The output of custom commands (including
+# failures) will be logged in the worker-startup log.
 CUSTOM_COMMANDS = [['echo', 'Custom command worked!']]
 
 
diff --git a/sdks/python/apache_beam/examples/dataframe/taxiride_test.py b/sdks/python/apache_beam/examples/dataframe/taxiride_test.py
index 53348345ac7..35bb512e160 100644
--- a/sdks/python/apache_beam/examples/dataframe/taxiride_test.py
+++ b/sdks/python/apache_beam/examples/dataframe/taxiride_test.py
@@ -88,7 +88,8 @@ class TaxiRideExampleTest(unittest.TestCase):
         beam.Pipeline(), self.input_path, self.output_path)
 
     # Parse result file and compare.
-    # TODO(BEAM-12379): taxiride examples should produce int sums, not floats
+    # TODO(https://github.com/apache/beam/issues/20926): taxiride examples
+    # should produce int sums, not floats
     results = []
     with open_shards(f'{self.output_path}-*') as result_file:
       for line in result_file:
diff --git a/sdks/python/apache_beam/examples/snippets/transforms/aggregation/combineglobally_test.py b/sdks/python/apache_beam/examples/snippets/transforms/aggregation/combineglobally_test.py
index acfccde0024..1efc0a3f2e9 100644
--- a/sdks/python/apache_beam/examples/snippets/transforms/aggregation/combineglobally_test.py
+++ b/sdks/python/apache_beam/examples/snippets/transforms/aggregation/combineglobally_test.py
@@ -77,8 +77,8 @@ class CombineGloballyTest(unittest.TestCase):
     combineglobally.combineglobally_side_inputs_singleton(
         check_common_items_with_exceptions)
 
-  # TODO: enable side inputs tests after [BEAM-8400] is fixed.
-  # https://github.com/apache/beam/issues/19851
+  # TODO: enable side inputs tests after
+  # [https://github.com/apache/beam/issues/19851] is fixed.
   # def test_combineglobally_side_inputs_iter(self):
   #   combineglobally.combineglobally_side_inputs_iter(
   #       check_common_items_with_exceptions)
diff --git a/sdks/python/apache_beam/examples/streaming_wordcount_debugging.py b/sdks/python/apache_beam/examples/streaming_wordcount_debugging.py
index f64e6fe1f77..af99a4ab537 100644
--- a/sdks/python/apache_beam/examples/streaming_wordcount_debugging.py
+++ b/sdks/python/apache_beam/examples/streaming_wordcount_debugging.py
@@ -19,7 +19,8 @@
 
 It demonstrate the use of logging and assert_that in streaming mode.
 
-This workflow only works with the DirectRunner (BEAM-3377).
+This workflow only works with the DirectRunner
+(https://github.com/apache/beam/issues/18709).
 
 Usage:
 python streaming_wordcount_debugging.py
diff --git a/sdks/python/apache_beam/examples/streaming_wordcount_debugging_it_test.py b/sdks/python/apache_beam/examples/streaming_wordcount_debugging_it_test.py
index b0c041c05ca..f3460ec24f1 100644
--- a/sdks/python/apache_beam/examples/streaming_wordcount_debugging_it_test.py
+++ b/sdks/python/apache_beam/examples/streaming_wordcount_debugging_it_test.py
@@ -98,7 +98,8 @@ class StreamingWordcountDebuggingIT(unittest.TestCase):
 
   @pytest.mark.it_postcommit
   @unittest.skip(
-      "Skipped due to [BEAM-3377]: assert_that not working for streaming")
+      "Skipped due to [https://github.com/apache/beam/issues/18709]: "
+      "assert_that not working for streaming")
   def test_streaming_wordcount_debugging_it(self):
 
     # Set extra options to the pipeline for test purpose
diff --git a/sdks/python/apache_beam/io/external/gcp/pubsub.py b/sdks/python/apache_beam/io/external/gcp/pubsub.py
index 20f153df20a..a2a3430f9a1 100644
--- a/sdks/python/apache_beam/io/external/gcp/pubsub.py
+++ b/sdks/python/apache_beam/io/external/gcp/pubsub.py
@@ -40,7 +40,7 @@ class ReadFromPubSub(beam.PTransform):
   """An external ``PTransform`` for reading from Cloud Pub/Sub.
 
   Experimental; no backwards compatibility guarantees.  It requires special
-  preparation of the Java SDK.  See BEAM-7870.
+  preparation of the Java SDK.  See https://github.com/apache/beam/issues/19728.
   """
 
   URN = 'beam:transform:org.apache.beam:pubsub_read:v1'
@@ -124,7 +124,7 @@ class WriteToPubSub(beam.PTransform):
   """An external ``PTransform`` for writing messages to Cloud Pub/Sub.
 
   Experimental; no backwards compatibility guarantees.  It requires special
-  preparation of the Java SDK.  See BEAM-7870.
+  preparation of the Java SDK.  See https://github.com/apache/beam/issues/19728.
   """
 
   URN = 'beam:transform:org.apache.beam:pubsub_write:v1'
diff --git a/sdks/python/apache_beam/io/external/xlang_jdbcio_it_test.py b/sdks/python/apache_beam/io/external/xlang_jdbcio_it_test.py
index 77856046d85..c81adf8dd94 100644
--- a/sdks/python/apache_beam/io/external/xlang_jdbcio_it_test.py
+++ b/sdks/python/apache_beam/io/external/xlang_jdbcio_it_test.py
@@ -139,7 +139,8 @@ class CrossLanguageJdbcIOTest(unittest.TestCase):
       _ = (
           p
           | beam.Create(inserted_rows).with_output_types(JdbcWriteTestRow)
-          # TODO(BEAM-10750) Add test with overridden write_statement
+          # TODO(https://github.com/apache/beam/issues/20446) Add test with
+          # overridden write_statement
           | 'Write to jdbc' >> WriteToJdbc(
               table_name=table_name,
               driver_class_name=self.driver_class_name,
@@ -176,7 +177,8 @@ class CrossLanguageJdbcIOTest(unittest.TestCase):
       p.not_use_test_runner_api = True
       result = (
           p
-          # TODO(BEAM-10750) Add test with overridden read_query
+          # TODO(https://github.com/apache/beam/issues/20446) Add test with
+          # overridden read_query
           | 'Read from jdbc' >> ReadFromJdbc(
               table_name=table_name,
               driver_class_name=self.driver_class_name,
diff --git a/sdks/python/apache_beam/io/external/xlang_kinesisio_it_test.py b/sdks/python/apache_beam/io/external/xlang_kinesisio_it_test.py
index d12c220ee35..151d63d8468 100644
--- a/sdks/python/apache_beam/io/external/xlang_kinesisio_it_test.py
+++ b/sdks/python/apache_beam/io/external/xlang_kinesisio_it_test.py
@@ -83,7 +83,8 @@ class CrossLanguageKinesisIOTest(unittest.TestCase):
       TestPipeline().get_option('aws_kinesis_stream'),
       'Cannot test on real aws without pipeline options provided')
   def test_kinesis_io_roundtrip(self):
-    # TODO: enable this test for localstack once BEAM-10664 is resolved
+    # TODO: enable this test for localstack once
+    # https://github.com/apache/beam/issues/20416 is resolved
     self.run_kinesis_write()
     self.run_kinesis_read()
 
@@ -91,7 +92,8 @@ class CrossLanguageKinesisIOTest(unittest.TestCase):
       TestPipeline().get_option('aws_kinesis_stream'),
       'Do not test on localstack when pipeline options were provided')
   def test_kinesis_write(self):
-    # TODO: remove this test once BEAM-10664 is resolved
+    # TODO: remove this test once
+    # https://github.com/apache/beam/issues/20416 is resolved
     self.run_kinesis_write()
     records = self.kinesis_helper.read_from_stream(self.aws_kinesis_stream)
     self.assertEqual(
diff --git a/sdks/python/apache_beam/io/fileio.py b/sdks/python/apache_beam/io/fileio.py
index d1839e9de0f..0a5e02b8175 100644
--- a/sdks/python/apache_beam/io/fileio.py
+++ b/sdks/python/apache_beam/io/fileio.py
@@ -404,7 +404,7 @@ def _format_shard(
     kwargs['start'] = window.start.to_utc_datetime().isoformat()
     kwargs['end'] = window.end.to_utc_datetime().isoformat()
 
-  # TODO(BEAM-3759): Add support for PaneInfo
+  # TODO(https://github.com/apache/beam/issues/18721): Add support for PaneInfo
   # If the PANE is the ONLY firing in the window, we don't add it.
   #if pane and not (pane.is_first and pane.is_last):
   #  kwargs['pane'] = pane.index
diff --git a/sdks/python/apache_beam/io/fileio_test.py b/sdks/python/apache_beam/io/fileio_test.py
index ab4dba2366c..67bb1544099 100644
--- a/sdks/python/apache_beam/io/fileio_test.py
+++ b/sdks/python/apache_beam/io/fileio_test.py
@@ -543,7 +543,7 @@ class WriteFilesTest(_TestCaseWithTempDirCleanUp):
                     if row['foundation'] == 'apache']),
           label='verifyApache')
 
-  @unittest.skip('BEAM-13010')
+  @unittest.skip('https://github.com/apache/beam/issues/21269')
   def test_find_orphaned_files(self):
     dir = self._new_tempdir()
 
@@ -639,8 +639,8 @@ class WriteFilesTest(_TestCaseWithTempDirCleanUp):
     # Use state on the TestCase class, since other references would be pickled
     # into a closure and not have the desired side effects.
     #
-    # TODO(BEAM-5295): Use assert_that after it works for the cases here in
-    # streaming mode.
+    # TODO(https://github.com/apache/beam/issues/18987): Use assert_that after
+    # it works for the cases here in streaming mode.
     WriteFilesTest.all_records = []
 
     dir = '%s%s' % (self._new_tempdir(), os.sep)
@@ -652,7 +652,8 @@ class WriteFilesTest(_TestCaseWithTempDirCleanUp):
 
       ts.add_elements([('key', '%s' % elm)])
       if timestamp % 5 == 0 and timestamp != 0:
-        # TODO(BEAM-3759): Add many firings per window after getting PaneInfo.
+        # TODO(https://github.com/apache/beam/issues/18721): Add many firings
+        # per window after getting PaneInfo.
         ts.advance_processing_time(5)
         ts.advance_watermark_to(timestamp)
     ts.advance_watermark_to_infinity()
diff --git a/sdks/python/apache_beam/io/gcp/big_query_query_to_table_it_test.py b/sdks/python/apache_beam/io/gcp/big_query_query_to_table_it_test.py
index 699dfa4b119..ba1349281d6 100644
--- a/sdks/python/apache_beam/io/gcp/big_query_query_to_table_it_test.py
+++ b/sdks/python/apache_beam/io/gcp/big_query_query_to_table_it_test.py
@@ -146,8 +146,9 @@ class BigQueryQueryToTableIT(unittest.TestCase):
                       'time': '00:00:00'
                   }]
     # the API Tools bigquery client expects byte values to be base-64 encoded
-    # TODO BEAM-4850: upgrade to google-cloud-bigquery which does not require
-    # handling the encoding in beam
+    # TODO https://github.com/apache/beam/issues/19073: upgrade to
+    # google-cloud-bigquery which does not require handling the encoding in
+    # beam
     for row in table_data:
       row['bytes'] = base64.b64encode(row['bytes']).decode('utf-8')
     passed, errors = self.bigquery_client.insert_rows(
diff --git a/sdks/python/apache_beam/io/gcp/bigquery.py b/sdks/python/apache_beam/io/gcp/bigquery.py
index d9809005734..882267573df 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery.py
@@ -673,8 +673,9 @@ class _BigQuerySource(dataflow_io.NativeSource):
         kms_key=self.kms_key)
 
 
-# TODO(BEAM-14331): remove the serialization restriction in transform
-# implementation once InteractiveRunner can work without runner api roundtrips.
+# TODO(https://github.com/apache/beam/issues/21622): remove the serialization
+# restriction in transform implementation once InteractiveRunner can work
+# without runner api roundtrips.
 @dataclass
 class _BigQueryExportResult:
   coder: beam.coders.Coder
@@ -1235,7 +1236,8 @@ class _CustomBigQueryStorageStreamSource(BoundedSource):
   def estimate_size(self):
     # The size of stream source cannot be estimate due to server-side liquid
     # sharding.
-    # TODO(BEAM-12990): Implement progress reporting.
+    # TODO(https://github.com/apache/beam/issues/21126): Implement progress
+    # reporting.
     return None
 
   def split(self, desired_bundle_size, start_position=None, stop_position=None):
@@ -1250,7 +1252,8 @@ class _CustomBigQueryStorageStreamSource(BoundedSource):
         stop_position=None)
 
   def get_range_tracker(self, start_position, stop_position):
-    # TODO(BEAM-12989): Implement dynamic work rebalancing.
+    # TODO(https://github.com/apache/beam/issues/21127): Implement dynamic work
+    # rebalancing.
     assert start_position is None
     # Defaulting to the start of the stream.
     start_position = 0
@@ -1988,7 +1991,8 @@ class WriteToBigQuery(PTransform):
       validate=True,
       temp_file_format=None,
       ignore_insert_ids=False,
-      # TODO(BEAM-11857): Switch the default when the feature is mature.
+      # TODO(https://github.com/apache/beam/issues/20712): Switch the default
+      # when the feature is mature.
       with_auto_sharding=False,
       ignore_unknown_columns=False,
       load_job_project_id=None):
@@ -2518,7 +2522,8 @@ class ReadFromBigQuery(PTransform):
           'or DIRECT_READ.')
 
   def _expand_export(self, pcoll):
-    # TODO(BEAM-11115): Make ReadFromBQ rely on ReadAllFromBQ implementation.
+    # TODO(https://github.com/apache/beam/issues/20683): Make ReadFromBQ rely
+    # on ReadAllFromBQ implementation.
     temp_location = pcoll.pipeline.options.view_as(
         GoogleCloudOptions).temp_location
     job_name = pcoll.pipeline.options.view_as(GoogleCloudOptions).job_name
diff --git a/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py b/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py
index ddc8fe61db0..41fdb6e7cc8 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py
@@ -943,9 +943,10 @@ class BigQueryBatchFileLoads(beam.PTransform):
             file_prefix_pcv,
             *self.schema_side_inputs))
 
-    # TODO(BEAM-9494): Remove the identity transform. We flatten both
-    # PCollection paths and use an identity function to work around a
-    # flatten optimization issue where the wrong coder is being used.
+    # TODO(https://github.com/apache/beam/issues/20285): Remove the identity
+    # transform. We flatten both PCollection paths and use an identity function
+    # to work around a flatten optimization issue where the wrong coder is
+    # being used.
     all_destination_file_pairs_pc = (
         (destination_files_kv_pc, more_destination_files_kv_pc)
         | "DestinationFilesUnion" >> beam.Flatten()
diff --git a/sdks/python/apache_beam/io/gcp/bigquery_read_it_test.py b/sdks/python/apache_beam/io/gcp/bigquery_read_it_test.py
index e01b48283b7..babc6f105b0 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery_read_it_test.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery_read_it_test.py
@@ -665,7 +665,8 @@ class ReadAllBQTests(BigQueryReadIntegrationTests):
   @skip(['PortableRunner', 'FlinkRunner'])
   @pytest.mark.it_postcommit
   def test_read_queries(self):
-    # TODO(BEAM-11311): Remove experiment when tests run on r_v2.
+    # TODO(https://github.com/apache/beam/issues/20610): Remove experiment when
+    # tests run on r_v2.
     args = self.args + ["--experiments=use_runner_v2"]
     with beam.Pipeline(argv=args) as p:
       result = (
diff --git a/sdks/python/apache_beam/io/gcp/bigtableio.py b/sdks/python/apache_beam/io/gcp/bigtableio.py
index 70b84b9c34f..d1a1f86f1cc 100644
--- a/sdks/python/apache_beam/io/gcp/bigtableio.py
+++ b/sdks/python/apache_beam/io/gcp/bigtableio.py
@@ -71,8 +71,8 @@ try:
 
         # If even one request fails we retry everything. BigTable mutations are
         # idempotent so this should be correct.
-        # TODO(BEAM-13849): make this more efficient by retrying only
-        # re-triable failed requests.
+        # TODO(https://github.com/apache/beam/issues/21396): make this more
+        # efficient by retrying only re-triable failed requests.
         for status in status_list:
           if not status:
             # BigTable client may return 'None' instead of a valid status in
diff --git a/sdks/python/apache_beam/io/gcp/bigtableio_test.py b/sdks/python/apache_beam/io/gcp/bigtableio_test.py
index 24fc1ff385d..f8664a0f921 100644
--- a/sdks/python/apache_beam/io/gcp/bigtableio_test.py
+++ b/sdks/python/apache_beam/io/gcp/bigtableio_test.py
@@ -78,7 +78,8 @@ class TestWriteBigTable(unittest.TestCase):
         write_fn.finish_bundle()
       except:  # pylint: disable=bare-except
         # Currently we fail the bundle when there are any failures.
-        # TODO(BEAM-13849): remove after bigtableio can selectively retry.
+        # TODO(https://github.com/apache/beam/issues/21396): remove after
+        # bigtableio can selectively retry.
         pass
       self.verify_write_call_metric(
           self._PROJECT_ID,
diff --git a/sdks/python/apache_beam/io/gcp/gcsio_integration_test.py b/sdks/python/apache_beam/io/gcp/gcsio_integration_test.py
index bf67273bad0..814a7a3ad00 100644
--- a/sdks/python/apache_beam/io/gcp/gcsio_integration_test.py
+++ b/sdks/python/apache_beam/io/gcp/gcsio_integration_test.py
@@ -122,7 +122,8 @@ class GcsIOIntegrationTest(unittest.TestCase):
     self._test_copy("test_copy_kms", self.kms_key_name)
 
   @pytest.mark.it_postcommit_sickbay
-  #@pytest.mark.it_postcommit BEAM-12352 maxBytesRewrittenPerCall '
+  #@pytest.mark.it_postcommit https://github.com/apache/beam/issues/20934
+  # maxBytesRewrittenPerCall
   def test_copy_rewrite_token(self):
     # Tests a multi-part copy (rewrite) operation. This is triggered by a
     # combination of 3 conditions:
@@ -176,7 +177,8 @@ class GcsIOIntegrationTest(unittest.TestCase):
     self._test_copy_batch("test_copy_batch_kms", self.kms_key_name)
 
   @pytest.mark.it_postcommit_sickbay
-  #@pytest.mark.it_postcommit BEAM-12352  maxBytesRewrittenPerCall
+  #@pytest.mark.it_postcommit https://github.com/apache/beam/issues/20934
+  # maxBytesRewrittenPerCall
   def test_copy_batch_rewrite_token(self):
     # Tests a multi-part copy (rewrite) operation. This is triggered by a
     # combination of 3 conditions:
diff --git a/sdks/python/apache_beam/io/gcp/pubsub.py b/sdks/python/apache_beam/io/gcp/pubsub.py
index 24ba13697d9..8cee8acfebb 100644
--- a/sdks/python/apache_beam/io/gcp/pubsub.py
+++ b/sdks/python/apache_beam/io/gcp/pubsub.py
@@ -270,7 +270,7 @@ class ReadFromPubSub(PTransform):
 
   def to_runner_api_parameter(self, context):
     # Required as this is identified by type in PTransformOverrides.
-    # TODO(BEAM-3812): Use an actual URN here.
+    # TODO(https://github.com/apache/beam/issues/18713): Use an actual URN here.
     return self.to_runner_api_pickled(context)
 
 
@@ -380,7 +380,7 @@ class WriteToPubSub(PTransform):
 
   def to_runner_api_parameter(self, context):
     # Required as this is identified by type in PTransformOverrides.
-    # TODO(BEAM-3812): Use an actual URN here.
+    # TODO(https://github.com/apache/beam/issues/18713): Use an actual URN here.
     return self.to_runner_api_pickled(context)
 
   def display_data(self):
diff --git a/sdks/python/apache_beam/io/gcp/pubsub_integration_test.py b/sdks/python/apache_beam/io/gcp/pubsub_integration_test.py
index 6754c70068a..28c30df1d55 100644
--- a/sdks/python/apache_beam/io/gcp/pubsub_integration_test.py
+++ b/sdks/python/apache_beam/io/gcp/pubsub_integration_test.py
@@ -54,9 +54,10 @@ class PubSubIntegrationTest(unittest.TestCase):
   ID_LABEL = 'id'
   TIMESTAMP_ATTRIBUTE = 'timestamp'
   INPUT_MESSAGES = {
-      # TODO(BEAM-4275): DirectRunner doesn't support reading or writing
-      # label_ids, nor writing timestamp attributes. Once these features exist,
-      # TestDirectRunner and TestDataflowRunner should behave identically.
+      # TODO(https://github.com/apache/beam/issues/18939): DirectRunner doesn't
+      # support reading or writing label_ids, nor writing timestamp attributes.
+      # Once these features exist, TestDirectRunner and TestDataflowRunner
+      # should behave identically.
       'TestDirectRunner': [
           PubsubMessage(b'data001', {}),
           # For those elements that have the TIMESTAMP_ATTRIBUTE attribute, the
diff --git a/sdks/python/apache_beam/io/gcp/pubsub_test.py b/sdks/python/apache_beam/io/gcp/pubsub_test.py
index 570ec475fad..8e297511ce0 100644
--- a/sdks/python/apache_beam/io/gcp/pubsub_test.py
+++ b/sdks/python/apache_beam/io/gcp/pubsub_test.py
@@ -406,7 +406,8 @@ class TestWriteStringsToPubSubOverride(unittest.TestCase):
     # Ensure that the properties passed through correctly
     self.assertEqual('a_topic', write_transform.dofn.short_topic_name)
     self.assertEqual(True, write_transform.dofn.with_attributes)
-    # TODO(BEAM-4275): These properties aren't supported yet in direct runner.
+    # TODO(https://github.com/apache/beam/issues/18939): These properties
+    # aren't supported yet in direct runner.
     self.assertEqual(None, write_transform.dofn.id_label)
     self.assertEqual(None, write_transform.dofn.timestamp_attribute)
 
diff --git a/sdks/python/apache_beam/io/watermark_estimators.py b/sdks/python/apache_beam/io/watermark_estimators.py
index be91ffb9c75..5051c2d5e4e 100644
--- a/sdks/python/apache_beam/io/watermark_estimators.py
+++ b/sdks/python/apache_beam/io/watermark_estimators.py
@@ -40,8 +40,8 @@ class MonotonicWatermarkEstimator(WatermarkEstimator):
     if self._watermark is None:
       self._watermark = timestamp
     else:
-      # TODO(BEAM-9312): Consider making it configurable to deal with late
-      # timestamp.
+      # TODO(https://github.com/apache/beam/issues/20041): Consider making it
+      # configurable to deal with late timestamp.
       if timestamp < self._watermark:
         raise ValueError(
             'A MonotonicWatermarkEstimator expects output '
diff --git a/sdks/python/apache_beam/ml/inference/base.py b/sdks/python/apache_beam/ml/inference/base.py
index ad7191cb59b..dd6503b3deb 100644
--- a/sdks/python/apache_beam/ml/inference/base.py
+++ b/sdks/python/apache_beam/ml/inference/base.py
@@ -229,7 +229,8 @@ class RunInference(beam.PTransform[beam.PCollection[ExampleT],
   Models for supported frameworks can be loaded via a URI. Supported services
   can also be used.
 
-  TODO(BEAM-14046): Add and link to help documentation
+  TODO(https://github.com/apache/beam/issues/21436): Add and link to help
+  documentation
   """
   def __init__(
       self,
@@ -240,14 +241,16 @@ class RunInference(beam.PTransform[beam.PCollection[ExampleT],
     self._kwargs = kwargs
     self._clock = clock
 
-  # TODO(BEAM-14208): Add batch_size back off in the case there
-  # are functional reasons large batch sizes cannot be handled.
+  # TODO(https://github.com/apache/beam/issues/21447): Add batch_size back off
+  # in the case there are functional reasons large batch sizes cannot be
+  # handled.
   def expand(
       self, pcoll: beam.PCollection[ExampleT]) -> beam.PCollection[PredictionT]:
     resource_hints = self._model_handler.get_resource_hints()
     return (
         pcoll
-        # TODO(BEAM-14044): Hook into the batching DoFn APIs.
+        # TODO(https://github.com/apache/beam/issues/21440): Hook into the
+        # batching DoFn APIs.
         | beam.BatchElements(**self._model_handler.batch_elements_kwargs())
         | (
             beam.ParDo(
@@ -327,7 +330,8 @@ class _RunInferenceDoFn(beam.DoFn, Generic[ExampleT, PredictionT]):
           load_model_latency_ms, model_byte_size)
       return model
 
-    # TODO(BEAM-14207): Investigate releasing model.
+    # TODO(https://github.com/apache/beam/issues/21443): Investigate releasing
+    # model.
     return self._shared_model_handle.acquire(load)
 
   def setup(self):
@@ -350,7 +354,8 @@ class _RunInferenceDoFn(beam.DoFn, Generic[ExampleT, PredictionT]):
     return predictions
 
   def finish_bundle(self):
-    # TODO(BEAM-13970): Figure out why there is a cache.
+    # TODO(https://github.com/apache/beam/issues/21435): Figure out why there
+    # is a cache.
     self._metrics_collector.update_metrics_with_cache()
 
 
diff --git a/sdks/python/apache_beam/ml/inference/sklearn_inference_test.py b/sdks/python/apache_beam/ml/inference/sklearn_inference_test.py
index ecd81d204d6..b90ec9cbb54 100644
--- a/sdks/python/apache_beam/ml/inference/sklearn_inference_test.py
+++ b/sdks/python/apache_beam/ml/inference/sklearn_inference_test.py
@@ -173,7 +173,9 @@ class SkLearnRunInferenceTest(unittest.TestCase):
         sys.getsizeof(batched_examples_float[0]) * 3,
         inference_runner.get_num_bytes(batched_examples_float))
 
-  @unittest.skipIf(platform.system() == 'Windows', 'BEAM-14359')
+  @unittest.skipIf(
+      platform.system() == 'Windows',
+      'https://github.com/apache/beam/issues/21449')
   def test_pipeline_pickled(self):
     temp_file_name = self.tmpdir + os.sep + 'pickled_file'
     with open(temp_file_name, 'wb') as file:
@@ -191,7 +193,9 @@ class SkLearnRunInferenceTest(unittest.TestCase):
       assert_that(
           actual, equal_to(expected, equals_fn=_compare_prediction_result))
 
-  @unittest.skipIf(platform.system() == 'Windows', 'BEAM-14359')
+  @unittest.skipIf(
+      platform.system() == 'Windows',
+      'https://github.com/apache/beam/issues/21449')
   def test_pipeline_joblib(self):
     temp_file_name = self.tmpdir + os.sep + 'joblib_file'
     with open(temp_file_name, 'wb') as file:
@@ -220,7 +224,9 @@ class SkLearnRunInferenceTest(unittest.TestCase):
             SklearnModelHandlerNumpy(model_uri='/var/bad_file_name'))
         pipeline.run()
 
-  @unittest.skipIf(platform.system() == 'Windows', 'BEAM-14359')
+  @unittest.skipIf(
+      platform.system() == 'Windows',
+      'https://github.com/apache/beam/issues/21449')
   def test_bad_input_type_raises(self):
     with self.assertRaisesRegex(AssertionError,
                                 'Unsupported serialization type'):
@@ -229,7 +235,9 @@ class SkLearnRunInferenceTest(unittest.TestCase):
             model_uri=file.name, model_file_type=None)
         model_handler.load_model()
 
-  @unittest.skipIf(platform.system() == 'Windows', 'BEAM-14359')
+  @unittest.skipIf(
+      platform.system() == 'Windows',
+      'https://github.com/apache/beam/issues/21449')
   def test_pipeline_pandas(self):
     temp_file_name = self.tmpdir + os.sep + 'pickled_file'
     with open(temp_file_name, 'wb') as file:
@@ -274,7 +282,9 @@ class SkLearnRunInferenceTest(unittest.TestCase):
       assert_that(
           actual, equal_to(expected, equals_fn=_compare_dataframe_predictions))
 
-  @unittest.skipIf(platform.system() == 'Windows', 'BEAM-14359')
+  @unittest.skipIf(
+      platform.system() == 'Windows',
+      'https://github.com/apache/beam/issues/21449')
   def test_pipeline_pandas_with_keys(self):
     temp_file_name = self.tmpdir + os.sep + 'pickled_file'
     with open(temp_file_name, 'wb') as file:
diff --git a/sdks/python/apache_beam/options/pipeline_options.py b/sdks/python/apache_beam/options/pipeline_options.py
index d19b2c23217..83fb77a4f40 100644
--- a/sdks/python/apache_beam/options/pipeline_options.py
+++ b/sdks/python/apache_beam/options/pipeline_options.py
@@ -302,8 +302,9 @@ class PipelineOptions(HasDisplayData):
       Dictionary of all args and values.
     """
 
-    # TODO(BEAM-1319): PipelineOption sub-classes in the main session might be
-    # repeated. Pick last unique instance of each subclass to avoid conflicts.
+    # TODO(https://github.com/apache/beam/issues/18197): PipelineOption
+    # sub-classes in the main session might be repeated. Pick last unique
+    # instance of each subclass to avoid conflicts.
     subset = {}
     parser = _BeamArgumentParser()
     for cls in PipelineOptions.__subclasses__():
diff --git a/sdks/python/apache_beam/options/pipeline_options_test.py b/sdks/python/apache_beam/options/pipeline_options_test.py
index 3195f3594fe..6abfffe6910 100644
--- a/sdks/python/apache_beam/options/pipeline_options_test.py
+++ b/sdks/python/apache_beam/options/pipeline_options_test.py
@@ -218,7 +218,8 @@ class PipelineOptionsTest(unittest.TestCase):
       parser.add_argument(
           '--fake_multi_option', action='append', help='fake multi option')
 
-  @unittest.skip("TODO(BEAM-12515): Flaky test.")
+  @unittest.skip(
+      "TODO(https://github.com/apache/beam/issues/21116): Flaky test.")
   def test_display_data(self):
     for case in PipelineOptionsTest.TEST_CASES:
       options = PipelineOptions(flags=case['flags'])
@@ -516,9 +517,9 @@ class PipelineOptionsTest(unittest.TestCase):
     options = PipelineOptions(['--redefined_flag'])
     self.assertTrue(options.get_all_options()['redefined_flag'])
 
-  # TODO(BEAM-1319): Require unique names only within a test.
-  # For now, <file name acronym>_vp_arg<number> will be the convention
-  # to name value-provider arguments in tests, as opposed to
+  # TODO(https://github.com/apache/beam/issues/18197): Require unique names
+  # only within a test. For now, <file name acronym>_vp_arg<number> will be
+  # the convention to name value-provider arguments in tests, as opposed to
   # <file name acronym>_non_vp_arg<number> for non-value-provider arguments.
   # The number will grow per file as tests are added.
   def test_value_provider_options(self):
diff --git a/sdks/python/apache_beam/options/value_provider_test.py b/sdks/python/apache_beam/options/value_provider_test.py
index 21e05b326a0..42afa8c0def 100644
--- a/sdks/python/apache_beam/options/value_provider_test.py
+++ b/sdks/python/apache_beam/options/value_provider_test.py
@@ -31,9 +31,9 @@ from apache_beam.options.value_provider import RuntimeValueProvider
 from apache_beam.options.value_provider import StaticValueProvider
 
 
-# TODO(BEAM-1319): Require unique names only within a test.
-# For now, <file name acronym>_vp_arg<number> will be the convention
-# to name value-provider arguments in tests, as opposed to
+# TODO(https://github.com/apache/beam/issues/18197): Require unique names only
+# within a test. For now, <file name acronym>_vp_arg<number> will be the
+# convention to name value-provider arguments in tests, as opposed to
 # <file name acronym>_non_vp_arg<number> for non-value-provider arguments.
 # The number will grow per file as tests are added.
 class ValueProviderTests(unittest.TestCase):
diff --git a/sdks/python/apache_beam/pipeline.py b/sdks/python/apache_beam/pipeline.py
index 1517cd37998..6b9d21104ba 100644
--- a/sdks/python/apache_beam/pipeline.py
+++ b/sdks/python/apache_beam/pipeline.py
@@ -301,7 +301,8 @@ class Pipeline(object):
               original_transform_node.full_label,
               original_transform_node.main_inputs)
 
-          # TODO(BEAM-12854): Merge rather than override.
+          # TODO(https://github.com/apache/beam/issues/21178): Merge rather
+          # than override.
           replacement_transform_node.resource_hints = (
               original_transform_node.resource_hints)
 
@@ -791,7 +792,8 @@ class Pipeline(object):
           result_element_type, {'*': typehints.Any})
     elif isinstance(result_pcollection, pvalue.DoOutputsTuple):
       # {Single, multi}-input, multi-output inference.
-      # TODO(BEAM-4132): Add support for tagged type hints.
+      # TODO(https://github.com/apache/beam/issues/18957): Add support for
+      #   tagged type hints.
       #   https://github.com/apache/beam/pull/9810#discussion_r338765251
       for pcoll in result_pcollection:
         if pcoll.element_type is None:
@@ -1263,8 +1265,9 @@ class AppliedPTransform(object):
     # External transforms require more splicing than just setting the spec.
     from apache_beam.transforms import external
     if isinstance(self.transform, external.ExternalTransform):
-      # TODO(BEAM-12082): Support resource hints in XLang transforms.
-      # In particular, make sure hints on composites are properly propagated.
+      # TODO(https://github.com/apache/beam/issues/18371): Support resource
+      # hints in XLang transforms. In particular, make sure hints on composites
+      # are properly propagated.
       return self.transform.to_runner_api_transform(context, self.full_label)
 
     from apache_beam.portability.api import beam_runner_api_pb2
@@ -1315,7 +1318,7 @@ class AppliedPTransform(object):
         },
         environment_id=environment_id,
         annotations=self.annotations,
-        # TODO(BEAM-366): Add display_data.
+        # TODO(https://github.com/apache/beam/issues/18012): Add display_data.
         display_data=DisplayData.create_from(self.transform).to_proto()
         if self.transform else None)
 
@@ -1350,8 +1353,8 @@ class AppliedPTransform(object):
         transform._resource_hints = dict(resource_hints)
 
     # Ordering is important here.
-    # TODO(BEAM-9635): use key, value pairs instead of depending on tags with
-    # index as a suffix.
+    # TODO(https://github.com/apache/beam/issues/20136): use key, value pairs
+    # instead of depending on tags with index as a suffix.
     indexed_side_inputs = [
         (get_sideinput_index(tag), context.pcollections.get_by_id(id)) for tag,
         id in proto.inputs.items() if tag in side_input_tags
diff --git a/sdks/python/apache_beam/runners/common.py b/sdks/python/apache_beam/runners/common.py
index 53064dd23eb..9d47c2ce6a2 100644
--- a/sdks/python/apache_beam/runners/common.py
+++ b/sdks/python/apache_beam/runners/common.py
@@ -353,7 +353,7 @@ class DoFnSignature(object):
         raise NotImplementedError(
             f"DoFn {self.do_fn!r} has unsupported per-key DoFn param {d}. "
             "Per-key DoFn params are not yet supported for process_batch "
-            "(BEAM-14409).")
+            "(https://github.com/apache/beam/issues/21653).")
 
       # Fallback to catch anything not explicitly supported
       if not d in (core.DoFn.WindowParam,
@@ -654,8 +654,8 @@ def _get_arg_placeholders(
       self.placeholder = placeholder
 
   if all(core.DoFn.ElementParam != arg for arg in default_arg_values):
-    # TODO(BEAM-7867): Handle cases in which len(arg_names) ==
-    #   len(default_arg_values).
+    # TODO(https://github.com/apache/beam/issues/19631): Handle cases in which
+    #   len(arg_names) == len(default_arg_values).
     args_to_pick = len(arg_names) - len(default_arg_values) - 1
     # Positional argument values for process(), with placeholders for special
     # values such as the element, timestamp, etc.
@@ -1047,7 +1047,9 @@ class PerWindowInvoker(DoFnInvoker):
       if core.DoFn.ElementParam == p:
         args_for_process_batch[i] = windowed_batch.values
       elif core.DoFn.KeyParam == p:
-        raise NotImplementedError("BEAM-14409: Per-key process_batch")
+        raise NotImplementedError(
+            "https://github.com/apache/beam/issues/21653: "
+            "Per-key process_batch")
       elif core.DoFn.WindowParam == p:
         args_for_process_batch[i] = window
       elif core.DoFn.TimestampParam == p:
@@ -1056,9 +1058,13 @@ class PerWindowInvoker(DoFnInvoker):
         assert isinstance(windowed_batch, HomogeneousWindowedBatch)
         args_for_process_batch[i] = windowed_batch.pane_info
       elif isinstance(p, core.DoFn.StateParam):
-        raise NotImplementedError("BEAM-14409: Per-key process_batch")
+        raise NotImplementedError(
+            "https://github.com/apache/beam/issues/21653: "
+            "Per-key process_batch")
       elif isinstance(p, core.DoFn.TimerParam):
-        raise NotImplementedError("BEAM-14409: Per-key process_batch")
+        raise NotImplementedError(
+            "https://github.com/apache/beam/issues/21653: "
+            "Per-key process_batch")
 
     kwargs_for_process_batch = kwargs_for_process_batch or {}
 
@@ -1362,7 +1368,8 @@ class DoFnRunner:
     # Optimize for the common case.
     main_receivers = tagged_receivers[None]
 
-    # TODO(BEAM-3937): Remove if block after output counter released.
+    # TODO(https://github.com/apache/beam/issues/18886): Remove if block after
+    # output counter released.
     if 'outputs_per_element_counter' in RuntimeValueProvider.experiments:
       # TODO(BEAM-3955): Make step_name and operation_name less confused.
       output_counter_name = (
@@ -1556,8 +1563,9 @@ class _OutputHandler(OutputHandler):
     """
     results = results or []
 
-    # TODO(BEAM-10782): Verify that the results object is a valid iterable type
-    #  if performance_runtime_type_check is active, without harming performance
+    # TODO(https://github.com/apache/beam/issues/20404): Verify that the
+    #  results object is a valid iterable type if
+    #  performance_runtime_type_check is active, without harming performance
     output_element_count = 0
     for result in results:
       tag, result = self._handle_tagged_output(result)
@@ -1590,8 +1598,9 @@ class _OutputHandler(OutputHandler):
 
         self._write_batch_to_tag(tag, windowed_batch, watermark_estimator)
 
-    # TODO(BEAM-3937): Remove if block after output counter released.
-    # Only enable per_element_output_counter when counter cythonized
+    # TODO(https://github.com/apache/beam/issues/18886): Remove if block after
+    # output counter released. Only enable per_element_output_counter when
+    # counter cythonized
     if self.per_element_output_counter is not None:
       self.per_element_output_counter.add_input(output_element_count)
 
@@ -1640,8 +1649,9 @@ class _OutputHandler(OutputHandler):
 
         self._write_value_to_tag(tag, windowed_value, watermark_estimator)
 
-    # TODO(BEAM-3937): Remove if block after output counter released.
-    # Only enable per_element_output_counter when counter cythonized
+    # TODO(https://github.com/apache/beam/issues/18886): Remove if block after
+    # output counter released. Only enable per_element_output_counter when
+    # counter cythonized
     if self.per_element_output_counter is not None:
       self.per_element_output_counter.add_input(output_element_count)
 
diff --git a/sdks/python/apache_beam/runners/dataflow/dataflow_metrics.py b/sdks/python/apache_beam/runners/dataflow/dataflow_metrics.py
index 516e0ffd160..c1532ebc7d9 100644
--- a/sdks/python/apache_beam/runners/dataflow/dataflow_metrics.py
+++ b/sdks/python/apache_beam/runners/dataflow/dataflow_metrics.py
@@ -288,9 +288,10 @@ def main(argv):
   main method to display MetricResults for a specific --job_id and --project
   which takes only a few seconds.
   """
-  # TODO(BEAM-6833): The MetricResults do not show translated step names as the
-  # job_graph is not provided to DataflowMetrics.
-  # Import here to avoid adding the dependency for local running scenarios.
+  # TODO(https://github.com/apache/beam/issues/19452): The MetricResults do not
+  # show translated step names as the job_graph is not provided to
+  # DataflowMetrics. Import here to avoid adding the dependency for local
+  # running scenarios.
   try:
     # pylint: disable=wrong-import-order, wrong-import-position
     from apache_beam.runners.dataflow.internal import apiclient
diff --git a/sdks/python/apache_beam/runners/dataflow/dataflow_metrics_test.py b/sdks/python/apache_beam/runners/dataflow/dataflow_metrics_test.py
index a2ef0fe12dc..06e1585ffc4 100644
--- a/sdks/python/apache_beam/runners/dataflow/dataflow_metrics_test.py
+++ b/sdks/python/apache_beam/runners/dataflow/dataflow_metrics_test.py
@@ -65,8 +65,8 @@ class DictToObject(object):
 
 class TestDataflowMetrics(unittest.TestCase):
 
-  # TODO(BEAM-6734): Write a dump tool to generate this fake data, or
-  # somehow make this easier to maintain.
+  # TODO(https://github.com/apache/beam/issues/19258): Write a dump tool to
+  # generate this fake data, or somehow make this easier to maintain.
   ONLY_COUNTERS_LIST = {
       "metrics": [
           {
diff --git a/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py b/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
index 49f7251c055..22955ef39db 100644
--- a/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
+++ b/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
@@ -288,11 +288,11 @@ class DataflowRunner(PipelineRunner):
             access_pattern = side_input._side_input_data().access_pattern
             if access_pattern == common_urns.side_inputs.ITERABLE.urn:
               if use_unified_worker or not use_fn_api:
-                # TODO(BEAM-9173): Stop patching up the access pattern to
-                # appease Dataflow when using the UW and hardcode the output
-                # type to be Any since the Dataflow JSON and pipeline proto
-                # can differ in coders which leads to encoding/decoding issues
-                # within the runner.
+                # TODO(https://github.com/apache/beam/issues/20043): Stop
+                # patching up the access pattern to appease Dataflow when
+                # using the UW and hardcode the output type to be Any since
+                # the Dataflow JSON and pipeline proto can differ in coders
+                # which leads to encoding/decoding issues within the runner.
                 side_input.pvalue.element_type = typehints.Any
                 new_side_input = _DataflowIterableSideInput(side_input)
               else:
@@ -377,7 +377,8 @@ class DataflowRunner(PipelineRunner):
 
       @staticmethod
       def _overrides_setup_or_teardown(combinefn):
-        # TODO(BEAM-3736): provide an implementation for this method
+        # TODO(https://github.com/apache/beam/issues/18716): provide an
+        # implementation for this method
         return False
 
     return CombineFnVisitor()
@@ -1663,8 +1664,8 @@ class DataflowPipelineResult(PipelineResult):
           'Job did not reach to a terminal state after waiting indefinitely. '
           '{}'.format(consoleUrl))
 
-      # TODO(BEAM-14291): Also run this check if wait_until_finish was called
-      # after the pipeline completed.
+      # TODO(https://github.com/apache/beam/issues/21695): Also run this check
+      # if wait_until_finish was called after the pipeline completed.
       if terminated and self.state != PipelineState.DONE:
         # TODO(BEAM-1290): Consider converting this to an error log based on
        # the resolution of the issue.
diff --git a/sdks/python/apache_beam/runners/dataflow/dataflow_runner_test.py b/sdks/python/apache_beam/runners/dataflow/dataflow_runner_test.py
index dd1de55c628..fd8c7fa7704 100644
--- a/sdks/python/apache_beam/runners/dataflow/dataflow_runner_test.py
+++ b/sdks/python/apache_beam/runners/dataflow/dataflow_runner_test.py
@@ -66,7 +66,8 @@ except ImportError:
 
 
 # SpecialParDo and SpecialDoFn are used in test_remote_runner_display_data.
-# Due to BEAM-8482, these need to be declared outside of the test method.
+# Due to https://github.com/apache/beam/issues/19848, these need to be declared
+# outside of the test method.
 # TODO: Should not subclass ParDo. Switch to PTransform as soon as
 # composite transforms support display data.
 class SpecialParDo(beam.ParDo):
@@ -294,7 +295,8 @@ class DataflowRunnerTest(unittest.TestCase, ExtraAssertionsMixin):
         p | ptransform.Create([1, 2, 3, 4, 5])
         | 'Do' >> SpecialParDo(SpecialDoFn(), now))
 
-    # TODO(BEAM-366) Enable runner API on this test.
+    # TODO(https://github.com/apache/beam/issues/18012) Enable runner API on
+    # this test.
     p.run(test_runner_api=False)
     job_dict = json.loads(str(remote_runner.job))
     steps = [
@@ -748,7 +750,9 @@ class DataflowRunnerTest(unittest.TestCase, ExtraAssertionsMixin):
         out = p | beam.Create([1]) | beam.io.WriteToBigQuery('some.table')
         out['destination_file_pairs'] | 'MyTransform' >> beam.Map(lambda _: _)
 
-  @unittest.skip('BEAM-3736: enable once CombineFnVisitor is fixed')
+  @unittest.skip(
+      'https://github.com/apache/beam/issues/18716: enable once '
+      'CombineFnVisitor is fixed')
   def test_unsupported_combinefn_detection(self):
     class CombinerWithNonDefaultSetupTeardown(combiners.CountCombineFn):
       def setup(self, *args, **kwargs):
diff --git a/sdks/python/apache_beam/runners/dataflow/internal/apiclient.py b/sdks/python/apache_beam/runners/dataflow/internal/apiclient.py
index e0872956529..0d3c67e222e 100644
--- a/sdks/python/apache_beam/runners/dataflow/internal/apiclient.py
+++ b/sdks/python/apache_beam/runners/dataflow/internal/apiclient.py
@@ -1310,8 +1310,9 @@ def _verify_interpreter_version_is_supported(pipeline_options):
 # This is required for the legacy python dataflow runner, as portability
 # does not communicate to the service via python code, but instead via a
 # a runner harness (in C++ or Java).
-# TODO(BEAM-7050) : Remove this antipattern, legacy dataflow python
-# pipelines will break whenever a new cy_combiner type is used.
+# TODO(https://github.com/apache/beam/issues/19433): Remove this antipattern;
+# legacy dataflow python pipelines will break whenever a new cy_combiner type
+# is used.
 structured_counter_translations = {
     cy_combiners.CountCombineFn: (
         dataflow.CounterMetadata.KindValueValuesEnum.SUM,
diff --git a/sdks/python/apache_beam/runners/dataflow/test_dataflow_runner.py b/sdks/python/apache_beam/runners/dataflow/test_dataflow_runner.py
index 58bc05c3950..1550034afc7 100644
--- a/sdks/python/apache_beam/runners/dataflow/test_dataflow_runner.py
+++ b/sdks/python/apache_beam/runners/dataflow/test_dataflow_runner.py
@@ -52,8 +52,8 @@ class TestDataflowRunner(DataflowRunner):
 
     self.result = super().run_pipeline(pipeline, options)
     if self.result.has_job:
-      # TODO(markflyhigh)(BEAM-1890): Use print since Nose dosen't show logs
-      # in some cases.
+      # TODO(markflyhigh)(https://github.com/apache/beam/issues/18254): Use
+      # print since Nose doesn't show logs in some cases.
       print('Worker logs: %s' % self.build_console_url(options))
       _LOGGER.info('Console log: ')
       _LOGGER.info(self.build_console_url(options))
diff --git a/sdks/python/apache_beam/runners/direct/direct_runner.py b/sdks/python/apache_beam/runners/direct/direct_runner.py
index 9777437e25c..6a48b1db5d1 100644
--- a/sdks/python/apache_beam/runners/direct/direct_runner.py
+++ b/sdks/python/apache_beam/runners/direct/direct_runner.py
@@ -416,7 +416,8 @@ class _DirectWriteToPubSubFn(DoFn):
     self.timestamp_attribute = transform.timestamp_attribute
     self.with_attributes = transform.with_attributes
 
-    # TODO(BEAM-4275): Add support for id_label and timestamp_attribute.
+    # TODO(https://github.com/apache/beam/issues/18939): Add support for
+    # id_label and timestamp_attribute.
     if transform.id_label:
       raise NotImplementedError(
           'DirectRunner: id_label is not supported for '
diff --git a/sdks/python/apache_beam/runners/direct/direct_runner_test.py b/sdks/python/apache_beam/runners/direct/direct_runner_test.py
index 33d7c2cbd58..58cec732d3f 100644
--- a/sdks/python/apache_beam/runners/direct/direct_runner_test.py
+++ b/sdks/python/apache_beam/runners/direct/direct_runner_test.py
@@ -136,8 +136,8 @@ class BundleBasedRunnerTest(unittest.TestCase):
 
 class DirectRunnerRetryTests(unittest.TestCase):
   def test_retry_fork_graph(self):
-    # TODO(BEAM-3642): The FnApiRunner currently does not currently support
-    # retries.
+    # TODO(https://github.com/apache/beam/issues/18640): The FnApiRunner
+    # does not currently support retries.
     p = beam.Pipeline(runner='BundleBasedDirectRunner')
 
     # TODO(mariagh): Remove the use of globals from the test.
diff --git a/sdks/python/apache_beam/runners/direct/transform_evaluator.py b/sdks/python/apache_beam/runners/direct/transform_evaluator.py
index f68a63084fd..a0600255028 100644
--- a/sdks/python/apache_beam/runners/direct/transform_evaluator.py
+++ b/sdks/python/apache_beam/runners/direct/transform_evaluator.py
@@ -593,7 +593,8 @@ class _PubSubReadEvaluator(_TransformEvaluator):
   """TransformEvaluator for PubSub read."""
 
   # A mapping of transform to _PubSubSubscriptionWrapper.
-  # TODO(BEAM-7750): Prevents garbage collection of pipeline instances.
+  # TODO(https://github.com/apache/beam/issues/19751): Prevents garbage
+  # collection of pipeline instances.
   _subscription_cache = {}  # type: Dict[AppliedPTransform, str]
 
   def __init__(
@@ -924,7 +925,7 @@ class _GroupByKeyOnlyEvaluator(_TransformEvaluator):
     self.output_pcollection = list(self._outputs)[0]
 
     # The output type of a GroupByKey will be Tuple[Any, Any] or more specific.
-    # TODO(BEAM-2717): Infer coders earlier.
+    # TODO(https://github.com/apache/beam/issues/18490): Infer coders earlier.
     kv_type_hint = (
         self._applied_ptransform.outputs[None].element_type or
         self._applied_ptransform.transform.get_type_hints().input_types[0][0])
diff --git a/sdks/python/apache_beam/runners/interactive/augmented_pipeline.py b/sdks/python/apache_beam/runners/interactive/augmented_pipeline.py
index 1cfc5bc959c..c1adc0c4a4f 100644
--- a/sdks/python/apache_beam/runners/interactive/augmented_pipeline.py
+++ b/sdks/python/apache_beam/runners/interactive/augmented_pipeline.py
@@ -70,8 +70,9 @@ class AugmentedPipeline:
   def augmented_pipeline(self) -> beam_runner_api_pb2.Pipeline:
     return self.augment()
 
-  # TODO(BEAM-10708): Support generating a background recording job that
-  # contains unbound source recording transforms only.
+  # TODO(https://github.com/apache/beam/issues/20526): Support generating a
+  # background recording job that contains unbound source recording transforms
+  # only.
   @property
   def background_recording_pipeline(self) -> beam_runner_api_pb2.Pipeline:
     raise NotImplementedError
@@ -123,6 +124,6 @@ class AugmentedPipeline:
           self._context,
           self._cache_manager,
           self._cacheables[writecache_pcoll]).write_cache()
-    # TODO(BEAM-10708): Support streaming, add pruning logic, and integrate
-    # pipeline fragment logic.
+    # TODO(https://github.com/apache/beam/issues/20526): Support streaming, add
+    # pruning logic, and integrate pipeline fragment logic.
     return pipeline
diff --git a/sdks/python/apache_beam/runners/interactive/caching/streaming_cache.py b/sdks/python/apache_beam/runners/interactive/caching/streaming_cache.py
index aefb97a2a73..064246a9708 100644
--- a/sdks/python/apache_beam/runners/interactive/caching/streaming_cache.py
+++ b/sdks/python/apache_beam/runners/interactive/caching/streaming_cache.py
@@ -58,9 +58,9 @@ class StreamingCacheSink(beam.PTransform):
   transforms are writing to the same file. This PTransform is assumed to only
   run correctly with the DirectRunner.
 
-  TODO(BEAM-9447): Generalize this to more source/sink types aside from file
-  based. Also, generalize to cases where there might be multiple workers
-  writing to the same sink.
+  TODO(https://github.com/apache/beam/issues/20002): Generalize this to more
+  source/sink types aside from file based. Also, generalize to cases where
+  there might be multiple workers writing to the same sink.
   """
   def __init__(
       self,
diff --git a/sdks/python/apache_beam/runners/interactive/dataproc/dataproc_cluster_manager.py b/sdks/python/apache_beam/runners/interactive/dataproc/dataproc_cluster_manager.py
index 7332e59c339..f26f244fdb9 100644
--- a/sdks/python/apache_beam/runners/interactive/dataproc/dataproc_cluster_manager.py
+++ b/sdks/python/apache_beam/runners/interactive/dataproc/dataproc_cluster_manager.py
@@ -122,8 +122,9 @@ class DataprocClusterManager:
         'cluster_name': self.cluster_metadata.cluster_name,
         'config': {
             'software_config': {
-                # TODO(BEAM-14142): Uncomment these lines when a Dataproc
-                # image is released with previously missing dependencies.
+                # TODO(https://github.com/apache/beam/issues/21527): Uncomment
+                # these lines when a Dataproc image is released with previously
+                # missing dependencies.
                 # 'image_version': ie.current_env().clusters.
                 # DATAPROC_IMAGE_VERSION,
                 'optional_components': ['DOCKER', 'FLINK']
diff --git a/sdks/python/apache_beam/runners/interactive/interactive_beam.py b/sdks/python/apache_beam/runners/interactive/interactive_beam.py
index 9db657e65b5..3537a042f96 100644
--- a/sdks/python/apache_beam/runners/interactive/interactive_beam.py
+++ b/sdks/python/apache_beam/runners/interactive/interactive_beam.py
@@ -405,8 +405,9 @@ class Clusters:
   # The minimum worker number to create a Dataproc cluster.
   DATAPROC_MINIMUM_WORKER_NUM = 2
 
-  # TODO(BEAM-14142): Fix the Dataproc image version after a released image
-  # contains all missing dependencies for Flink to run.
+  # TODO(https://github.com/apache/beam/issues/21527): Fix the Dataproc image
+  # version after a released image contains all missing dependencies for Flink
+  # to run.
   # DATAPROC_IMAGE_VERSION = '2.0.XX-debian10'
 
   def __init__(self) -> None:
diff --git a/sdks/python/apache_beam/runners/interactive/interactive_runner.py b/sdks/python/apache_beam/runners/interactive/interactive_runner.py
index 04338015c50..dd73cf60d61 100644
--- a/sdks/python/apache_beam/runners/interactive/interactive_runner.py
+++ b/sdks/python/apache_beam/runners/interactive/interactive_runner.py
@@ -86,7 +86,8 @@ class InteractiveRunner(runners.PipelineRunner):
     self._blocking = blocking
 
   def is_fnapi_compatible(self):
-    # TODO(BEAM-8436): return self._underlying_runner.is_fnapi_compatible()
+    # TODO(https://github.com/apache/beam/issues/19937):
+    # return self._underlying_runner.is_fnapi_compatible()
     return False
 
   def set_render_option(self, render_option):
diff --git a/sdks/python/apache_beam/runners/interactive/recording_manager.py b/sdks/python/apache_beam/runners/interactive/recording_manager.py
index 2d272edaee2..92d5787080c 100644
--- a/sdks/python/apache_beam/runners/interactive/recording_manager.py
+++ b/sdks/python/apache_beam/runners/interactive/recording_manager.py
@@ -304,8 +304,8 @@ class RecordingManager:
     # Convert them one-by-one to generate a unique label for each. This allows
     # caching at a more fine-grained granularity.
     #
-    # TODO(BEAM-12388): investigate the mixing pcollections in multiple
-    # pipelines error when using the default label.
+    # TODO(https://github.com/apache/beam/issues/20929): investigate the mixing
+    # pcollections in multiple pipelines error when using the default label.
     for df in watched_dataframes:
       pcoll, _ = utils.deferred_df_to_pcollection(df)
       watched_pcollections.add(pcoll)
diff --git a/sdks/python/apache_beam/runners/interactive/utils.py b/sdks/python/apache_beam/runners/interactive/utils.py
index 68b4fceaa8c..35941eca7de 100644
--- a/sdks/python/apache_beam/runners/interactive/utils.py
+++ b/sdks/python/apache_beam/runners/interactive/utils.py
@@ -304,8 +304,8 @@ def deferred_df_to_pcollection(df):
 
   # The proxy is used to output a DataFrame with the correct columns.
   #
-  # TODO(BEAM-11064): Once type hints are implemented for pandas, use those
-  # instead of the proxy.
+  # TODO(https://github.com/apache/beam/issues/20577): Once type hints are
+  # implemented for pandas, use those instead of the proxy.
   cache = ExpressionCache()
   cache.replace_with_cached(df._expr)
 
diff --git a/sdks/python/apache_beam/runners/pipeline_context.py b/sdks/python/apache_beam/runners/pipeline_context.py
index 7cefe3956c9..358a80e4efb 100644
--- a/sdks/python/apache_beam/runners/pipeline_context.py
+++ b/sdks/python/apache_beam/runners/pipeline_context.py
@@ -193,7 +193,8 @@ class PipelineContext(object):
     self.component_id_map = component_id_map or ComponentIdMap(namespace)
     assert self.component_id_map.namespace == namespace
 
-    # TODO(BEAM-12084) Initialize component_id_map with objects from proto.
+    # TODO(https://github.com/apache/beam/issues/20827) Initialize
+    # component_id_map with objects from proto.
     self.transforms = _PipelineContextMap(
         self,
         pipeline.AppliedPTransform,
@@ -242,7 +243,8 @@ class PipelineContext(object):
   # If fake coders are requested, return a pickled version of the element type
   # rather than an actual coder. The element type is required for some runners,
   # as well as performing a round-trip through protos.
-  # TODO(BEAM-2717): Remove once this is no longer needed.
+  # TODO(https://github.com/apache/beam/issues/18490): Remove once this is no
+  # longer needed.
   def coder_id_from_element_type(
       self, element_type, requires_deterministic_key_coder=None):
     # type: (Any, Optional[str]) -> str
diff --git a/sdks/python/apache_beam/runners/portability/flink_runner_test.py b/sdks/python/apache_beam/runners/portability/flink_runner_test.py
index cb6345e77ef..48e5df54d2d 100644
--- a/sdks/python/apache_beam/runners/portability/flink_runner_test.py
+++ b/sdks/python/apache_beam/runners/portability/flink_runner_test.py
@@ -389,19 +389,20 @@ class FlinkRunnerTest(portable_runner_test.PortableRunnerTest):
     raise unittest.SkipTest("BEAM-2939")
 
   def test_callbacks_with_exception(self):
-    raise unittest.SkipTest("BEAM-11021")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19526")
 
   def test_register_finalizations(self):
-    raise unittest.SkipTest("BEAM-11021")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19526")
 
   def test_custom_merging_window(self):
-    raise unittest.SkipTest("BEAM-11004")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/20641")
 
   # Inherits all other tests.
 
 
 class FlinkRunnerTestOptimized(FlinkRunnerTest):
-  # TODO: Remove these tests after resolving BEAM-7248 and enabling
+  # TODO: Remove these tests after resolving
+  #  https://github.com/apache/beam/issues/19422 and enabling
   #  PortableRunnerOptimized
   def create_options(self):
     options = super().create_options()
@@ -411,16 +412,16 @@ class FlinkRunnerTestOptimized(FlinkRunnerTest):
     return options
 
   def test_external_transform(self):
-    raise unittest.SkipTest("BEAM-7252")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19461")
 
   def test_expand_kafka_read(self):
-    raise unittest.SkipTest("BEAM-7252")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19461")
 
   def test_expand_kafka_write(self):
-    raise unittest.SkipTest("BEAM-7252")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19461")
 
   def test_sql(self):
-    raise unittest.SkipTest("BEAM-7252")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19461")
 
   def test_pack_combiners(self):
     # Stages produced by translations.pack_combiners are fused
diff --git a/sdks/python/apache_beam/runners/portability/fn_api_runner/fn_runner_test.py b/sdks/python/apache_beam/runners/portability/fn_api_runner/fn_runner_test.py
index c65578eba25..2218f8d9018 100644
--- a/sdks/python/apache_beam/runners/portability/fn_api_runner/fn_runner_test.py
+++ b/sdks/python/apache_beam/runners/portability/fn_api_runner/fn_runner_test.py
@@ -170,8 +170,8 @@ class FnApiRunnerTest(unittest.TestCase):
       utils.check_compiled('apache_beam.coders.coder_impl')
     except RuntimeError:
       self.skipTest(
-          'BEAM-14410: FnRunnerTest with non-trivial inputs flakes '
-          'in non-cython environments')
+          'https://github.com/apache/beam/issues/21643: FnRunnerTest with '
+          'non-trivial inputs flakes in non-cython environments')
 
     with self.create_pipeline() as p:
       res = (
@@ -372,8 +372,8 @@ class FnApiRunnerTest(unittest.TestCase):
       utils.check_compiled('apache_beam.coders.coder_impl')
     except RuntimeError:
       self.skipTest(
-          'BEAM-14410: FnRunnerTest with non-trivial inputs flakes in '
-          'non-cython environments')
+          'https://github.com/apache/beam/issues/21643: FnRunnerTest with '
+          'non-trivial inputs flakes in non-cython environments')
     with self.create_pipeline() as p:
       res = (
           p
@@ -454,7 +454,7 @@ class FnApiRunnerTest(unittest.TestCase):
                 ExpectingSideInputsFn(f'Do{k}'),
                 *[beam.pvalue.AsList(inputs[s]) for s in range(1, k)]))
 
-  @unittest.skip('BEAM-13040')
+  @unittest.skip('https://github.com/apache/beam/issues/21228')
   def test_pardo_side_input_sparse_dependencies(self):
     with self.create_pipeline() as p:
       inputs = []
@@ -1784,7 +1784,7 @@ class FnApiRunnerTestWithMultiWorkers(FnApiRunnerTest):
     p = beam.Pipeline(
         runner=fn_api_runner.FnApiRunner(is_drain=is_drain),
         options=pipeline_options)
-    #TODO(BEAM-8444): Fix these tests.
+    #TODO(https://github.com/apache/beam/issues/19936): Fix these tests.
     p._options.view_as(DebugOptions).experiments.remove('beam_fn_api')
     return p
 
@@ -1814,7 +1814,7 @@ class FnApiRunnerTestWithGrpcAndMultiWorkers(FnApiRunnerTest):
     p = beam.Pipeline(
         runner=fn_api_runner.FnApiRunner(is_drain=is_drain),
         options=pipeline_options)
-    #TODO(BEAM-8444): Fix these tests.
+    #TODO(https://github.com/apache/beam/issues/19936): Fix these tests.
     p._options.view_as(DebugOptions).experiments.remove('beam_fn_api')
     return p
 
@@ -1852,7 +1852,7 @@ class FnApiRunnerTestWithBundleRepeatAndMultiWorkers(FnApiRunnerTest):
     p = beam.Pipeline(
         runner=fn_api_runner.FnApiRunner(bundle_repeat=3, is_drain=is_drain),
         options=pipeline_options)
-    #TODO(BEAM-8444): Fix these tests.
+    #TODO(https://github.com/apache/beam/issues/19936): Fix these tests.
     p._options.view_as(DebugOptions).experiments.remove('beam_fn_api')
     return p
 
diff --git a/sdks/python/apache_beam/runners/portability/fn_api_runner/translations.py b/sdks/python/apache_beam/runners/portability/fn_api_runner/translations.py
index 5ed965fb751..ba2f482ee9a 100644
--- a/sdks/python/apache_beam/runners/portability/fn_api_runner/translations.py
+++ b/sdks/python/apache_beam/runners/portability/fn_api_runner/translations.py
@@ -1111,9 +1111,9 @@ def pack_per_key_combiners(stages, context, can_pack=lambda s: True):
             is_bounded=input_pcoll.is_bounded))
 
     # Set up Pack stage.
-    # TODO(BEAM-7746): classes that inherit from RunnerApiFn are expected to
-    #  accept a PipelineContext for from_runner_api/to_runner_api.  Determine
-    #  how to accommodate this.
+    # TODO(https://github.com/apache/beam/issues/19737): classes that inherit
+    #  from RunnerApiFn are expected to accept a PipelineContext for
+    #  from_runner_api/to_runner_api.  Determine how to accommodate this.
     pack_combine_fn = combiners.SingleInputTupleCombineFn(
         *[
             core.CombineFn.from_runner_api(combine_payload.combine_fn, context)  # type: ignore[arg-type]
diff --git a/sdks/python/apache_beam/runners/portability/fn_api_runner/worker_handlers.py b/sdks/python/apache_beam/runners/portability/fn_api_runner/worker_handlers.py
index 4e0f68dadd0..6f94f3efc4e 100644
--- a/sdks/python/apache_beam/runners/portability/fn_api_runner/worker_handlers.py
+++ b/sdks/python/apache_beam/runners/portability/fn_api_runner/worker_handlers.py
@@ -643,7 +643,8 @@ class ExternalWorkerHandler(GrpcWorkerHandler):
 
   def host_from_worker(self):
     # type: () -> str
-    # TODO(BEAM-8646): Reconcile across platforms.
+    # TODO(https://github.com/apache/beam/issues/19947): Reconcile across
+    # platforms.
     if sys.platform in ['win32', 'darwin']:
       return 'localhost'
     import socket
diff --git a/sdks/python/apache_beam/runners/portability/local_job_service.py b/sdks/python/apache_beam/runners/portability/local_job_service.py
index 25ab6176ad0..d0d52a2c778 100644
--- a/sdks/python/apache_beam/runners/portability/local_job_service.py
+++ b/sdks/python/apache_beam/runners/portability/local_job_service.py
@@ -323,7 +323,9 @@ class BeamJob(abstract_job_service.AbstractBeamJob):
         env.dependencies.extend(deps)
       self._provision_info.provision_info.ClearField('retrieval_token')
     except concurrent.futures.TimeoutError:
-      pass  # TODO(BEAM-9577): Require this once all SDKs support it.
+      # TODO(https://github.com/apache/beam/issues/20267): Require this once
+      # all SDKs support it.
+      pass
 
   def cancel(self):
     if not self.is_terminal_state(self.state):
diff --git a/sdks/python/apache_beam/runners/portability/portable_runner.py b/sdks/python/apache_beam/runners/portability/portable_runner.py
index e96f13d16d1..1283b06ace3 100644
--- a/sdks/python/apache_beam/runners/portability/portable_runner.py
+++ b/sdks/python/apache_beam/runners/portability/portable_runner.py
@@ -232,7 +232,8 @@ class JobServiceHandle(object):
           beam_job_api_pb2.JobMessagesRequest(job_id=preparation_id),
           timeout=self.timeout)
     except Exception:
-      # TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
+      # TODO(https://github.com/apache/beam/issues/19284): Unify preparation_id
+      # and job_id for all runners.
       state_stream = message_stream = None
 
     # Run the job and wait for a result, we don't set a timeout here because
@@ -361,8 +362,8 @@ class PortableRunner(runner.PipelineRunner):
         ]
         partial = False
       elif pre_optimize == 'all_except_fusion':
-        # TODO(BEAM-7248): Delete this branch after PortableRunner supports
-        # beam:runner:executable_stage:v1.
+        # TODO(https://github.com/apache/beam/issues/19422): Delete this branch
+        # after PortableRunner supports beam:runner:executable_stage:v1.
         phases = [
             translations.annotate_downstream_side_inputs,
             translations.annotate_stateful_dofns_as_roots,
diff --git a/sdks/python/apache_beam/runners/portability/portable_runner_test.py b/sdks/python/apache_beam/runners/portability/portable_runner_test.py
index e13b25d8eba..78a603eef60 100644
--- a/sdks/python/apache_beam/runners/portability/portable_runner_test.py
+++ b/sdks/python/apache_beam/runners/portability/portable_runner_test.py
@@ -221,7 +221,7 @@ class PortableRunnerTest(fn_runner_test.FnApiRunnerTest):
     raise unittest.SkipTest("Portable runners don't support drain yet.")
 
 
-@unittest.skip("BEAM-7248")
+@unittest.skip("https://github.com/apache/beam/issues/19422")
 class PortableRunnerOptimized(PortableRunnerTest):
   def create_options(self):
     options = super().create_options()
@@ -232,8 +232,8 @@ class PortableRunnerOptimized(PortableRunnerTest):
     return options
 
 
-# TODO(BEAM-7248): Delete this test after PortableRunner supports
-# beam:runner:executable_stage:v1.
+# TODO(https://github.com/apache/beam/issues/19422): Delete this test after
+# PortableRunner supports beam:runner:executable_stage:v1.
 class PortableRunnerOptimizedWithoutFusion(PortableRunnerTest):
   def create_options(self):
     options = super().create_options()
@@ -263,7 +263,9 @@ class PortableRunnerTestWithExternalEnv(PortableRunnerTest):
     return options
 
 
-@pytest.mark.skipif(sys.platform == "win32", reason="[BEAM-10625]")
+@pytest.mark.skipif(
+    sys.platform == "win32",
+    reason="[https://github.com/apache/beam/issues/20427]")
 class PortableRunnerTestWithSubprocesses(PortableRunnerTest):
   _use_subprocesses = True
 
diff --git a/sdks/python/apache_beam/runners/portability/samza_runner_test.py b/sdks/python/apache_beam/runners/portability/samza_runner_test.py
index 2ba6781bee4..a56be8da11c 100644
--- a/sdks/python/apache_beam/runners/portability/samza_runner_test.py
+++ b/sdks/python/apache_beam/runners/portability/samza_runner_test.py
@@ -135,7 +135,7 @@ class SamzaRunnerTest(portable_runner_test.PortableRunnerTest):
 
   def test_metrics(self):
     # Skip until Samza portable runner supports distribution metrics.
-    raise unittest.SkipTest("BEAM-12614")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/21043")
 
   def test_flattened_side_input(self):
     # Blocked on support for transcoding
@@ -150,31 +150,31 @@ class SamzaRunnerTest(portable_runner_test.PortableRunnerTest):
 
   def test_pardo_timers(self):
     # Skip until Samza portable runner supports clearing timer.
-    raise unittest.SkipTest("BEAM-12774")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/21059")
 
   def test_register_finalizations(self):
     # Skip until Samza runner supports bundle finalization.
-    raise unittest.SkipTest("BEAM-12615")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/21044")
 
   def test_callbacks_with_exception(self):
     # Skip until Samza runner supports bundle finalization.
-    raise unittest.SkipTest("BEAM-12615")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/21044")
 
   def test_sdf_with_dofn_as_watermark_estimator(self):
     # Skip until Samza runner supports SDF and self-checkpoint.
-    raise unittest.SkipTest("BEAM-12616")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/21045")
 
   def test_sdf_with_sdf_initiated_checkpointing(self):
     # Skip until Samza runner supports SDF.
-    raise unittest.SkipTest("BEAM-12616")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/21045")
 
   def test_sdf_with_watermark_tracking(self):
     # Skip until Samza runner supports SDF.
-    raise unittest.SkipTest("BEAM-12616")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/21045")
 
   def test_custom_merging_window(self):
     # Skip until Samza runner supports merging window fns
-    raise unittest.SkipTest("BEAM-12617")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/21049")
 
 
 if __name__ == '__main__':
diff --git a/sdks/python/apache_beam/runners/portability/spark_runner_test.py b/sdks/python/apache_beam/runners/portability/spark_runner_test.py
index fb84c27a91e..488222f2f2f 100644
--- a/sdks/python/apache_beam/runners/portability/spark_runner_test.py
+++ b/sdks/python/apache_beam/runners/portability/spark_runner_test.py
@@ -139,38 +139,38 @@ class SparkRunnerTest(portable_runner_test.PortableRunnerTest):
 
   def test_metrics(self):
     # Skip until Spark runner supports metrics.
-    raise unittest.SkipTest("BEAM-7219")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19496")
 
   def test_sdf(self):
     # Skip until Spark runner supports SDF.
-    raise unittest.SkipTest("BEAM-7222")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19468")
 
   def test_sdf_with_watermark_tracking(self):
     # Skip until Spark runner supports SDF.
-    raise unittest.SkipTest("BEAM-7222")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19468")
 
   def test_sdf_with_sdf_initiated_checkpointing(self):
     # Skip until Spark runner supports SDF.
-    raise unittest.SkipTest("BEAM-7222")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19468")
 
   def test_sdf_synthetic_source(self):
     # Skip until Spark runner supports SDF.
-    raise unittest.SkipTest("BEAM-7222")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19468")
 
   def test_callbacks_with_exception(self):
     # Skip until Spark runner supports bundle finalization.
-    raise unittest.SkipTest("BEAM-7233")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19517")
 
   def test_register_finalizations(self):
     # Skip until Spark runner supports bundle finalization.
-    raise unittest.SkipTest("BEAM-7233")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19517")
 
   def test_sdf_with_dofn_as_watermark_estimator(self):
     # Skip until Spark runner supports SDF and self-checkpoint.
-    raise unittest.SkipTest("BEAM-7222")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/19468")
 
   def test_pardo_dynamic_timer(self):
-    raise unittest.SkipTest("BEAM-9912")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/20179")
 
   def test_flattened_side_input(self):
     # Blocked on support for transcoding
@@ -178,7 +178,7 @@ class SparkRunnerTest(portable_runner_test.PortableRunnerTest):
     super().test_flattened_side_input(with_transcoding=False)
 
   def test_custom_merging_window(self):
-    raise unittest.SkipTest("BEAM-11004")
+    raise unittest.SkipTest("https://github.com/apache/beam/issues/20641")
 
   # Inherits all other tests from PortableRunnerTest.
 
diff --git a/sdks/python/apache_beam/runners/portability/spark_uber_jar_job_server.py b/sdks/python/apache_beam/runners/portability/spark_uber_jar_job_server.py
index 2f880d208c3..832f3142cb6 100644
--- a/sdks/python/apache_beam/runners/portability/spark_uber_jar_job_server.py
+++ b/sdks/python/apache_beam/runners/portability/spark_uber_jar_job_server.py
@@ -248,8 +248,8 @@ class SparkBeamJob(abstract_job_service.UberJarBeamJob):
             message_text=response['message'])
         yield message
         message_ix += 1
-        # TODO(BEAM-8983) In the event of a failure, query
-        #  additional info from Spark master and/or workers.
+        # TODO(https://github.com/apache/beam/issues/20019) In the event of a
+        #  failure, query additional info from Spark master and/or workers.
       check_timestamp = self.set_state(state)
       if check_timestamp is not None:
         if message:
diff --git a/sdks/python/apache_beam/runners/portability/stager_test.py b/sdks/python/apache_beam/runners/portability/stager_test.py
index 9d24964f395..b221bb1ec6f 100644
--- a/sdks/python/apache_beam/runners/portability/stager_test.py
+++ b/sdks/python/apache_beam/runners/portability/stager_test.py
@@ -52,7 +52,8 @@ class StagerTest(unittest.TestCase):
     if self._temp_dir:
       shutil.rmtree(self._temp_dir)
     self.stager = None
-    # [BEAM-13769] set pickler to dill by default.
+    # [https://github.com/apache/beam/issues/21457] set pickler to dill by
+    # default.
     pickler.set_library(pickler.DEFAULT_PICKLE_LIB)
 
   def make_temp_dir(self):
@@ -225,8 +226,8 @@ class StagerTest(unittest.TestCase):
   @pytest.mark.no_xdist
   @unittest.skipIf(
       sys.platform == "win32" and sys.version_info < (3, 8),
-      'BEAM-10987: pytest on Windows pulls in a zipimporter, unpicklable '
-      'before py3.8')
+      'https://github.com/apache/beam/issues/20659: pytest on Windows pulls '
+      'in a zipimporter, unpicklable before py3.8')
   def test_with_main_session(self):
     staging_dir = self.make_temp_dir()
     options = PipelineOptions()
@@ -242,8 +243,8 @@ class StagerTest(unittest.TestCase):
         os.path.isfile(
             os.path.join(staging_dir, names.PICKLED_MAIN_SESSION_FILE)))
 
-  # (BEAM-13769): Remove the decorator once cloudpickle is default pickle
-  # library
+  # (https://github.com/apache/beam/issues/21457): Remove the decorator once
+  # cloudpickle is default pickle library
   @pytest.mark.no_xdist
   def test_main_session_not_staged_when_using_cloudpickle(self):
     staging_dir = self.make_temp_dir()
diff --git a/sdks/python/apache_beam/runners/worker/bundle_processor.py b/sdks/python/apache_beam/runners/worker/bundle_processor.py
index 0d993dd5413..6d0e050cd34 100644
--- a/sdks/python/apache_beam/runners/worker/bundle_processor.py
+++ b/sdks/python/apache_beam/runners/worker/bundle_processor.py
@@ -238,7 +238,8 @@ class DataInputOperation(RunnerIOOperation):
         read_progress_info)] = read_progress_info
     return all_monitoring_infos
 
-  # TODO(BEAM-7746): typing not compatible with super type
+  # TODO(https://github.com/apache/beam/issues/19737): typing not compatible
+  # with super type
   def try_split(  # type: ignore[override]
       self, fraction_of_remainder, total_buffer_size, allowed_split_points):
     # type: (...) -> Optional[Tuple[int, Iterable[operations.SdfSplitResultsPrimary], Iterable[operations.SdfSplitResultsResidual], int]]
diff --git a/sdks/python/apache_beam/runners/worker/data_plane.py b/sdks/python/apache_beam/runners/worker/data_plane.py
index 2509eb8d3e8..cff0ea2b28a 100644
--- a/sdks/python/apache_beam/runners/worker/data_plane.py
+++ b/sdks/python/apache_beam/runners/worker/data_plane.py
@@ -795,9 +795,9 @@ class GrpcClientDataChannelFactory(DataChannelFactory):
   def create_data_channel(self, remote_grpc_port):
     # type: (beam_fn_api_pb2.RemoteGrpcPort) -> GrpcClientDataChannel
     url = remote_grpc_port.api_service_descriptor.url
-    # TODO(BEAM-7746): this can return None if url is falsey, but this seems
-    #  incorrect, as code that calls this method seems to always expect
-    #  non-Optional values.
+    # TODO(https://github.com/apache/beam/issues/19737): this can return None
+    #  if url is falsey, but this seems incorrect, as code that calls this
+    #  method seems to always expect non-Optional values.
     return self.create_data_channel_from_url(url)  # type: ignore[return-value]
 
   def close(self):
diff --git a/sdks/python/apache_beam/runners/worker/operations.py b/sdks/python/apache_beam/runners/worker/operations.py
index 09706fe7187..6f051142623 100644
--- a/sdks/python/apache_beam/runners/worker/operations.py
+++ b/sdks/python/apache_beam/runners/worker/operations.py
@@ -329,8 +329,8 @@ class GeneralPurposeConsumerSet(ConsumerSet):
       if len(self._batched_elements) >= self.MAX_BATCH_SIZE:
         self.flush()
 
-    # TODO(BEAM-14408): Properly estimate sizes in the batch-consumer only case,
-    # this undercounts large iterables
+    # TODO(https://github.com/apache/beam/issues/21655): Properly estimate
+    # sizes in the batch-consumer only case, this undercounts large iterables
     self.update_counters_finish()
 
   def receive_batch(self, windowed_batch):
diff --git a/sdks/python/apache_beam/runners/worker/sdk_worker_main.py b/sdks/python/apache_beam/runners/worker/sdk_worker_main.py
index 14008780381..fcc168f2611 100644
--- a/sdks/python/apache_beam/runners/worker/sdk_worker_main.py
+++ b/sdks/python/apache_beam/runners/worker/sdk_worker_main.py
@@ -78,7 +78,8 @@ def create_harness(environment, dry_run=False):
 
       # Send all logs to the runner.
       fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
-      # TODO(BEAM-5468): This should be picked up from pipeline options.
+      # TODO(https://github.com/apache/beam/issues/19242): This should be
+      # picked up from pipeline options.
       logging.getLogger().setLevel(logging.INFO)
       logging.getLogger().addHandler(fn_log_handler)
       _LOGGER.info('Logging handler created.')
diff --git a/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query10.py b/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query10.py
index 8640e50867f..49c428ef78c 100644
--- a/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query10.py
+++ b/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query10.py
@@ -47,8 +47,9 @@ class OutputFile(object):
 
 
 def open_writable_gcs_file(options, filename):
-  # TODO: [BEAM-10879] it seems that beam team has not yet decided about this
-  #   method and it is left blank and unspecified.
+  # TODO: [https://github.com/apache/beam/issues/20670] it seems that beam team
+  #   has not yet decided about this method and it is left blank and
+  #   unspecified.
   pass
 
 
diff --git a/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query4.py b/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query4.py
index 81f35224620..ad6c63a88c3 100644
--- a/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query4.py
+++ b/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query4.py
@@ -64,7 +64,8 @@ def load(events, metadata=None, pipeline_options=None):
       # average for each category
       | beam.CombinePerKey(beam.combiners.MeanCombineFn())
       # TODO(leiyiz): fanout with sliding window produces duplicated results,
-      #   uncomment after it is fixed [BEAM-10617]
+      #   uncomment after it is fixed
+      #   [https://github.com/apache/beam/issues/20528]
       # .with_hot_key_fanout(metadata.get('fanout'))
       # produce output
       | beam.ParDo(ProjectToCategoryPriceFn()))
diff --git a/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query5.py b/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query5.py
index 3c5e572fed0..a55d31de609 100644
--- a/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query5.py
+++ b/sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query5.py
@@ -51,7 +51,8 @@ def load(events, metadata=None, pipeline_options=None):
       | 'bid_max_count' >> beam.CombineGlobally(
           MostBidCombineFn()).without_defaults()
       # TODO(leiyiz): fanout with sliding window produces duplicated results,
-      #   uncomment after it is fixed [BEAM-10617]
+      #   uncomment after it is fixed
+      #   [https://github.com/apache/beam/issues/20528]
       # .with_fanout(metadata.get('fanout'))
       | beam.FlatMap(
           lambda auc_count: [{
diff --git a/sdks/python/apache_beam/testing/test_stream_test.py b/sdks/python/apache_beam/testing/test_stream_test.py
index 91c5ce6dd82..13b9e3c1769 100644
--- a/sdks/python/apache_beam/testing/test_stream_test.py
+++ b/sdks/python/apache_beam/testing/test_stream_test.py
@@ -373,9 +373,10 @@ class TestStreamTest(unittest.TestCase):
         | beam.Map(lambda x: ('k', x))
         | beam.GroupByKey())
 
-    # TODO(BEAM-2519): timestamp assignment for elements from a GBK should
-    # respect the TimestampCombiner.  The test below should also verify the
-    # timestamps of the outputted elements once this is implemented.
+    # TODO(https://github.com/apache/beam/issues/18441): timestamp assignment
+    # for elements from a GBK should respect the TimestampCombiner.  The test
+    # below should also verify the timestamps of the outputted elements once
+    # this is implemented.
 
     # assert per window
     expected_window_to_elements = {
@@ -418,9 +419,10 @@ class TestStreamTest(unittest.TestCase):
         | beam.Map(lambda x: ('k', x))
         | beam.GroupByKey())
 
-    # TODO(BEAM-2519): timestamp assignment for elements from a GBK should
-    # respect the TimestampCombiner.  The test below should also verify the
-    # timestamps of the outputted elements once this is implemented.
+    # TODO(https://github.com/apache/beam/issues/18441): timestamp assignment
+    # for elements from a GBK should respect the TimestampCombiner.  The test
+    # below should also verify the timestamps of the outputted elements once
+    # this is implemented.
 
     # assert per window
     expected_window_to_elements = {
@@ -460,9 +462,10 @@ class TestStreamTest(unittest.TestCase):
         | beam.Map(lambda x: ('k', x))
         | beam.GroupByKey())
 
-    # TODO(BEAM-2519): timestamp assignment for elements from a GBK should
-    # respect the TimestampCombiner.  The test below should also verify the
-    # timestamps of the outputted elements once this is implemented.
+    # TODO(https://github.com/apache/beam/issues/18441): timestamp assignment
+    # for elements from a GBK should respect the TimestampCombiner.  The test
+    # below should also verify the timestamps of the outputted elements once
+    # this is implemented.
 
     expected_window_to_elements = {
         window.IntervalWindow(0, 15): [('k', ['a'])],
diff --git a/sdks/python/apache_beam/tools/coders_microbenchmark.py b/sdks/python/apache_beam/tools/coders_microbenchmark.py
index 7476b628ce8..a564f739cf2 100644
--- a/sdks/python/apache_beam/tools/coders_microbenchmark.py
+++ b/sdks/python/apache_beam/tools/coders_microbenchmark.py
@@ -207,7 +207,8 @@ def run_coder_benchmarks(
     num_runs, input_size, seed, verbose, filter_regex='.*'):
   random.seed(seed)
 
-  # TODO(BEAM-4441): Pick coders using type hints, for example:
+  # TODO(https://github.com/apache/beam/issues/18788): Pick coders using type
+  # hints, for example:
   # tuple_coder = typecoders.registry.get_coder(typing.Tuple[int, ...])
   benchmarks = [
       coder_benchmark_factory(coders.FastPrimitivesCoder(), small_int),
diff --git a/sdks/python/apache_beam/transforms/combinefn_lifecycle_test.py b/sdks/python/apache_beam/transforms/combinefn_lifecycle_test.py
index a244f805ee3..6834c1d6174 100644
--- a/sdks/python/apache_beam/transforms/combinefn_lifecycle_test.py
+++ b/sdks/python/apache_beam/transforms/combinefn_lifecycle_test.py
@@ -72,7 +72,7 @@ class CombineFnLifecycleTest(unittest.TestCase):
   def test_combining_value_state(self):
     if ('DataflowRunner' in self.pipeline.get_pipeline_options().view_as(
         StandardOptions).runner):
-      self.skipTest('BEAM-11793')
+      self.skipTest('https://github.com/apache/beam/issues/20722')
     run_pardo(self.pipeline)
 
 
diff --git a/sdks/python/apache_beam/transforms/combiners.py b/sdks/python/apache_beam/transforms/combiners.py
index d4fdfb14c18..2aaadb866c9 100644
--- a/sdks/python/apache_beam/transforms/combiners.py
+++ b/sdks/python/apache_beam/transforms/combiners.py
@@ -373,8 +373,9 @@ class _MergeTopPerBundle(core.DoFn):
               for element in bundle
           ]
           continue
-        # TODO(BEAM-13117): Remove this workaround once legacy dataflow
-        # correctly handles coders with combiner packing and/or is deprecated.
+        # TODO(https://github.com/apache/beam/issues/21205): Remove this
+        # workaround once legacy dataflow correctly handles coders with
+        # combiner packing and/or is deprecated.
         if not isinstance(bundle, list):
           bundle = list(bundle)
         for element in reversed(bundle):
@@ -389,8 +390,9 @@ class _MergeTopPerBundle(core.DoFn):
     else:
       heap = []
       for bundle in bundles:
-        # TODO(BEAM-13117): Remove this workaround once legacy dataflow
-        # correctly handles coders with combiner packing and/or is deprecated.
+        # TODO(https://github.com/apache/beam/issues/21205): Remove this
+        # workaround once legacy dataflow correctly handles coders with
+        # combiner packing and/or is deprecated.
         if not isinstance(bundle, list):
           bundle = list(bundle)
         if not heap:
diff --git a/sdks/python/apache_beam/transforms/core.py b/sdks/python/apache_beam/transforms/core.py
index fdc458bce0b..64d142c2adf 100644
--- a/sdks/python/apache_beam/transforms/core.py
+++ b/sdks/python/apache_beam/transforms/core.py
@@ -705,7 +705,7 @@ class DoFn(WithTypeHints, HasDisplayData, urns.RunnerApiFn):
   # TODO(sourabhbajaj): Do we want to remove the responsibility of these from
   # the DoFn or maybe the runner
   def infer_output_type(self, input_type):
-    # TODO(BEAM-8247): Side inputs types.
+    # TODO(https://github.com/apache/beam/issues/19824): Side inputs types.
     return trivial_inference.element_type(
         self._strip_output_annotations(
             trivial_inference.infer_return_type(self.process, [input_type])))
@@ -759,8 +759,8 @@ class DoFn(WithTypeHints, HasDisplayData, urns.RunnerApiFn):
     input_type = list(
         inspect.signature(self.process_batch).parameters.values())[0].annotation
     if input_type == inspect.Signature.empty:
-      # TODO(BEAM-14340): Consider supporting an alternative (dynamic?) approach
-      # for declaring input type
+      # TODO(https://github.com/apache/beam/issues/21652): Consider supporting
+      # an alternative (dynamic?) approach for declaring input type
       raise TypeError(
           f"Either {self.__class__.__name__}.process_batch() must have a type "
           f"annotation on its first parameter, or {self.__class__.__name__} "
@@ -3386,7 +3386,8 @@ class Create(PTransform):
   def to_runner_api_parameter(self, context):
     # type: (PipelineContext) -> typing.Tuple[str, bytes]
     # Required as this is identified by type in PTransformOverrides.
-    # TODO(BEAM-3812): Use an actual URN here.
+    # TODO(https://github.com/apache/beam/issues/18713): Use an actual URN
+    # here.
     return self.to_runner_api_pickled(context)
 
   def infer_output_type(self, unused_input_type):
diff --git a/sdks/python/apache_beam/transforms/ptransform.py b/sdks/python/apache_beam/transforms/ptransform.py
index f3e57951e37..1ca87746baa 100644
--- a/sdks/python/apache_beam/transforms/ptransform.py
+++ b/sdks/python/apache_beam/transforms/ptransform.py
@@ -744,7 +744,7 @@ class PTransform(WithTypeHints, HasDisplayData, Generic[InputT, OutputT]):
     # typing: only ParDo supports extra_kwargs
     urn, typed_param = self.to_runner_api_parameter(context, **extra_kwargs)  # type: ignore[call-arg]
     if urn == python_urns.GENERIC_COMPOSITE_TRANSFORM and not has_parts:
-      # TODO(BEAM-3812): Remove this fallback.
+      # TODO(https://github.com/apache/beam/issues/18713): Remove this fallback.
       urn, typed_param = self.to_runner_api_pickled(context)
     return beam_runner_api_pb2.FunctionSpec(
         urn=urn,
diff --git a/sdks/python/apache_beam/transforms/ptransform_test.py b/sdks/python/apache_beam/transforms/ptransform_test.py
index f3ecd00cccc..9870ff7ed46 100644
--- a/sdks/python/apache_beam/transforms/ptransform_test.py
+++ b/sdks/python/apache_beam/transforms/ptransform_test.py
@@ -700,7 +700,8 @@ class PTransformTest(unittest.TestCase):
       result = (pcoll, ) | 'Single Flatten' >> beam.Flatten()
       assert_that(result, equal_to(input))
 
-  # TODO(BEAM-9002): Does not work in streaming mode on Dataflow.
+  # TODO(https://github.com/apache/beam/issues/20067): Does not work in
+  # streaming mode on Dataflow.
   @pytest.mark.no_sickbay_streaming
   @pytest.mark.it_validatesrunner
   def test_flatten_same_pcollections(self):
diff --git a/sdks/python/apache_beam/transforms/sideinputs_test.py b/sdks/python/apache_beam/transforms/sideinputs_test.py
index 29a88452cb5..4c6df9f9d8e 100644
--- a/sdks/python/apache_beam/transforms/sideinputs_test.py
+++ b/sdks/python/apache_beam/transforms/sideinputs_test.py
@@ -159,8 +159,8 @@ class SideInputsTest(unittest.TestCase):
     assert_that(result, equal_to([(1, 'empty'), (2, 'empty')]))
     pipeline.run()
 
-  # TODO(BEAM-5025): Disable this test in streaming temporarily.
-  # Remove sickbay-streaming tag after it's fixed.
+  # TODO(https://github.com/apache/beam/issues/19012): Disable this test in
+  # streaming temporarily. Remove sickbay-streaming tag after it's fixed.
   @pytest.mark.no_sickbay_streaming
   @pytest.mark.it_validatesrunner
   def test_multi_valued_singleton_side_input(self):
@@ -370,10 +370,10 @@ class SideInputsTest(unittest.TestCase):
   @pytest.mark.it_validatesrunner
   def test_multi_triggered_gbk_side_input(self):
     """Test a GBK sideinput, with multiple triggering."""
-    # TODO(BEAM-9322): Remove use of this experiment.
-    # This flag is only necessary when using the multi-output TestStream b/c
-    # it relies on using the PCollection output tags as the PCollection output
-    # ids.
+    # TODO(https://github.com/apache/beam/issues/20065): Remove use of this
+    # experiment. This flag is only necessary when using the multi-output
+    # TestStream b/c it relies on using the PCollection output tags as the
+    # PCollection output ids.
     with TestPipeline() as p:
 
       test_stream = (
diff --git a/sdks/python/apache_beam/transforms/stats.py b/sdks/python/apache_beam/transforms/stats.py
index 633cfebbc12..2599760f8d5 100644
--- a/sdks/python/apache_beam/transforms/stats.py
+++ b/sdks/python/apache_beam/transforms/stats.py
@@ -919,7 +919,8 @@ class ApproximateQuantilesCombineFn(CombineFn):
       self._offset_jitter = 2 - self._offset_jitter
       return (new_weight + self._offset_jitter) / 2
 
-  # TODO(BEAM-7746): Signature incompatible with supertype
+  # TODO(https://github.com/apache/beam/issues/19737): Signature incompatible
+  # with supertype
   def create_accumulator(self):  # type: ignore[override]
     # type: () -> _QuantileState
     self._qs = _QuantileState(
diff --git a/sdks/python/apache_beam/transforms/trigger.py b/sdks/python/apache_beam/transforms/trigger.py
index 47df57b4d67..3c7a0df5021 100644
--- a/sdks/python/apache_beam/transforms/trigger.py
+++ b/sdks/python/apache_beam/transforms/trigger.py
@@ -1175,8 +1175,8 @@ def create_trigger_driver(
     windowing, is_batch=False, phased_combine_fn=None, clock=None):
   """Create the TriggerDriver for the given windowing and options."""
 
-  # TODO(BEAM-10149): Respect closing and on-time behaviors.
-  # For batch, we should always fire once, no matter what.
+  # TODO(https://github.com/apache/beam/issues/20165): Respect closing and
+  # on-time behaviors. For batch, we should always fire once, no matter what.
   if is_batch and windowing.triggerfn == _Never():
     windowing = copy.copy(windowing)
     windowing.triggerfn = Always()
@@ -1636,11 +1636,11 @@ class InMemoryUnmergedState(UnmergedState):
   def get_earliest_hold(self):
     earliest_hold = MAX_TIMESTAMP
     for unused_window, tagged_states in self.state.items():
-      # TODO(BEAM-2519): currently, this assumes that the watermark hold tag is
-      # named "watermark".  This is currently only true because the only place
-      # watermark holds are set is in the GeneralTriggerDriver, where we use
-      # this name.  We should fix this by allowing enumeration of the tag types
-      # used in adding state.
+      # TODO(https://github.com/apache/beam/issues/18441): currently, this
+      # assumes that the watermark hold tag is named "watermark".  This is
+      # currently only true because the only place watermark holds are set is
+      # in the GeneralTriggerDriver, where we use this name.  We should fix
+      # this by allowing enumeration of the tag types used in adding state.
       if 'watermark' in tagged_states and tagged_states['watermark']:
         hold = min(tagged_states['watermark']) - TIME_GRANULARITY
         earliest_hold = min(earliest_hold, hold)
diff --git a/sdks/python/apache_beam/transforms/trigger_test.py b/sdks/python/apache_beam/transforms/trigger_test.py
index b8c5f899cab..d73168d1ba9 100644
--- a/sdks/python/apache_beam/transforms/trigger_test.py
+++ b/sdks/python/apache_beam/transforms/trigger_test.py
@@ -697,7 +697,7 @@ class TriggerPipelineTest(unittest.TestCase):
 
   def test_on_pane_watermark_hold_no_pipeline_stall(self):
     """A regression test added for
-    https://issues.apache.org/jira/browse/BEAM-10054."""
+    https://issues.apache.org/jira/browse/BEAM-10054."""
     START_TIMESTAMP = 1534842000
 
     test_stream = TestStream()
@@ -1043,7 +1043,8 @@ class BaseTestStreamTranscriptTest(TranscriptTest):
 
     # Elements are encoded as a json strings to allow other languages to
     # decode elements while executing the test stream.
-    # TODO(BEAM-8600): Eliminate these gymnastics.
+    # TODO(https://github.com/apache/beam/issues/19934): Eliminate these
+    # gymnastics.
     test_stream = TestStream(coder=coders.StrUtf8Coder()).with_output_types(str)
     for action, params in transcript:
       if action == 'expect':
@@ -1180,7 +1181,8 @@ class BaseTestStreamTranscriptTest(TranscriptTest):
        | beam.ParDo(Check(self.allow_out_of_order)))
 
     with TestPipeline() as p:
-      # TODO(BEAM-8601): Pass this during pipeline construction.
+      # TODO(https://github.com/apache/beam/issues/19933): Pass this during
+      # pipeline construction.
       p._options.view_as(StandardOptions).streaming = True
       p._options.view_as(TypeOptions).allow_unsafe_triggers = True
 
diff --git a/sdks/python/apache_beam/transforms/userstate_test.py b/sdks/python/apache_beam/transforms/userstate_test.py
index 50d41baf62d..93ae37701d2 100644
--- a/sdks/python/apache_beam/transforms/userstate_test.py
+++ b/sdks/python/apache_beam/transforms/userstate_test.py
@@ -443,8 +443,8 @@ class StatefulDoFnOnDirectRunnerTest(unittest.TestCase):
     # Use state on the TestCase class, since other references would be pickled
     # into a closure and not have the desired side effects.
     #
-    # TODO(BEAM-5295): Use assert_that after it works for the cases here in
-    # streaming mode.
+    # TODO(https://github.com/apache/beam/issues/18987): Use assert_that after
+    # it works for the cases here in streaming mode.
     StatefulDoFnOnDirectRunnerTest.all_records = []
 
   def record_dofn(self):
diff --git a/sdks/python/apache_beam/transforms/util.py b/sdks/python/apache_beam/transforms/util.py
index 2b6e438022c..c060c45ee75 100644
--- a/sdks/python/apache_beam/transforms/util.py
+++ b/sdks/python/apache_beam/transforms/util.py
@@ -754,9 +754,9 @@ class ReshufflePerKey(PTransform):
 
     ungrouped = pcoll | Map(reify_timestamps).with_output_types(Any)
 
-    # TODO(BEAM-8104) Using global window as one of the standard windows.
-    # This is to mitigate the Dataflow Java Runner Harness limitation to
-    # accept only standard coders.
+    # TODO(https://github.com/apache/beam/issues/19785) Using global window as
+    # one of the standard windows. This is to mitigate the Dataflow Java Runner
+    # Harness limitation to accept only standard coders.
     ungrouped._windowing = Windowing(
         window.GlobalWindows(),
         triggerfn=Always(),
diff --git a/sdks/python/apache_beam/typehints/batch.py b/sdks/python/apache_beam/typehints/batch.py
index f0f0509a710..4df4ea9dd97 100644
--- a/sdks/python/apache_beam/typehints/batch.py
+++ b/sdks/python/apache_beam/typehints/batch.py
@@ -82,8 +82,9 @@ class BatchConverter(Generic[B, E]):
       if result is not None:
         return result
 
-    # TODO(BEAM-14339): Aggregate error information from the failed
-    # BatchConverter matches instead of this generic error.
+    # TODO(https://github.com/apache/beam/issues/21654): Aggregate error
+    # information from the failed BatchConverter matches instead of this
+    # generic error.
     raise TypeError(
         f"Unable to find BatchConverter for element_type {element_type!r} and "
         f"batch_type {batch_type!r}")
diff --git a/sdks/python/apache_beam/typehints/native_type_compatibility.py b/sdks/python/apache_beam/typehints/native_type_compatibility.py
index 69d1af42135..4be0b30e7c9 100644
--- a/sdks/python/apache_beam/typehints/native_type_compatibility.py
+++ b/sdks/python/apache_beam/typehints/native_type_compatibility.py
@@ -189,7 +189,7 @@ def convert_to_beam_type(typ):
     return _type_var_cache[id(typ)]
   elif isinstance(typ, str):
     # Special case for forward references.
-    # TODO(BEAM-8487): Currently unhandled.
+    # TODO(https://github.com/apache/beam/issues/19954): Currently unhandled.
     _LOGGER.info('Converting string literal type hint to Any: "%s"', typ)
     return typehints.Any
   elif getattr(typ, '__module__', None) != 'typing':
@@ -197,9 +197,11 @@ def convert_to_beam_type(typ):
     return typ
 
   type_map = [
-      # TODO(BEAM-9355): Currently unsupported.
+      # TODO(https://github.com/apache/beam/issues/20076): Currently
+      # unsupported.
       _TypeMapEntry(match=is_new_type, arity=0, beam_type=typehints.Any),
-      # TODO(BEAM-8487): Currently unsupported.
+      # TODO(https://github.com/apache/beam/issues/19954): Currently
+      # unsupported.
       _TypeMapEntry(match=is_forward_ref, arity=0, beam_type=typehints.Any),
       _TypeMapEntry(match=is_any, arity=0, beam_type=typehints.Any),
       _TypeMapEntry(
diff --git a/sdks/python/apache_beam/typehints/native_type_compatibility_test.py b/sdks/python/apache_beam/typehints/native_type_compatibility_test.py
index 711bbc1e0d0..b13df6c2062 100644
--- a/sdks/python/apache_beam/typehints/native_type_compatibility_test.py
+++ b/sdks/python/apache_beam/typehints/native_type_compatibility_test.py
@@ -121,13 +121,13 @@ class NativeTypeCompatibilityTest(unittest.TestCase):
         typehints.Any, convert_to_beam_type(typing.NewType('Number', int)))
 
   def test_pattern(self):
-    # TODO(BEAM-10254): Unsupported.
+    # TODO(https://github.com/apache/beam/issues/20489): Unsupported.
     self.assertEqual(typehints.Any, convert_to_beam_type(typing.Pattern))
     self.assertEqual(typehints.Any, convert_to_beam_type(typing.Pattern[str]))
     self.assertEqual(typehints.Any, convert_to_beam_type(typing.Pattern[bytes]))
 
   def test_match(self):
-    # TODO(BEAM-10254): Unsupported.
+    # TODO(https://github.com/apache/beam/issues/20489): Unsupported.
     self.assertEqual(typehints.Any, convert_to_beam_type(typing.Match))
     self.assertEqual(typehints.Any, convert_to_beam_type(typing.Match[str]))
     self.assertEqual(typehints.Any, convert_to_beam_type(typing.Match[bytes]))
diff --git a/sdks/python/apache_beam/typehints/typed_pipeline_test.py b/sdks/python/apache_beam/typehints/typed_pipeline_test.py
index 5e833ce9fba..8b20c9e8296 100644
--- a/sdks/python/apache_beam/typehints/typed_pipeline_test.py
+++ b/sdks/python/apache_beam/typehints/typed_pipeline_test.py
@@ -751,9 +751,10 @@ class SideInputTest(unittest.TestCase):
   def test_var_positional_only_side_input_hint(self):
     # Test that a lambda that accepts only a VAR_POSITIONAL can accept
     # side-inputs.
-    # TODO(BEAM-8247): There's a bug with trivial_inference inferring the output
-    #   type when side-inputs are used (their type hints are not passed). Remove
-    #   with_output_types(...) when this bug is fixed.
+    # TODO(https://github.com/apache/beam/issues/19824): There's a bug with
+    #   trivial_inference inferring the output type when side-inputs are used
+    #   (their type hints are not passed). Remove with_output_types(...) when
+    #   this bug is fixed.
     result = (['a', 'b', 'c']
               | beam.Map(lambda *args: args, 5).with_input_types(
                   str, int).with_output_types(typehints.Tuple[str, int]))
@@ -960,9 +961,12 @@ class AnnotationsTest(unittest.TestCase):
     self.assertEqual(th.input_types, ((int, ), {}))
     self.assertEqual(th.output_types, ((int, ), {}))
 
-  @unittest.skip('BEAM-8662: Py3 annotations not yet supported for MapTuple')
+  @unittest.skip(
+      'https://github.com/apache/beam/issues/19961: Py3 annotations not yet '
+      'supported for MapTuple')
   def test_flat_map_tuple_wrapper(self):
-    # TODO(BEAM-8662): Also test with a fn that accepts default arguments.
+    # TODO(https://github.com/apache/beam/issues/19961): Also test with a fn
+    # that accepts default arguments.
     def tuple_map_fn(a: str, b: str, c: str) -> typehints.Iterable[str]:
       return [a, b, c]
 
@@ -987,9 +991,12 @@ class AnnotationsTest(unittest.TestCase):
     self.assertEqual(th.input_types, ((int, ), {}))
     self.assertEqual(th.output_types, ((typehints.Optional[int], ), {}))
 
-  @unittest.skip('BEAM-8662: Py3 annotations not yet supported for MapTuple')
+  @unittest.skip(
+      'https://github.com/apache/beam/issues/19961: Py3 annotations not yet '
+      'supported for MapTuple')
   def test_map_tuple(self):
-    # TODO(BEAM-8662): Also test with a fn that accepts default arguments.
+    # TODO(https://github.com/apache/beam/issues/19961): Also test with a fn
+    # that accepts default arguments.
     def tuple_map_fn(a: str, b: str, c: str) -> str:
       return a + b + c
 
diff --git a/sdks/python/apache_beam/typehints/typehints.py b/sdks/python/apache_beam/typehints/typehints.py
index 45c2366dd8b..9a158bd691d 100644
--- a/sdks/python/apache_beam/typehints/typehints.py
+++ b/sdks/python/apache_beam/typehints/typehints.py
@@ -365,7 +365,8 @@ def validate_composite_type_param(type_param, error_msg_prefix):
         (error_msg_prefix, type_param, type_param.__class__.__name__))
 
 
-# TODO(BEAM-12469): Remove this function and use plain repr() instead.
+# TODO(https://github.com/apache/beam/issues/20982): Remove this function and
+# use plain repr() instead.
 def _unified_repr(o):
   """Given an object return a qualified name for the object.
 
@@ -430,7 +431,8 @@ class AnyTypeConstraint(TypeConstraint):
     return 'Any'
 
   def __hash__(self):
-    # TODO(BEAM-3730): Fix typehints.TypeVariable issues with __hash__.
+    # TODO(https://github.com/apache/beam/issues/18633): Fix
+    # typehints.TypeVariable issues with __hash__.
     return hash(id(self))
 
   def type_check(self, instance):
@@ -452,7 +454,8 @@ class TypeVariable(AnyTypeConstraint):
     return type(self) == type(other)
 
   def __hash__(self):
-    # TODO(BEAM-3730): Fix typehints.TypeVariable issues with __hash__.
+    # TODO(https://github.com/apache/beam/issues/18633): Fix
+    # typehints.TypeVariable issues with __hash__.
     return hash(id(self))
 
   def __repr__(self):
diff --git a/sdks/python/apache_beam/utils/counters.py b/sdks/python/apache_beam/utils/counters.py
index e5e43eee54f..214fa433de1 100644
--- a/sdks/python/apache_beam/utils/counters.py
+++ b/sdks/python/apache_beam/utils/counters.py
@@ -163,7 +163,8 @@ class Counter(object):
   BEAM_DISTRIBUTION = cy_combiners.DistributionInt64Fn()
 
   # Dataflow Distribution Accumulator Fn.
-  # TODO(BEAM-4045): Generalize distribution counter if necessary.
+  # TODO(https://github.com/apache/beam/issues/18843): Generalize distribution
+  # counter if necessary.
   DATAFLOW_DISTRIBUTION = cy_combiners.DataflowDistributionCounterFn()
 
   def __init__(self, name, combine_fn):
diff --git a/sdks/python/apache_beam/utils/retry.py b/sdks/python/apache_beam/utils/retry.py
index 695a718278f..a6cde4a2a12 100644
--- a/sdks/python/apache_beam/utils/retry.py
+++ b/sdks/python/apache_beam/utils/retry.py
@@ -138,8 +138,8 @@ def retry_on_server_errors_filter(exception):
   return not isinstance(exception, PermanentException)
 
 
-# TODO(BEAM-6202): Dataflow returns 404 for job ids that actually exist.
-# Retry on those errors.
+# TODO(https://github.com/apache/beam/issues/19350): Dataflow returns 404 for
+# job ids that actually exist. Retry on those errors.
 def retry_on_server_errors_and_notfound_filter(exception):
   if HttpError is not None and isinstance(exception, HttpError):
     if exception.status_code == 404:  # 404 Not Found
diff --git a/sdks/python/apache_beam/utils/timestamp.py b/sdks/python/apache_beam/utils/timestamp.py
index 7f2ad75158e..502d1f78fa7 100644
--- a/sdks/python/apache_beam/utils/timestamp.py
+++ b/sdks/python/apache_beam/utils/timestamp.py
@@ -183,13 +183,15 @@ class Timestamp(object):
     """
 
     if timestamp_proto.nanos % 1000 != 0:
-      # TODO(BEAM-8738): Better define timestamps.
+      # TODO(https://github.com/apache/beam/issues/19922): Better define
+      # timestamps.
       raise ValueError(
           "Cannot convert from nanoseconds to microseconds " +
           "because this loses precision. Please make sure that " +
           "this is the correct behavior you want and manually " +
           "truncate the precision to the nearest microseconds. " +
-          "See [BEAM-8738] for more information.")
+          "See [https://github.com/apache/beam/issues/19922] for " +
+          "more information.")
 
     return Timestamp(
         seconds=timestamp_proto.seconds, micros=timestamp_proto.nanos // 1000)
@@ -331,13 +333,15 @@ class Duration(object):
     """
 
     if duration_proto.nanos % 1000 != 0:
-      # TODO(BEAM-8738): Better define durations.
+      # TODO(https://github.com/apache/beam/issues/19922): Better define
+      # durations.
       raise ValueError(
           "Cannot convert from nanoseconds to microseconds " +
           "because this loses precision. Please make sure that " +
           "this is the correct behavior you want and manually " +
           "truncate the precision to the nearest microseconds. " +
-          "See [BEAM-8738] for more information.")
+          "See [https://github.com/apache/beam/issues/19922] for " +
+          "more information.")
 
     return Duration(
         seconds=duration_proto.seconds, micros=duration_proto.nanos // 1000)
diff --git a/sdks/python/apache_beam/utils/timestamp_test.py b/sdks/python/apache_beam/utils/timestamp_test.py
index 9ad8014878d..fd67b4fc082 100644
--- a/sdks/python/apache_beam/utils/timestamp_test.py
+++ b/sdks/python/apache_beam/utils/timestamp_test.py
@@ -165,7 +165,8 @@ class TimestampTest(unittest.TestCase):
     self.assertEqual(actual_ts, expected_ts)
 
   def test_from_proto_fails_with_truncation(self):
-    # TODO(BEAM-8738): Better define timestamps.
+    # TODO(https://github.com/apache/beam/issues/19922): Better define
+    # timestamps.
     with self.assertRaises(ValueError):
       Timestamp.from_proto(timestamp_pb2.Timestamp(seconds=1234, nanos=56789))
 
@@ -226,7 +227,8 @@ class DurationTest(unittest.TestCase):
     self.assertEqual(actual_dur, expected_dur)
 
   def test_from_proto_fails_with_truncation(self):
-    # TODO(BEAM-8738): Better define durations.
+    # TODO(https://github.com/apache/beam/issues/19922): Better define
+    # durations.
     with self.assertRaises(ValueError):
       Duration.from_proto(duration_pb2.Duration(seconds=1234, nanos=56789))
 
diff --git a/sdks/python/apache_beam/utils/urns.py b/sdks/python/apache_beam/utils/urns.py
index c8da0da8f3f..3f2cb43e975 100644
--- a/sdks/python/apache_beam/utils/urns.py
+++ b/sdks/python/apache_beam/utils/urns.py
@@ -19,7 +19,8 @@
 
 # pytype: skip-file
 
-# TODO(BEAM-2685): Issue with dill + local classes + abc metaclass
+# TODO(https://github.com/apache/beam/issues/18399): Issue with dill + local
+# classes + abc metaclass
 # import abc
 import inspect
 from typing import TYPE_CHECKING
@@ -60,7 +61,8 @@ class RunnerApiFn(object):
   to register serialization via pickling.
   """
 
-  # TODO(BEAM-2685): Issue with dill + local classes + abc metaclass
+  # TODO(https://github.com/apache/beam/issues/18399): Issue with dill + local
+  # classes + abc metaclass
   # __metaclass__ = abc.ABCMeta
 
   _known_urns = {}  # type: Dict[str, Tuple[Optional[type], ConstructorFn]]
diff --git a/sdks/python/build-requirements.txt b/sdks/python/build-requirements.txt
index da98fb3ea43..6e0c221778b 100644
--- a/sdks/python/build-requirements.txt
+++ b/sdks/python/build-requirements.txt
@@ -15,7 +15,7 @@
 #    limitations under the License.
 #
 
-# TODO(BEAM-8954): Consider PEP-517/PEP-518 instead of this file.
+# TODO(https://github.com/apache/beam/issues/20051): Consider PEP-517/PEP-518 instead of this file.
 
 setuptools
 wheel>=0.36.0
diff --git a/sdks/python/mypy.ini b/sdks/python/mypy.ini
index de87de9e79c..3c77c741d79 100644
--- a/sdks/python/mypy.ini
+++ b/sdks/python/mypy.ini
@@ -60,7 +60,7 @@ ignore_errors = true
 ignore_errors = true
 
 
-# TODO(BEAM-7746): Remove the lines below.
+# TODO(https://github.com/apache/beam/issues/19737): Remove the lines below.
 
 [mypy-apache_beam.io.*]
 ignore_errors = true
diff --git a/sdks/python/scripts/run_integration_test.sh b/sdks/python/scripts/run_integration_test.sh
index da942bd30e6..c90621f27ff 100755
--- a/sdks/python/scripts/run_integration_test.sh
+++ b/sdks/python/scripts/run_integration_test.sh
@@ -254,7 +254,7 @@ if [[ -z $PIPELINE_OPTS ]]; then
   # Add --runner_v2 if provided
   if [[ "$RUNNER_V2" = true ]]; then
     opts+=("--experiments=use_runner_v2")
-    # TODO(BEAM-11779) remove shuffle_mode=appliance with runner v2 once issue is resolved.
+    # TODO(https://github.com/apache/beam/issues/20806) remove shuffle_mode=appliance with runner v2 once issue is resolved.
     opts+=("--experiments=shuffle_mode=appliance")
     if [[ "$STREAMING" = true ]]; then
       # Dataflow Runner V2 only supports streaming engine.
diff --git a/sdks/python/test-suites/dataflow/common.gradle b/sdks/python/test-suites/dataflow/common.gradle
index fdc428001c3..be278f6d3e3 100644
--- a/sdks/python/test-suites/dataflow/common.gradle
+++ b/sdks/python/test-suites/dataflow/common.gradle
@@ -243,7 +243,7 @@ task validatesRunnerStreamingTests {
 
   def dataflowWorkerJar = project(":runners:google-cloud-dataflow-java:worker").shadowJar.archivePath
 
-  // TODO(BEAM-3544,BEAM-5025): Disable tests with 'sickbay-streaming' tag.
+  // TODO(BEAM-3544,https://github.com/apache/beam/issues/19012): Disable tests with 'sickbay-streaming' tag.
   // Execute tests with xdists
   doFirst {
     def argMap = [
diff --git a/sdks/python/test-suites/direct/xlang/build.gradle b/sdks/python/test-suites/direct/xlang/build.gradle
index ffe0e836c66..74cda691f14 100644
--- a/sdks/python/test-suites/direct/xlang/build.gradle
+++ b/sdks/python/test-suites/direct/xlang/build.gradle
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-// TODO(BEAM-9980): Move this definition into common.gradle.
+// TODO(https://github.com/apache/beam/issues/20209): Move this definition into common.gradle.
 
 plugins { id 'org.apache.beam.module' }
 applyJavaNature(publish: false, exportJavadoc: false)
diff --git a/sdks/python/test-suites/portable/common.gradle b/sdks/python/test-suites/portable/common.gradle
index cdff054c180..e458cb0c9d5 100644
--- a/sdks/python/test-suites/portable/common.gradle
+++ b/sdks/python/test-suites/portable/common.gradle
@@ -57,7 +57,7 @@ task flinkValidatesRunner() {
   dependsOn 'flinkCompatibilityMatrixLOOPBACK'
 }
 
-// TODO(BEAM-8598): Enable on pre-commit.
+// TODO(https://github.com/apache/beam/issues/19962): Enable on pre-commit.
 tasks.register("flinkTriggerTranscript") {
   dependsOn 'setupVirtualenv'
   dependsOn ":runners:flink:${latestFlinkVersion}:job-server:shadowJar"
diff --git a/sdks/python/test-suites/tox/py37/build.gradle b/sdks/python/test-suites/tox/py37/build.gradle
index 2583d385bc5..2ea0e46ca5b 100644
--- a/sdks/python/test-suites/tox/py37/build.gradle
+++ b/sdks/python/test-suites/tox/py37/build.gradle
@@ -37,5 +37,5 @@ lint.dependsOn mypyPy37
 
 apply from: "../common.gradle"
 
-// TODO(BEAM-8954): Remove this once tox uses isolated builds.
+// TODO(https://github.com/apache/beam/issues/20051): Remove this once tox uses isolated builds.
 testPy37Cython.mustRunAfter testPython37, testPy37Cloud
diff --git a/sdks/python/test-suites/tox/py38/build.gradle b/sdks/python/test-suites/tox/py38/build.gradle
index 2a5ac1482ff..20dfbfe0fd5 100644
--- a/sdks/python/test-suites/tox/py38/build.gradle
+++ b/sdks/python/test-suites/tox/py38/build.gradle
@@ -31,7 +31,7 @@ check.dependsOn formatter
 
 apply from: "../common.gradle"
 
-// TODO(BEAM-8954): Remove this once tox uses isolated builds.
+// TODO(https://github.com/apache/beam/issues/20051): Remove this once tox uses isolated builds.
 testPy38Cython.mustRunAfter testPython38, testPy38CloudCoverage
 
 // Create a test task for each major version of pyarrow
diff --git a/sdks/python/test-suites/tox/py39/build.gradle b/sdks/python/test-suites/tox/py39/build.gradle
index 5f03be1f7f4..380cc1486da 100644
--- a/sdks/python/test-suites/tox/py39/build.gradle
+++ b/sdks/python/test-suites/tox/py39/build.gradle
@@ -28,5 +28,5 @@ pythonVersion = '3.9'
 
 apply from: "../common.gradle"
 
-// TODO(BEAM-8954): Remove this once tox uses isolated builds.
+// TODO(https://github.com/apache/beam/issues/20051): Remove this once tox uses isolated builds.
 testPy39Cython.mustRunAfter testPython39, testPy39Cloud
diff --git a/sdks/python/test-suites/tox/pycommon/build.gradle b/sdks/python/test-suites/tox/pycommon/build.gradle
index 4d63ba18186..7e697ec367e 100644
--- a/sdks/python/test-suites/tox/pycommon/build.gradle
+++ b/sdks/python/test-suites/tox/pycommon/build.gradle
@@ -20,7 +20,7 @@
  * Unit tests for common Python components.
  */
 
-// TODO(BEAM-9980): See if we can avoid hardcoding python version here.
+// TODO(https://github.com/apache/beam/issues/20209): See if we can avoid hardcoding python version here.
 plugins { id 'org.apache.beam.module' }
 applyPythonNature()
 
diff --git a/sdks/python/tox.ini b/sdks/python/tox.ini
index 14b94291cfa..fa78ceea0f9 100644
--- a/sdks/python/tox.ini
+++ b/sdks/python/tox.ini
@@ -81,7 +81,7 @@ list_dependencies_command = {envbindir}/python.exe {envbindir}/pip.exe freeze
 # See https://docs.python.org/2/library/sys.html#sys.platform for platform codes
 platform = linux
 commands =
-  # TODO(BEAM-8954): Remove this build_ext invocation once local source no longer
+  # TODO(https://github.com/apache/beam/issues/20051): Remove this build_ext invocation once local source no longer
   #   shadows the installed apache_beam.
   python setup.py build_ext --inplace
   python apache_beam/examples/complete/autocomplete_test.py
@@ -143,7 +143,7 @@ deps =
   Sphinx==1.8.5
   sphinx_rtd_theme==0.4.3
   docutils<0.18
-  Jinja2==3.0.3 # TODO(BEAM-14172): Sphinx version is too old.
+  Jinja2==3.0.3 # TODO(https://github.com/apache/beam/issues/21587): Sphinx version is too old.
   torch
 commands =
   time {toxinidir}/scripts/generate_pydoc.sh
@@ -194,7 +194,7 @@ commands =
   time yapf --diff --parallel --recursive apache_beam
 
 [testenv:py3-dependency-check]
-# TODO(BEAM-10425): botocore, a part of [aws], wants docutils<0.16, but Sphinx
+# TODO(https://github.com/apache/beam/issues/20337): botocore, a part of [aws], wants docutils<0.16, but Sphinx
 # pulls in the latest docutils. Uncomment this line once botocore does not
 # conflict with Sphinx:
 # extras = docs,test,gcp,aws,interactive,interactive_test