Posted to commits@airflow.apache.org by po...@apache.org on 2021/01/17 18:00:34 UTC

[airflow] branch master updated: Use plain asserts in tests. (#12951)

This is an automated email from the ASF dual-hosted git repository.

potiuk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/airflow.git


The following commit(s) were added to refs/heads/master by this push:
     new 39d9057  Use plain asserts in tests. (#12951)
39d9057 is described below

commit 39d90579843ac2627cd3d46f784304adacf372ed
Author: Joshua Carp <jm...@gmail.com>
AuthorDate: Sun Jan 17 13:00:17 2021 -0500

    Use plain asserts in tests. (#12951)
---
 TESTING.rst                                        |   4 +-
 chart/tests/test_basic_helm_chart.py               |  69 ++--
 chart/tests/test_celery_kubernetes_executor.py     |   8 +-
 chart/tests/test_cleanup_pods.py                   |  35 +-
 chart/tests/test_dags_persistent_volume_claim.py   |  19 +-
 chart/tests/test_extra_configmaps_secrets.py       |  10 +-
 chart/tests/test_extra_env_env_from.py             |   4 +-
 chart/tests/test_flower_authorization.py           |  30 +-
 chart/tests/test_git_sync_scheduler.py             | 100 ++---
 chart/tests/test_git_sync_webserver.py             |  10 +-
 chart/tests/test_git_sync_worker.py                |  12 +-
 chart/tests/test_ingress_web.py                    |   2 +-
 chart/tests/test_keda.py                           |   6 +-
 chart/tests/test_kerberos.py                       |   2 +-
 chart/tests/test_migrate_database_job.py           |   7 +-
 chart/tests/test_pod_launcher_role.py              |   4 +-
 chart/tests/test_pod_template_file.py              | 193 ++++-----
 chart/tests/test_redis.py                          |  33 +-
 chart/tests/test_scheduler.py                      |   6 +-
 chart/tests/test_worker.py                         |   6 +-
 docs/apache-airflow/best-practices.rst             |  23 +-
 kubernetes_tests/test_kubernetes_executor.py       |  28 +-
 kubernetes_tests/test_kubernetes_pod_operator.py   | 112 +++---
 .../test_kubernetes_pod_operator_backcompat.py     |  72 ++--
 pylintrc                                           |   3 +-
 tests/always/test_example_dags.py                  |   8 +-
 tests/always/test_project_structure.py             |  10 +-
 tests/api/auth/backend/test_kerberos_auth.py       |   8 +-
 tests/api/auth/test_client.py                      |   4 +-
 tests/api/client/test_local_client.py              |  21 +-
 tests/api/common/experimental/test_delete_dag.py   |  32 +-
 tests/api/common/experimental/test_mark_tasks.py   | 114 +++---
 tests/api/common/experimental/test_pool.py         |  92 +++--
 tests/api/common/experimental/test_trigger_dag.py  |  13 +-
 .../endpoints/test_connection_endpoint.py          | 182 ++++-----
 tests/api_connexion/endpoints/test_dag_endpoint.py | 177 ++++-----
 .../endpoints/test_dag_run_endpoint.py             |  94 ++---
 .../endpoints/test_dag_source_endpoint.py          |  16 +-
 .../endpoints/test_event_log_endpoint.py           | 116 +++---
 .../endpoints/test_extra_link_endpoint.py          |  73 ++--
 .../endpoints/test_health_endpoint.py              |  30 +-
 .../endpoints/test_import_error_endpoint.py        |  73 ++--
 tests/api_connexion/endpoints/test_log_endpoint.py |  59 ++-
 .../api_connexion/endpoints/test_pool_endpoint.py  | 246 +++++-------
 .../endpoints/test_task_instance_endpoint.py       | 150 ++++---
 .../endpoints/test_variable_endpoint.py            |   2 +-
 .../endpoints/test_version_endpoint.py             |   4 +-
 .../api_connexion/endpoints/test_xcom_endpoint.py  | 182 ++++-----
 tests/api_connexion/schemas/test_common_schema.py  |  97 +++--
 .../schemas/test_connection_schema.py              | 141 +++----
 tests/api_connexion/schemas/test_dag_run_schema.py |  88 ++---
 tests/api_connexion/schemas/test_dag_schema.py     |  88 ++---
 tests/api_connexion/schemas/test_error_schema.py   |  52 ++-
 .../api_connexion/schemas/test_event_log_schema.py |  76 ++--
 tests/api_connexion/schemas/test_health_schema.py  |   2 +-
 tests/api_connexion/schemas/test_pool_schemas.py   |  66 ++--
 .../schemas/test_task_instance_schema.py           |  11 +-
 tests/api_connexion/schemas/test_version_schema.py |   2 +-
 tests/api_connexion/schemas/test_xcom_schema.py    | 113 +++---
 tests/api_connexion/test_error_handling.py         |   6 +-
 tests/api_connexion/test_parameters.py             |  17 +-
 tests/cli/commands/test_celery_command.py          |  10 +-
 tests/cli/commands/test_cheat_sheet_command.py     |   6 +-
 tests/cli/commands/test_config_command.py          |  18 +-
 tests/cli/commands/test_connection_command.py      |  46 +--
 tests/cli/commands/test_dag_command.py             |  92 ++---
 tests/cli/commands/test_db_command.py              |  37 +-
 tests/cli/commands/test_info_command.py            |  36 +-
 tests/cli/commands/test_kubernetes_command.py      |   8 +-
 tests/cli/commands/test_legacy_commands.py         |  23 +-
 tests/cli/commands/test_plugins_command.py         |   2 +-
 tests/cli/commands/test_pool_command.py            |  18 +-
 tests/cli/commands/test_role_command.py            |  20 +-
 tests/cli/commands/test_sync_perm_command.py       |   2 +-
 tests/cli/commands/test_task_command.py            |  76 ++--
 tests/cli/commands/test_user_command.py            |  38 +-
 tests/cli/commands/test_variable_command.py        |  40 +-
 tests/cli/commands/test_version_command.py         |   2 +-
 tests/cli/commands/test_webserver_command.py       |  66 ++--
 tests/cli/test_cli_parser.py                       |  54 ++-
 tests/core/test_config_templates.py                |   4 +-
 tests/core/test_configuration.py                   | 279 +++++++------
 tests/core/test_core.py                            |  76 ++--
 tests/core/test_core_to_contrib.py                 |  18 +-
 tests/core/test_impersonation_tests.py             |   4 +-
 tests/core/test_logging_config.py                  |  12 +-
 tests/core/test_providers_manager.py               |  15 +-
 tests/core/test_sentry.py                          |   4 +-
 tests/core/test_settings.py                        |  14 +-
 tests/core/test_sqlalchemy_config.py               |   3 +-
 tests/core/test_stats.py                           |   7 +-
 tests/executors/test_base_executor.py              |   8 +-
 tests/executors/test_celery_executor.py            | 125 +++---
 tests/executors/test_dask_executor.py              |   8 +-
 tests/executors/test_executor_loader.py            |  12 +-
 tests/executors/test_kubernetes_executor.py        |  63 ++-
 tests/executors/test_local_executor.py             |  10 +-
 tests/hooks/test_dbapi.py                          |  22 +-
 tests/jobs/test_backfill_job.py                    | 322 ++++++++-------
 tests/jobs/test_local_task_job.py                  |  54 +--
 tests/jobs/test_scheduler_job.py                   | 373 +++++++++---------
 tests/kubernetes/models/test_secret.py             | 144 ++++---
 tests/kubernetes/test_client.py                    |   4 +-
 tests/kubernetes/test_pod_generator.py             | 151 ++++----
 tests/kubernetes/test_pod_launcher.py              |  36 +-
 tests/kubernetes/test_refresh_config.py            |   7 +-
 tests/lineage/test_lineage.py                      |  22 +-
 tests/macros/test_hive.py                          |  10 +-
 tests/models/test_baseoperator.py                  |  84 ++--
 tests/models/test_cleartasks.py                    | 100 ++---
 tests/models/test_connection.py                    | 115 +++---
 tests/models/test_dag.py                           | 431 ++++++++++-----------
 tests/models/test_dagbag.py                        | 133 +++----
 tests/models/test_dagcode.py                       |  22 +-
 tests/models/test_dagrun.py                        | 118 +++---
 tests/models/test_pool.py                          |  49 ++-
 tests/models/test_renderedtifields.py              |  44 +--
 tests/models/test_sensorinstance.py                |   4 +-
 tests/models/test_serialized_dag.py                |  28 +-
 tests/models/test_skipmixin.py                     |   4 +-
 tests/models/test_taskinstance.py                  | 367 +++++++++---------
 tests/models/test_variable.py                      |  71 ++--
 tests/models/test_xcom.py                          |  41 +-
 tests/operators/test_bash.py                       |  24 +-
 tests/operators/test_branch_operator.py            |  20 +-
 tests/operators/test_latest_only_operator.py       |  91 ++---
 tests/operators/test_python.py                     | 118 +++---
 tests/operators/test_sql.py                        |  76 ++--
 tests/operators/test_subdag_operator.py            |  33 +-
 tests/operators/test_trigger_dagrun.py             |  42 +-
 tests/plugins/test_plugin_ignore.py                |   4 +-
 tests/plugins/test_plugins_manager.py              |  26 +-
 tests/providers/amazon/aws/hooks/test_athena.py    |  16 +-
 tests/providers/amazon/aws/hooks/test_base_aws.py  |  45 ++-
 .../amazon/aws/hooks/test_base_aws_system.py       |   2 +-
 .../amazon/aws/hooks/test_batch_client.py          |  79 ++--
 .../amazon/aws/hooks/test_batch_waiters.py         |  86 ++--
 .../amazon/aws/hooks/test_cloud_formation.py       |  14 +-
 tests/providers/amazon/aws/hooks/test_datasync.py  | 109 +++---
 tests/providers/amazon/aws/hooks/test_dynamodb.py  |   4 +-
 tests/providers/amazon/aws/hooks/test_ec2.py       |  10 +-
 .../hooks/test_elasticache_replication_group.py    |   4 +-
 tests/providers/amazon/aws/hooks/test_emr.py       |  10 +-
 tests/providers/amazon/aws/hooks/test_glacier.py   |   6 +-
 tests/providers/amazon/aws/hooks/test_glue.py      |   6 +-
 .../amazon/aws/hooks/test_glue_catalog.py          |  25 +-
 tests/providers/amazon/aws/hooks/test_kinesis.py   |   8 +-
 tests/providers/amazon/aws/hooks/test_logs.py      |   2 +-
 tests/providers/amazon/aws/hooks/test_redshift.py  |  16 +-
 tests/providers/amazon/aws/hooks/test_s3.py        |  15 +-
 tests/providers/amazon/aws/hooks/test_sagemaker.py |  82 ++--
 .../amazon/aws/hooks/test_secrets_manager.py       |   8 +-
 tests/providers/amazon/aws/hooks/test_sns.py       |   2 +-
 tests/providers/amazon/aws/hooks/test_sqs.py       |   2 +-
 .../amazon/aws/hooks/test_step_function.py         |   2 +-
 .../amazon/aws/log/test_cloudwatch_task_handler.py |  31 +-
 .../amazon/aws/log/test_s3_task_handler.py         |  46 +--
 .../providers/amazon/aws/operators/test_athena.py  |  40 +-
 tests/providers/amazon/aws/operators/test_batch.py |  49 ++-
 .../amazon/aws/operators/test_datasync.py          | 228 ++++++-----
 .../aws/operators/test_ec2_start_instance.py       |  12 +-
 .../amazon/aws/operators/test_ec2_stop_instance.py |  12 +-
 tests/providers/amazon/aws/operators/test_ecs.py   |  59 ++-
 .../amazon/aws/operators/test_emr_add_steps.py     |  13 +-
 .../aws/operators/test_emr_create_job_flow.py      |  12 +-
 .../aws/operators/test_emr_modify_cluster.py       |  13 +-
 tests/providers/amazon/aws/operators/test_glue.py  |   2 +-
 .../amazon/aws/operators/test_s3_copy_object.py    |  12 +-
 .../amazon/aws/operators/test_s3_delete_objects.py |  18 +-
 .../amazon/aws/operators/test_s3_file_transform.py |   9 +-
 .../providers/amazon/aws/operators/test_s3_list.py |   2 +-
 .../amazon/aws/operators/test_sagemaker_base.py    |   2 +-
 .../aws/operators/test_sagemaker_endpoint.py       |   6 +-
 .../operators/test_sagemaker_endpoint_config.py    |   7 +-
 .../amazon/aws/operators/test_sagemaker_model.py   |   5 +-
 .../aws/operators/test_sagemaker_processing.py     |  10 +-
 .../aws/operators/test_sagemaker_training.py       |  23 +-
 .../aws/operators/test_sagemaker_transform.py      |  14 +-
 .../amazon/aws/operators/test_sagemaker_tuning.py  |  37 +-
 tests/providers/amazon/aws/operators/test_sns.py   |  14 +-
 tests/providers/amazon/aws/operators/test_sqs.py   |  12 +-
 .../test_step_function_get_execution_output.py     |  10 +-
 .../test_step_function_start_execution.py          |  14 +-
 .../amazon/aws/secrets/test_secrets_manager.py     |  16 +-
 .../amazon/aws/secrets/test_systems_manager.py     |  20 +-
 tests/providers/amazon/aws/sensors/test_athena.py  |  16 +-
 .../amazon/aws/sensors/test_cloud_formation.py     |  19 +-
 .../amazon/aws/sensors/test_ec2_instance_state.py  |  27 +-
 .../providers/amazon/aws/sensors/test_emr_base.py  |  14 +-
 .../amazon/aws/sensors/test_emr_job_flow.py        |   9 +-
 .../providers/amazon/aws/sensors/test_emr_step.py  |  12 +-
 tests/providers/amazon/aws/sensors/test_glacier.py |  14 +-
 tests/providers/amazon/aws/sensors/test_glue.py    |   4 +-
 .../aws/sensors/test_glue_catalog_partition.py     |  16 +-
 .../providers/amazon/aws/sensors/test_redshift.py  |   6 +-
 tests/providers/amazon/aws/sensors/test_s3_key.py  |  27 +-
 .../amazon/aws/sensors/test_s3_keys_unchanged.py   |  23 +-
 .../providers/amazon/aws/sensors/test_s3_prefix.py |   4 +-
 .../amazon/aws/sensors/test_sagemaker_base.py      |  12 +-
 .../amazon/aws/sensors/test_sagemaker_endpoint.py  |   7 +-
 .../amazon/aws/sensors/test_sagemaker_training.py  |  11 +-
 .../amazon/aws/sensors/test_sagemaker_transform.py |   7 +-
 .../amazon/aws/sensors/test_sagemaker_tuning.py    |   7 +-
 tests/providers/amazon/aws/sensors/test_sqs.py     |  22 +-
 .../aws/sensors/test_step_function_execution.py    |  15 +-
 .../amazon/aws/transfers/test_dynamodb_to_s3.py    |   2 +-
 .../amazon/aws/transfers/test_gcs_to_s3.py         |  22 +-
 .../amazon/aws/transfers/test_google_api_to_s3.py  |   5 +-
 .../amazon/aws/transfers/test_hive_to_dynamodb.py  |   6 +-
 .../amazon/aws/transfers/test_mongo_to_s3.py       |  20 +-
 .../amazon/aws/transfers/test_redshift_to_s3.py    |   8 +-
 .../amazon/aws/transfers/test_s3_to_redshift.py    |   8 +-
 .../amazon/aws/transfers/test_s3_to_sftp.py        |  21 +-
 .../amazon/aws/transfers/test_sftp_to_s3.py        |  12 +-
 .../apache/cassandra/hooks/test_cassandra.py       |  30 +-
 .../apache/cassandra/sensors/test_record.py        |   6 +-
 .../apache/cassandra/sensors/test_table.py         |   6 +-
 tests/providers/apache/druid/hooks/test_druid.py   |  51 +--
 .../providers/apache/druid/operators/test_druid.py |   2 +-
 .../apache/druid/operators/test_druid_check.py     |   4 +-
 .../apache/druid/transfers/test_hive_to_druid.py   |   2 +-
 tests/providers/apache/hdfs/hooks/test_hdfs.py     |  10 +-
 tests/providers/apache/hdfs/hooks/test_webhdfs.py  |  13 +-
 tests/providers/apache/hdfs/sensors/test_hdfs.py   |  16 +-
 .../providers/apache/hdfs/sensors/test_web_hdfs.py |   4 +-
 tests/providers/apache/hive/hooks/test_hive.py     | 103 ++---
 tests/providers/apache/hive/operators/test_hive.py |  14 +-
 .../apache/hive/operators/test_hive_stats.py       |  54 ++-
 .../hive/sensors/test_named_hive_partition.py      |  28 +-
 .../apache/hive/transfers/test_hive_to_mysql.py    |   4 +-
 .../apache/hive/transfers/test_mssql_to_hive.py    |   8 +-
 .../apache/hive/transfers/test_mysql_to_hive.py    |   6 +-
 .../apache/hive/transfers/test_s3_to_hive.py       |  44 +--
 tests/providers/apache/kylin/hooks/test_kylin.py   |  22 +-
 .../apache/kylin/operators/test_kylin_cube.py      |  34 +-
 tests/providers/apache/livy/hooks/test_livy.py     | 148 +++----
 tests/providers/apache/livy/operators/test_livy.py |  12 +-
 tests/providers/apache/livy/sensors/test_livy.py   |   2 +-
 tests/providers/apache/pig/hooks/test_pig.py       |  19 +-
 tests/providers/apache/pig/operators/test_pig.py   |   4 +-
 tests/providers/apache/pinot/hooks/test_pinot.py   |  24 +-
 .../apache/spark/hooks/test_spark_jdbc.py          |   4 +-
 .../providers/apache/spark/hooks/test_spark_sql.py | 120 +++---
 .../apache/spark/hooks/test_spark_submit.py        | 145 ++++---
 .../apache/spark/operators/test_spark_jdbc.py      |  56 +--
 .../apache/spark/operators/test_spark_sql.py       |  30 +-
 .../apache/spark/operators/test_spark_submit.py    |  54 +--
 tests/providers/apache/sqoop/hooks/test_sqoop.py   | 205 +++++-----
 .../providers/apache/sqoop/operators/test_sqoop.py |  52 +--
 .../providers/celery/sensors/test_celery_queue.py  |   6 +-
 tests/providers/cloudant/hooks/test_cloudant.py    |   6 +-
 .../cncf/kubernetes/hooks/test_kubernetes.py       |  17 +-
 .../kubernetes/operators/test_kubernetes_pod.py    |  58 ++-
 .../kubernetes/sensors/test_spark_kubernetes.py    |  24 +-
 .../providers/databricks/hooks/test_databricks.py  |  47 +--
 .../databricks/operators/test_databricks.py        |  42 +-
 tests/providers/datadog/hooks/test_datadog.py      |   8 +-
 tests/providers/datadog/sensors/test_datadog.py    |   4 +-
 tests/providers/dingding/hooks/test_dingding.py    |  25 +-
 .../providers/dingding/operators/test_dingding.py  |  12 +-
 .../discord/hooks/test_discord_webhook.py          |  12 +-
 .../discord/operators/test_discord_webhook.py      |  14 +-
 tests/providers/docker/hooks/test_docker.py        |  14 +-
 tests/providers/docker/operators/test_docker.py    |  47 +--
 .../docker/operators/test_docker_swarm.py          |  25 +-
 .../elasticsearch/hooks/test_elasticsearch.py      |  10 +-
 .../elasticsearch/log/test_es_task_handler.py      | 156 ++++----
 tests/providers/exasol/hooks/test_exasol.py        |  32 +-
 tests/providers/ftp/sensors/test_ftp.py            |  22 +-
 .../_internal_client/test_secret_manager_client.py |   8 +-
 .../providers/google/cloud/hooks/test_bigquery.py  | 180 ++++-----
 .../google/cloud/hooks/test_bigquery_dts.py        |   4 +-
 .../google/cloud/hooks/test_bigquery_system.py     |  14 +-
 .../providers/google/cloud/hooks/test_bigtable.py  |  36 +-
 .../google/cloud/hooks/test_cloud_build.py         |  33 +-
 .../google/cloud/hooks/test_cloud_memorystore.py   |  23 +-
 .../providers/google/cloud/hooks/test_cloud_sql.py | 201 +++++-----
 .../hooks/test_cloud_storage_transfer_service.py   | 145 ++++---
 tests/providers/google/cloud/hooks/test_compute.py |  48 +--
 .../google/cloud/hooks/test_compute_ssh.py         |  58 +--
 .../google/cloud/hooks/test_datacatalog.py         |  51 +--
 .../providers/google/cloud/hooks/test_dataflow.py  |  86 ++--
 .../providers/google/cloud/hooks/test_dataprep.py  |  12 +-
 .../providers/google/cloud/hooks/test_dataproc.py  |  41 +-
 .../providers/google/cloud/hooks/test_datastore.py |  58 +--
 tests/providers/google/cloud/hooks/test_dlp.py     | 165 ++++----
 .../providers/google/cloud/hooks/test_functions.py |  34 +-
 tests/providers/google/cloud/hooks/test_gcs.py     | 113 +++---
 tests/providers/google/cloud/hooks/test_gdm.py     |  14 +-
 tests/providers/google/cloud/hooks/test_kms.py     |  12 +-
 .../google/cloud/hooks/test_kms_system.py          |   4 +-
 .../google/cloud/hooks/test_kubernetes_engine.py   |  21 +-
 .../google/cloud/hooks/test_life_sciences.py       |  35 +-
 .../providers/google/cloud/hooks/test_mlengine.py  |  53 +--
 .../google/cloud/hooks/test_natural_language.py    |  16 +-
 .../providers/google/cloud/hooks/test_os_login.py  |   3 +-
 tests/providers/google/cloud/hooks/test_pubsub.py  |  61 +--
 .../google/cloud/hooks/test_secret_manager.py      |   4 +-
 .../cloud/hooks/test_secret_manager_system.py      |  10 +-
 tests/providers/google/cloud/hooks/test_spanner.py |  66 ++--
 .../google/cloud/hooks/test_speech_to_text.py      |   4 +-
 tests/providers/google/cloud/hooks/test_tasks.py   |  30 +-
 .../google/cloud/hooks/test_text_to_speech.py      |   4 +-
 .../providers/google/cloud/hooks/test_translate.py |  19 +-
 .../google/cloud/hooks/test_video_intelligence.py  |   8 +-
 tests/providers/google/cloud/hooks/test_vision.py  | 118 +++---
 .../google/cloud/log/test_gcs_task_handler.py      |  30 +-
 .../cloud/log/test_gcs_task_handler_system.py      |   6 +-
 .../cloud/log/test_stackdriver_task_handler.py     |  42 +-
 .../log/test_stackdriver_task_handler_system.py    |  10 +-
 .../google/cloud/operators/test_bigquery.py        | 102 ++---
 .../google/cloud/operators/test_bigtable.py        | 116 +++---
 .../google/cloud/operators/test_cloud_build.py     |  21 +-
 .../google/cloud/operators/test_cloud_sql.py       | 106 ++---
 .../cloud/operators/test_cloud_sql_system.py       |  22 +-
 .../test_cloud_storage_transfer_service.py         | 194 +++++-----
 .../google/cloud/operators/test_compute.py         | 145 +++----
 .../google/cloud/operators/test_datacatalog.py     |  12 +-
 .../google/cloud/operators/test_dataflow.py        |  46 +--
 .../google/cloud/operators/test_dataproc.py        |  80 ++--
 .../google/cloud/operators/test_functions.py       |  59 +--
 tests/providers/google/cloud/operators/test_gcs.py |   2 +-
 .../cloud/operators/test_kubernetes_engine.py      |  21 +-
 .../google/cloud/operators/test_life_sciences.py   |   4 +-
 .../google/cloud/operators/test_mlengine.py        | 123 +++---
 .../google/cloud/operators/test_mlengine_utils.py  |  16 +-
 .../cloud/operators/test_natural_language.py       |   8 +-
 .../google/cloud/operators/test_pubsub.py          |  10 +-
 .../google/cloud/operators/test_spanner.py         |  82 ++--
 .../google/cloud/operators/test_speech_to_text.py  |  14 +-
 .../google/cloud/operators/test_stackdriver.py     |   4 +-
 .../providers/google/cloud/operators/test_tasks.py | 100 +++--
 .../google/cloud/operators/test_text_to_speech.py  |   7 +-
 .../google/cloud/operators/test_translate.py       |  19 +-
 .../cloud/operators/test_translate_speech.py       |  26 +-
 .../google/cloud/operators/test_vision.py          |   4 +-
 .../google/cloud/secrets/test_secret_manager.py    |  40 +-
 .../cloud/secrets/test_secret_manager_system.py    |   4 +-
 .../google/cloud/sensors/test_bigquery.py          |   4 +-
 .../google/cloud/sensors/test_bigquery_dts.py      |   4 +-
 .../google/cloud/sensors/test_bigtable.py          |  15 +-
 .../sensors/test_cloud_storage_transfer_service.py |  10 +-
 .../google/cloud/sensors/test_dataflow.py          |  21 +-
 .../google/cloud/sensors/test_dataproc.py          |   9 +-
 tests/providers/google/cloud/sensors/test_gcs.py   |  57 +--
 .../providers/google/cloud/sensors/test_pubsub.py  |   9 +-
 .../google/cloud/transfers/test_adls_to_gcs.py     |  16 +-
 .../cloud/transfers/test_azure_fileshare_to_gcs.py |  16 +-
 .../cloud/transfers/test_cassandra_to_gcs.py       |  35 +-
 .../google/cloud/transfers/test_gcs_to_gcs.py      |  10 +-
 .../google/cloud/transfers/test_gcs_to_sftp.py     |  28 +-
 .../google/cloud/transfers/test_local_to_gcs.py    |  10 +-
 .../google/cloud/transfers/test_mssql_to_gcs.py    |  30 +-
 .../google/cloud/transfers/test_mysql_to_gcs.py    |  79 ++--
 .../google/cloud/transfers/test_oracle_to_gcs.py   |  30 +-
 .../google/cloud/transfers/test_postgres_to_gcs.py |  30 +-
 .../google/cloud/transfers/test_presto_to_gcs.py   |  54 +--
 .../google/cloud/transfers/test_s3_to_gcs.py       |  16 +-
 .../cloud/transfers/test_salesforce_to_gcs.py      |   2 +-
 .../google/cloud/transfers/test_sftp_to_gcs.py     |   8 +-
 .../cloud/utils/test_credentials_provider.py       | 113 +++---
 .../google/cloud/utils/test_field_sanitizer.py     |  60 ++-
 .../google/cloud/utils/test_field_validator.py     |  52 +--
 .../cloud/utils/test_mlengine_operator_utils.py    | 119 +++---
 .../utils/test_mlengine_prediction_summary.py      |  15 +-
 .../common/auth_backend/test_google_openid.py      |  26 +-
 .../google/common/hooks/test_base_google.py        | 153 ++++----
 .../common/utils/test_id_token_credentials.py      |  21 +-
 .../google/firebase/hooks/test_firestore.py        |  21 +-
 .../marketing_platform/hooks/test_analytics.py     |  14 +-
 .../hooks/test_campaign_manager.py                 |  22 +-
 .../marketing_platform/hooks/test_display_video.py |  18 +-
 .../marketing_platform/hooks/test_search_ads.py    |   8 +-
 .../sensors/test_campaign_manager.py               |   2 +-
 tests/providers/google/suite/hooks/test_drive.py   |  10 +-
 tests/providers/google/suite/hooks/test_sheets.py  |  24 +-
 .../google/suite/transfers/test_gcs_to_gdrive.py   |   4 +-
 tests/providers/grpc/hooks/test_grpc.py            |  26 +-
 tests/providers/grpc/operators/test_grpc.py        |   2 +-
 .../_internal_client/test_vault_client.py          | 195 +++++-----
 tests/providers/hashicorp/hooks/test_vault.py      | 155 ++++----
 tests/providers/hashicorp/secrets/test_vault.py    |  33 +-
 tests/providers/http/hooks/test_http.py            |  43 +-
 tests/providers/http/operators/test_http.py        |   4 +-
 tests/providers/http/sensors/test_http.py          |  11 +-
 tests/providers/imap/hooks/test_imap.py            |  48 +--
 .../providers/imap/sensors/test_imap_attachment.py |   2 +-
 tests/providers/jdbc/hooks/test_jdbc.py            |   6 +-
 tests/providers/jenkins/hooks/test_jenkins.py      |   8 +-
 .../jenkins/operators/test_jenkins_job_trigger.py  |  12 +-
 tests/providers/jira/hooks/test_jira.py            |   6 +-
 tests/providers/jira/operators/test_jira.py        |   8 +-
 tests/providers/jira/sensors/test_jira.py          |   4 +-
 tests/providers/microsoft/azure/hooks/test_adx.py  |  13 +-
 .../microsoft/azure/hooks/test_azure_batch.py      |  14 +-
 .../azure/hooks/test_azure_container_instance.py   |   6 +-
 .../azure/hooks/test_azure_container_registry.py   |   8 +-
 .../azure/hooks/test_azure_container_volume.py     |  12 +-
 .../microsoft/azure/hooks/test_azure_cosmos.py     |  21 +-
 .../microsoft/azure/hooks/test_azure_data_lake.py  |   6 +-
 .../microsoft/azure/hooks/test_azure_fileshare.py  |  16 +-
 tests/providers/microsoft/azure/hooks/test_wasb.py |  24 +-
 .../microsoft/azure/log/test_wasb_task_handler.py  |  31 +-
 .../microsoft/azure/operators/test_adls_list.py    |   2 +-
 .../microsoft/azure/operators/test_adx.py          |  10 +-
 .../microsoft/azure/operators/test_azure_batch.py  |  30 +-
 .../operators/test_azure_container_instances.py    |  87 ++---
 .../azure/operators/test_wasb_delete_blob.py       |  12 +-
 .../azure/secrets/test_azure_key_vault.py          |  24 +-
 .../microsoft/azure/sensors/test_azure_cosmos.py   |   4 +-
 .../providers/microsoft/azure/sensors/test_wasb.py |  24 +-
 .../azure/transfers/test_azure_blob_to_gcs.py      |  24 +-
 .../microsoft/azure/transfers/test_file_to_wasb.py |  14 +-
 .../azure/transfers/test_local_to_adls.py          |   6 +-
 .../transfers/test_oracle_to_azure_data_lake.py    |   8 +-
 .../providers/microsoft/mssql/hooks/test_mssql.py  |   4 +-
 .../providers/microsoft/winrm/hooks/test_winrm.py  |  14 +-
 .../microsoft/winrm/operators/test_winrm.py        |   6 +-
 tests/providers/mongo/hooks/test_mongo.py          |  54 +--
 tests/providers/mongo/sensors/test_mongo.py        |   2 +-
 tests/providers/mysql/hooks/test_mysql.py          |  70 ++--
 .../providers/mysql/transfers/test_s3_to_mysql.py  |   4 +-
 tests/providers/neo4j/hooks/test_neo4j.py          |   8 +-
 tests/providers/openfaas/hooks/test_openfaas.py    |  25 +-
 .../opsgenie/hooks/test_opsgenie_alert.py          |  17 +-
 .../opsgenie/operators/test_opsgenie_alert.py      |  30 +-
 tests/providers/oracle/hooks/test_oracle.py        |  61 ++-
 tests/providers/pagerduty/hooks/test_pagerduty.py  |  12 +-
 tests/providers/postgres/hooks/test_postgres.py    |  16 +-
 tests/providers/presto/hooks/test_presto.py        |  26 +-
 tests/providers/qubole/hooks/test_qubole.py        |   6 +-
 tests/providers/qubole/hooks/test_qubole_check.py  |   8 +-
 tests/providers/qubole/operators/test_qubole.py    |  50 ++-
 .../qubole/operators/test_qubole_check.py          |  11 +-
 tests/providers/qubole/sensors/test_qubole.py      |   8 +-
 tests/providers/redis/hooks/test_redis.py          |  22 +-
 .../redis/operators/test_redis_publish.py          |   8 +-
 tests/providers/redis/sensors/test_redis_key.py    |   4 +-
 .../providers/redis/sensors/test_redis_pub_sub.py  |  24 +-
 .../providers/salesforce/hooks/test_salesforce.py  |  15 +-
 tests/providers/salesforce/hooks/test_tableau.py   |   2 +-
 .../operators/test_tableau_refresh_workbook.py     |   9 +-
 .../salesforce/sensors/test_tableau_job_status.py  |   6 +-
 tests/providers/samba/hooks/test_samba.py          |   5 +-
 tests/providers/segment/hooks/test_segment.py      |  10 +-
 .../segment/operators/test_segment_track_event.py  |  10 +-
 tests/providers/sftp/hooks/test_sftp.py            |  60 +--
 tests/providers/sftp/operators/test_sftp.py        |  73 ++--
 tests/providers/sftp/sensors/test_sftp.py          |   9 +-
 .../singularity/operators/test_singularity.py      |   5 +-
 tests/providers/slack/hooks/test_slack.py          |  18 +-
 tests/providers/slack/hooks/test_slack_webhook.py  |   8 +-
 tests/providers/slack/operators/test_slack.py      |  50 +--
 .../slack/operators/test_slack_webhook.py          |  24 +-
 tests/providers/snowflake/hooks/test_snowflake.py  |  14 +-
 tests/providers/sqlite/hooks/test_sqlite.py        |  10 +-
 tests/providers/ssh/hooks/test_ssh.py              |  24 +-
 tests/providers/ssh/operators/test_ssh.py          |  43 +-
 tests/providers/telegram/hooks/test_telegram.py    |  29 +-
 .../providers/telegram/operators/test_telegram.py  |  11 +-
 tests/providers/vertica/hooks/test_vertica.py      |  10 +-
 tests/providers/yandex/hooks/test_yandex.py        |  11 +-
 .../yandex/hooks/test_yandexcloud_dataproc.py      |  12 +-
 tests/providers/zendesk/hooks/test_zendesk.py      |   3 +-
 tests/secrets/test_local_filesystem.py             |  56 ++-
 tests/secrets/test_secrets.py                      |  12 +-
 tests/secrets/test_secrets_backends.py             |  20 +-
 tests/security/test_kerberos.py                    |  19 +-
 tests/sensors/test_base.py                         | 217 +++++------
 tests/sensors/test_bash.py                         |   4 +-
 tests/sensors/test_external_task_sensor.py         |  36 +-
 tests/sensors/test_filesystem.py                   |   8 +-
 tests/sensors/test_python.py                       |  14 +-
 tests/sensors/test_smart_sensor_operator.py        |  46 +--
 tests/sensors/test_sql_sensor.py                   |  50 +--
 tests/sensors/test_timeout_sensor.py               |   7 +-
 tests/sensors/test_weekday_sensor.py               |  13 +-
 tests/serialization/test_dag_serialization.py      | 185 +++++----
 tests/task/task_runner/test_cgroup_task_runner.py  |   4 +-
 .../task/task_runner/test_standard_task_runner.py  |  24 +-
 tests/task/task_runner/test_task_runner.py         |   6 +-
 tests/test_utils/perf/perf_kit/__init__.py         |   8 +-
 tests/test_utils/reset_warning_registry.py         |   4 +-
 .../test_remote_user_api_auth_backend.py           |  24 +-
 .../deps/test_dag_ti_slots_available_dep.py        |   4 +-
 tests/ti_deps/deps/test_dag_unpaused_dep.py        |   4 +-
 tests/ti_deps/deps/test_dagrun_exists_dep.py       |   4 +-
 tests/ti_deps/deps/test_dagrun_id_dep.py           |   8 +-
 tests/ti_deps/deps/test_not_in_retry_period_dep.py |  10 +-
 .../ti_deps/deps/test_pool_slots_available_dep.py  |   8 +-
 tests/ti_deps/deps/test_prev_dagrun_dep.py         |  12 +-
 tests/ti_deps/deps/test_ready_to_reschedule_dep.py |  14 +-
 tests/ti_deps/deps/test_runnable_exec_date_dep.py  |   8 +-
 tests/ti_deps/deps/test_task_concurrency.py        |   8 +-
 tests/ti_deps/deps/test_task_not_running_dep.py    |   4 +-
 tests/ti_deps/deps/test_trigger_rule_dep.py        | 100 +++--
 tests/ti_deps/deps/test_valid_state_dep.py         |   8 +-
 tests/utils/log/test_file_processor_handler.py     |  24 +-
 tests/utils/log/test_json_formatter.py             |  12 +-
 tests/utils/log/test_log_reader.py                 | 122 +++---
 tests/utils/test_cli_util.py                       |  37 +-
 tests/utils/test_compression.py                    |  20 +-
 tests/utils/test_dag_cycle.py                      |  24 +-
 tests/utils/test_dag_processing.py                 |  32 +-
 tests/utils/test_dates.py                          |  45 ++-
 tests/utils/test_db.py                             |   4 +-
 tests/utils/test_decorators.py                     |  22 +-
 tests/utils/test_docs.py                           |   2 +-
 tests/utils/test_dot_renderer.py                   |  32 +-
 tests/utils/test_email.py                          | 133 +++----
 tests/utils/test_helpers.py                        |  62 +--
 tests/utils/test_json.py                           |  34 +-
 tests/utils/test_log_handlers.py                   |  51 +--
 tests/utils/test_logging_mixin.py                  |  14 +-
 tests/utils/test_module_loading.py                 |   8 +-
 tests/utils/test_net.py                            |  12 +-
 tests/utils/test_operator_helpers.py               |  42 +-
 tests/utils/test_process_utils.py                  |  52 +--
 tests/utils/test_python_virtualenv.py              |   6 +-
 tests/utils/test_serve_logs.py                     |   2 +-
 tests/utils/test_sqlalchemy.py                     |  18 +-
 .../test_task_handler_with_custom_formatter.py     |   4 +-
 tests/utils/test_timezone.py                       |  40 +-
 tests/utils/test_trigger_rule.py                   |  20 +-
 tests/utils/test_weekday.py                        |  18 +-
 tests/utils/test_weight_rule.py                    |   8 +-
 .../www/api/experimental/test_dag_runs_endpoint.py |  46 +--
 tests/www/api/experimental/test_endpoints.py       | 178 +++++----
 tests/www/test_app.py                              |  38 +-
 tests/www/test_init_views.py                       |   9 +-
 tests/www/test_security.py                         |  73 ++--
 tests/www/test_utils.py                            | 117 +++---
 tests/www/test_validators.py                       |  62 ++-
 tests/www/test_views.py                            | 348 ++++++++---------
 534 files changed, 9942 insertions(+), 10443 deletions(-)

diff --git a/TESTING.rst b/TESTING.rst
index 30e6fff..1efc63c3 100644
--- a/TESTING.rst
+++ b/TESTING.rst
@@ -232,7 +232,7 @@ Example test here:
             res = render_chart('GIT-SYNC', helm_settings,
                                show_only=["templates/scheduler/scheduler-deployment.yaml"])
             dep: k8s.V1Deployment = render_k8s_object(res[0], k8s.V1Deployment)
-            self.assertEqual("dags", dep.spec.template.spec.volumes[1].name)
+            assert "dags" == dep.spec.template.spec.volumes[1].name
 
 To run tests using breeze run the following command
 
@@ -330,7 +330,7 @@ Example of the ``redis`` integration test:
         hook = RedisHook(redis_conn_id='redis_default')
         redis = hook.get_conn()
 
-        self.assertTrue(redis.ping(), 'Connection to Redis with PING works.')
+        assert redis.ping(), 'Connection to Redis with PING works.'
 
 The markers can be specified at the test level or the class level (then all tests in this class
 require an integration). You can add multiple markers with different integrations for tests that
diff --git a/chart/tests/test_basic_helm_chart.py b/chart/tests/test_basic_helm_chart.py
index 7d835a6..f8526b0 100644
--- a/chart/tests/test_basic_helm_chart.py
+++ b/chart/tests/test_basic_helm_chart.py
@@ -39,52 +39,47 @@ class TestBaseChartTest(unittest.TestCase):
         list_of_kind_names_tuples = [
             (k8s_object['kind'], k8s_object['metadata']['name']) for k8s_object in k8s_objects
         ]
-        self.assertEqual(
-            list_of_kind_names_tuples,
-            [
-                ('ServiceAccount', 'TEST-BASIC-scheduler'),
-                ('ServiceAccount', 'TEST-BASIC-webserver'),
-                ('ServiceAccount', 'TEST-BASIC-worker'),
-                ('Secret', 'TEST-BASIC-postgresql'),
-                ('Secret', 'TEST-BASIC-airflow-metadata'),
-                ('Secret', 'TEST-BASIC-airflow-result-backend'),
-                ('ConfigMap', 'TEST-BASIC-airflow-config'),
-                ('Role', 'TEST-BASIC-pod-launcher-role'),
-                ('Role', 'TEST-BASIC-pod-log-reader-role'),
-                ('RoleBinding', 'TEST-BASIC-pod-launcher-rolebinding'),
-                ('RoleBinding', 'TEST-BASIC-pod-log-reader-rolebinding'),
-                ('Service', 'TEST-BASIC-postgresql-headless'),
-                ('Service', 'TEST-BASIC-postgresql'),
-                ('Service', 'TEST-BASIC-statsd'),
-                ('Service', 'TEST-BASIC-webserver'),
-                ('Deployment', 'TEST-BASIC-scheduler'),
-                ('Deployment', 'TEST-BASIC-statsd'),
-                ('Deployment', 'TEST-BASIC-webserver'),
-                ('StatefulSet', 'TEST-BASIC-postgresql'),
-                ('Secret', 'TEST-BASIC-fernet-key'),
-                ('Job', 'TEST-BASIC-create-user'),
-                ('Job', 'TEST-BASIC-run-airflow-migrations'),
-            ],
-        )
-        self.assertEqual(OBJECT_COUNT_IN_BASIC_DEPLOYMENT, len(k8s_objects))
+        assert list_of_kind_names_tuples == [
+            ('ServiceAccount', 'TEST-BASIC-scheduler'),
+            ('ServiceAccount', 'TEST-BASIC-webserver'),
+            ('ServiceAccount', 'TEST-BASIC-worker'),
+            ('Secret', 'TEST-BASIC-postgresql'),
+            ('Secret', 'TEST-BASIC-airflow-metadata'),
+            ('Secret', 'TEST-BASIC-airflow-result-backend'),
+            ('ConfigMap', 'TEST-BASIC-airflow-config'),
+            ('Role', 'TEST-BASIC-pod-launcher-role'),
+            ('Role', 'TEST-BASIC-pod-log-reader-role'),
+            ('RoleBinding', 'TEST-BASIC-pod-launcher-rolebinding'),
+            ('RoleBinding', 'TEST-BASIC-pod-log-reader-rolebinding'),
+            ('Service', 'TEST-BASIC-postgresql-headless'),
+            ('Service', 'TEST-BASIC-postgresql'),
+            ('Service', 'TEST-BASIC-statsd'),
+            ('Service', 'TEST-BASIC-webserver'),
+            ('Deployment', 'TEST-BASIC-scheduler'),
+            ('Deployment', 'TEST-BASIC-statsd'),
+            ('Deployment', 'TEST-BASIC-webserver'),
+            ('StatefulSet', 'TEST-BASIC-postgresql'),
+            ('Secret', 'TEST-BASIC-fernet-key'),
+            ('Job', 'TEST-BASIC-create-user'),
+            ('Job', 'TEST-BASIC-run-airflow-migrations'),
+        ]
+        assert OBJECT_COUNT_IN_BASIC_DEPLOYMENT == len(k8s_objects)
         for k8s_object in k8s_objects:
             labels = jmespath.search('metadata.labels', k8s_object) or {}
             if 'postgresql' in labels.get('chart'):
                 continue
             k8s_name = k8s_object['kind'] + ":" + k8s_object['metadata']['name']
-            self.assertEqual(
-                'TEST-VALUE',
-                labels.get("TEST-LABEL"),
-                f"Missing label TEST-LABEL on {k8s_name}. Current labels: {labels}",
-            )
+            assert 'TEST-VALUE' == labels.get(
+                "TEST-LABEL"
+            ), f"Missing label TEST-LABEL on {k8s_name}. Current labels: {labels}"
 
     def test_basic_deployment_without_default_users(self):
         k8s_objects = render_chart("TEST-BASIC", {"webserver": {'defaultUser': {'enabled': False}}})
         list_of_kind_names_tuples = [
             (k8s_object['kind'], k8s_object['metadata']['name']) for k8s_object in k8s_objects
         ]
-        self.assertNotIn(('Job', 'TEST-BASIC-create-user'), list_of_kind_names_tuples)
-        self.assertEqual(OBJECT_COUNT_IN_BASIC_DEPLOYMENT - 1, len(k8s_objects))
+        assert ('Job', 'TEST-BASIC-create-user') not in list_of_kind_names_tuples
+        assert OBJECT_COUNT_IN_BASIC_DEPLOYMENT - 1 == len(k8s_objects)
 
     def test_network_policies_are_valid(self):
         k8s_objects = render_chart(
@@ -109,7 +104,7 @@ class TestBaseChartTest(unittest.TestCase):
             ('NetworkPolicy', 'TEST-BASIC-worker-policy'),
         ]
         for kind_name in expected_kind_names:
-            self.assertIn(kind_name, kind_names_tuples)
+            assert kind_name in kind_names_tuples
 
     def test_chart_is_consistent_with_official_airflow_image(self):
         def get_k8s_objs_with_image(obj: Union[List[Any], Dict[str, Any]]) -> List[Dict[str, Any]]:
@@ -137,4 +132,4 @@ class TestBaseChartTest(unittest.TestCase):
             image: str = obj["image"]  # pylint: disable=invalid-sequence-index
             if image.startswith(image_repo):
                 # Make sure that a command is not specified
-                self.assertNotIn("command", obj)
+                assert "command" not in obj
diff --git a/chart/tests/test_celery_kubernetes_executor.py b/chart/tests/test_celery_kubernetes_executor.py
index 6c54e80..6e22ad2 100644
--- a/chart/tests/test_celery_kubernetes_executor.py
+++ b/chart/tests/test_celery_kubernetes_executor.py
@@ -32,8 +32,8 @@ class CeleryKubernetesExecutorTest(unittest.TestCase):
             show_only=["templates/workers/worker-deployment.yaml"],
         )
 
-        self.assertEqual("config", jmespath.search("spec.template.spec.volumes[0].name", docs[0]))
-        self.assertEqual("dags", jmespath.search("spec.template.spec.volumes[1].name", docs[0]))
+        assert "config" == jmespath.search("spec.template.spec.volumes[0].name", docs[0])
+        assert "dags" == jmespath.search("spec.template.spec.volumes[1].name", docs[0])
 
     def test_should_create_a_worker_deployment_with_the_celery_kubernetes_executor(self):
         docs = render_chart(
@@ -44,5 +44,5 @@ class CeleryKubernetesExecutorTest(unittest.TestCase):
             show_only=["templates/workers/worker-deployment.yaml"],
         )
 
-        self.assertEqual("config", jmespath.search("spec.template.spec.volumes[0].name", docs[0]))
-        self.assertEqual("dags", jmespath.search("spec.template.spec.volumes[1].name", docs[0]))
+        assert "config" == jmespath.search("spec.template.spec.volumes[0].name", docs[0])
+        assert "dags" == jmespath.search("spec.template.spec.volumes[1].name", docs[0])
diff --git a/chart/tests/test_cleanup_pods.py b/chart/tests/test_cleanup_pods.py
index df2b3a0..68f5b7c 100644
--- a/chart/tests/test_cleanup_pods.py
+++ b/chart/tests/test_cleanup_pods.py
@@ -31,27 +31,21 @@ class CleanupPodsTest(unittest.TestCase):
             show_only=["templates/cleanup/cleanup-cronjob.yaml"],
         )
 
-        self.assertEqual(
-            "airflow-cleanup-pods",
-            jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].name", docs[0]),
+        assert "airflow-cleanup-pods" == jmespath.search(
+            "spec.jobTemplate.spec.template.spec.containers[0].name", docs[0]
         )
-        self.assertEqual(
-            "apache/airflow:2.0.0",
-            jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].image", docs[0]),
+        assert "apache/airflow:2.0.0" == jmespath.search(
+            "spec.jobTemplate.spec.template.spec.containers[0].image", docs[0]
         )
-        self.assertIn(
-            {"name": "config", "configMap": {"name": "RELEASE-NAME-airflow-config"}},
-            jmespath.search("spec.jobTemplate.spec.template.spec.volumes", docs[0]),
-        )
-        self.assertIn(
-            {
-                "name": "config",
-                "mountPath": "/opt/airflow/airflow.cfg",
-                "subPath": "airflow.cfg",
-                "readOnly": True,
-            },
-            jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].volumeMounts", docs[0]),
+        assert {"name": "config", "configMap": {"name": "RELEASE-NAME-airflow-config"}} in jmespath.search(
+            "spec.jobTemplate.spec.template.spec.volumes", docs[0]
         )
+        assert {
+            "name": "config",
+            "mountPath": "/opt/airflow/airflow.cfg",
+            "subPath": "airflow.cfg",
+            "readOnly": True,
+        } in jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].volumeMounts", docs[0])
 
     def test_should_change_image_when_set_airflow_image(self):
         docs = render_chart(
@@ -62,7 +56,6 @@ class CleanupPodsTest(unittest.TestCase):
             show_only=["templates/cleanup/cleanup-cronjob.yaml"],
         )
 
-        self.assertEqual(
-            "airflow:test",
-            jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].image", docs[0]),
+        assert "airflow:test" == jmespath.search(
+            "spec.jobTemplate.spec.template.spec.containers[0].image", docs[0]
         )
diff --git a/chart/tests/test_dags_persistent_volume_claim.py b/chart/tests/test_dags_persistent_volume_claim.py
index 946c40f..b0035c7 100644
--- a/chart/tests/test_dags_persistent_volume_claim.py
+++ b/chart/tests/test_dags_persistent_volume_claim.py
@@ -29,7 +29,7 @@ class DagsPersistentVolumeClaimTest(unittest.TestCase):
             show_only=["templates/dags-persistent-volume-claim.yaml"],
         )
 
-        self.assertEqual(0, len(docs))
+        assert 0 == len(docs)
 
     def test_should_not_generate_a_document_when_using_an_existing_claim(self):
         docs = render_chart(
@@ -37,7 +37,7 @@ class DagsPersistentVolumeClaimTest(unittest.TestCase):
             show_only=["templates/dags-persistent-volume-claim.yaml"],
         )
 
-        self.assertEqual(0, len(docs))
+        assert 0 == len(docs)
 
     def test_should_generate_a_document_if_persistence_is_enabled_and_not_using_an_existing_claim(self):
         docs = render_chart(
@@ -45,7 +45,7 @@ class DagsPersistentVolumeClaimTest(unittest.TestCase):
             show_only=["templates/dags-persistent-volume-claim.yaml"],
         )
 
-        self.assertEqual(1, len(docs))
+        assert 1 == len(docs)
 
     def test_should_set_pvc_details_correctly(self):
         docs = render_chart(
@@ -63,11 +63,8 @@ class DagsPersistentVolumeClaimTest(unittest.TestCase):
             show_only=["templates/dags-persistent-volume-claim.yaml"],
         )
 
-        self.assertEqual(
-            {
-                "accessModes": ["ReadWriteMany"],
-                "resources": {"requests": {"storage": "1G"}},
-                "storageClassName": "MyStorageClass",
-            },
-            jmespath.search("spec", docs[0]),
-        )
+        assert {
+            "accessModes": ["ReadWriteMany"],
+            "resources": {"requests": {"storage": "1G"}},
+            "storageClassName": "MyStorageClass",
+        } == jmespath.search("spec", docs[0])
diff --git a/chart/tests/test_extra_configmaps_secrets.py b/chart/tests/test_extra_configmaps_secrets.py
index 378d80e..88fb77a 100644
--- a/chart/tests/test_extra_configmaps_secrets.py
+++ b/chart/tests/test_extra_configmaps_secrets.py
@@ -50,7 +50,7 @@ class ExtraConfigMapsSecretsTest(unittest.TestCase):
             ("ConfigMap", f"{RELEASE_NAME}-airflow-variables"),
             ("ConfigMap", f"{RELEASE_NAME}-other-variables"),
         ]
-        self.assertEqual(set(k8s_objects_by_key.keys()), set(all_expected_keys))
+        assert set(k8s_objects_by_key.keys()) == set(all_expected_keys)
 
         all_expected_data = [
             {"AIRFLOW_VAR_HELLO_MESSAGE": "Hi!", "AIRFLOW_VAR_KUBERNETES_NAMESPACE": "default"},
@@ -58,7 +58,7 @@ class ExtraConfigMapsSecretsTest(unittest.TestCase):
         ]
         for expected_key, expected_data in zip(all_expected_keys, all_expected_data):
             configmap_obj = k8s_objects_by_key[expected_key]
-            self.assertEqual(configmap_obj["data"], expected_data)
+            assert configmap_obj["data"] == expected_data
 
     def test_extra_secrets(self):
         values_str = textwrap.dedent(
@@ -88,7 +88,7 @@ class ExtraConfigMapsSecretsTest(unittest.TestCase):
             ("Secret", f"{RELEASE_NAME}-airflow-connections"),
             ("Secret", f"{RELEASE_NAME}-other-secrets"),
         ]
-        self.assertEqual(set(k8s_objects_by_key.keys()), set(all_expected_keys))
+        assert set(k8s_objects_by_key.keys()) == set(all_expected_keys)
 
         all_expected_data = [
             {"AIRFLOW_CON_AWS": b64encode(b"aws_connection_string").decode("utf-8")},
@@ -106,5 +106,5 @@ class ExtraConfigMapsSecretsTest(unittest.TestCase):
             all_expected_keys, all_expected_data, all_expected_string_data
         ):
             configmap_obj = k8s_objects_by_key[expected_key]
-            self.assertEqual(configmap_obj["data"], expected_data)
-            self.assertEqual(configmap_obj["stringData"], expected_string_data)
+            assert configmap_obj["data"] == expected_data
+            assert configmap_obj["stringData"] == expected_string_data
diff --git a/chart/tests/test_extra_env_env_from.py b/chart/tests/test_extra_env_env_from.py
index 7e1b28d..a2ac58f 100644
--- a/chart/tests/test_extra_env_env_from.py
+++ b/chart/tests/test_extra_env_env_from.py
@@ -98,7 +98,7 @@ class ExtraEnvEnvFromTest(unittest.TestCase):
         k8s_object = self.k8s_objects_by_key[k8s_obj_key]
         for path in env_paths:
             env = jmespath.search(f"{path}.env", k8s_object)
-            self.assertIn(expected_env_as_str, yaml.dump(env))
+            assert expected_env_as_str in yaml.dump(env)
 
     @parameterized.expand(PARAMS)
     def test_extra_env_from(self, k8s_obj_key, env_from_paths):
@@ -114,4 +114,4 @@ class ExtraEnvEnvFromTest(unittest.TestCase):
         k8s_object = self.k8s_objects_by_key[k8s_obj_key]
         for path in env_from_paths:
             env_from = jmespath.search(f"{path}.envFrom", k8s_object)
-            self.assertIn(expected_env_from_as_str, yaml.dump(env_from))
+            assert expected_env_from_as_str in yaml.dump(env_from)
diff --git a/chart/tests/test_flower_authorization.py b/chart/tests/test_flower_authorization.py
index 0520ddd..4ef4db9 100644
--- a/chart/tests/test_flower_authorization.py
+++ b/chart/tests/test_flower_authorization.py
@@ -33,17 +33,14 @@ class FlowerAuthorizationTest(unittest.TestCase):
             show_only=["templates/flower/flower-deployment.yaml"],
         )
 
-        self.assertEqual(
-            "AIRFLOW__CELERY__FLOWER_BASIC_AUTH",
-            jmespath.search("spec.template.spec.containers[0].env[0].name", docs[0]),
+        assert "AIRFLOW__CELERY__FLOWER_BASIC_AUTH" == jmespath.search(
+            "spec.template.spec.containers[0].env[0].name", docs[0]
         )
-        self.assertEqual(
-            ['curl', '--user', '$AIRFLOW__CELERY__FLOWER_BASIC_AUTH', 'localhost:7777'],
-            jmespath.search("spec.template.spec.containers[0].livenessProbe.exec.command", docs[0]),
+        assert ['curl', '--user', '$AIRFLOW__CELERY__FLOWER_BASIC_AUTH', 'localhost:7777'] == jmespath.search(
+            "spec.template.spec.containers[0].livenessProbe.exec.command", docs[0]
         )
-        self.assertEqual(
-            ['curl', '--user', '$AIRFLOW__CELERY__FLOWER_BASIC_AUTH', 'localhost:7777'],
-            jmespath.search("spec.template.spec.containers[0].readinessProbe.exec.command", docs[0]),
+        assert ['curl', '--user', '$AIRFLOW__CELERY__FLOWER_BASIC_AUTH', 'localhost:7777'] == jmespath.search(
+            "spec.template.spec.containers[0].readinessProbe.exec.command", docs[0]
         )
 
     def test_should_create_flower_deployment_without_authorization(self):
@@ -55,15 +52,12 @@ class FlowerAuthorizationTest(unittest.TestCase):
             show_only=["templates/flower/flower-deployment.yaml"],
         )
 
-        self.assertEqual(
-            "AIRFLOW__CORE__FERNET_KEY",
-            jmespath.search("spec.template.spec.containers[0].env[0].name", docs[0]),
+        assert "AIRFLOW__CORE__FERNET_KEY" == jmespath.search(
+            "spec.template.spec.containers[0].env[0].name", docs[0]
         )
-        self.assertEqual(
-            ['curl', 'localhost:7777'],
-            jmespath.search("spec.template.spec.containers[0].livenessProbe.exec.command", docs[0]),
+        assert ['curl', 'localhost:7777'] == jmespath.search(
+            "spec.template.spec.containers[0].livenessProbe.exec.command", docs[0]
         )
-        self.assertEqual(
-            ['curl', 'localhost:7777'],
-            jmespath.search("spec.template.spec.containers[0].readinessProbe.exec.command", docs[0]),
+        assert ['curl', 'localhost:7777'] == jmespath.search(
+            "spec.template.spec.containers[0].readinessProbe.exec.command", docs[0]
         )
diff --git a/chart/tests/test_git_sync_scheduler.py b/chart/tests/test_git_sync_scheduler.py
index 58ea1c7..1bfdf27 100644
--- a/chart/tests/test_git_sync_scheduler.py
+++ b/chart/tests/test_git_sync_scheduler.py
@@ -29,7 +29,7 @@ class GitSyncSchedulerTest(unittest.TestCase):
             show_only=["templates/scheduler/scheduler-deployment.yaml"],
         )
 
-        self.assertEqual("dags", jmespath.search("spec.template.spec.volumes[1].name", docs[0]))
+        assert "dags" == jmespath.search("spec.template.spec.volumes[1].name", docs[0])
 
     def test_validate_the_git_sync_container_spec(self):
         docs = render_chart(
@@ -64,27 +64,24 @@ class GitSyncSchedulerTest(unittest.TestCase):
             show_only=["templates/scheduler/scheduler-deployment.yaml"],
         )
 
-        self.assertEqual(
-            {
-                "name": "git-sync-test",
-                "securityContext": {"runAsUser": 65533},
-                "image": "test-registry/test-repo:test-tag",
-                "imagePullPolicy": "Allways",
-                "env": [
-                    {"name": "GIT_SYNC_REV", "value": "HEAD"},
-                    {"name": "GIT_SYNC_BRANCH", "value": "test-branch"},
-                    {"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"},
-                    {"name": "GIT_SYNC_DEPTH", "value": "1"},
-                    {"name": "GIT_SYNC_ROOT", "value": "/git-root"},
-                    {"name": "GIT_SYNC_DEST", "value": "test-dest"},
-                    {"name": "GIT_SYNC_ADD_USER", "value": "true"},
-                    {"name": "GIT_SYNC_WAIT", "value": "66"},
-                    {"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"},
-                ],
-                "volumeMounts": [{"mountPath": "/git-root", "name": "dags"}],
-            },
-            jmespath.search("spec.template.spec.containers[1]", docs[0]),
-        )
+        assert {
+            "name": "git-sync-test",
+            "securityContext": {"runAsUser": 65533},
+            "image": "test-registry/test-repo:test-tag",
+            "imagePullPolicy": "Allways",
+            "env": [
+                {"name": "GIT_SYNC_REV", "value": "HEAD"},
+                {"name": "GIT_SYNC_BRANCH", "value": "test-branch"},
+                {"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"},
+                {"name": "GIT_SYNC_DEPTH", "value": "1"},
+                {"name": "GIT_SYNC_ROOT", "value": "/git-root"},
+                {"name": "GIT_SYNC_DEST", "value": "test-dest"},
+                {"name": "GIT_SYNC_ADD_USER", "value": "true"},
+                {"name": "GIT_SYNC_WAIT", "value": "66"},
+                {"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"},
+            ],
+            "volumeMounts": [{"mountPath": "/git-root", "name": "dags"}],
+        } == jmespath.search("spec.template.spec.containers[1]", docs[0])
 
     def test_validate_if_ssh_params_are_added(self):
         docs = render_chart(
@@ -102,22 +99,19 @@ class GitSyncSchedulerTest(unittest.TestCase):
             show_only=["templates/scheduler/scheduler-deployment.yaml"],
         )
 
-        self.assertIn(
-            {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"},
-            jmespath.search("spec.template.spec.containers[1].env", docs[0]),
+        assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search(
+            "spec.template.spec.containers[1].env", docs[0]
         )
-        self.assertIn(
-            {"name": "GIT_SYNC_SSH", "value": "true"},
-            jmespath.search("spec.template.spec.containers[1].env", docs[0]),
+        assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search(
+            "spec.template.spec.containers[1].env", docs[0]
         )
-        self.assertIn(
-            {"name": "GIT_KNOWN_HOSTS", "value": "false"},
-            jmespath.search("spec.template.spec.containers[1].env", docs[0]),
-        )
-        self.assertIn(
-            {"name": "git-sync-ssh-key", "secret": {"secretName": "ssh-secret", "defaultMode": 288}},
-            jmespath.search("spec.template.spec.volumes", docs[0]),
+        assert {"name": "GIT_KNOWN_HOSTS", "value": "false"} in jmespath.search(
+            "spec.template.spec.containers[1].env", docs[0]
         )
+        assert {
+            "name": "git-sync-ssh-key",
+            "secret": {"secretName": "ssh-secret", "defaultMode": 288},
+        } in jmespath.search("spec.template.spec.volumes", docs[0])
 
     def test_should_set_username_and_pass_env_variables(self):
         docs = render_chart(
@@ -133,20 +127,14 @@ class GitSyncSchedulerTest(unittest.TestCase):
             show_only=["templates/scheduler/scheduler-deployment.yaml"],
         )
 
-        self.assertIn(
-            {
-                "name": "GIT_SYNC_USERNAME",
-                "valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_USERNAME"}},
-            },
-            jmespath.search("spec.template.spec.containers[1].env", docs[0]),
-        )
-        self.assertIn(
-            {
-                "name": "GIT_SYNC_PASSWORD",
-                "valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_PASSWORD"}},
-            },
-            jmespath.search("spec.template.spec.containers[1].env", docs[0]),
-        )
+        assert {
+            "name": "GIT_SYNC_USERNAME",
+            "valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_USERNAME"}},
+        } in jmespath.search("spec.template.spec.containers[1].env", docs[0])
+        assert {
+            "name": "GIT_SYNC_PASSWORD",
+            "valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_PASSWORD"}},
+        } in jmespath.search("spec.template.spec.containers[1].env", docs[0])
 
     def test_should_set_the_volume_claim_correctly_when_using_an_existing_claim(self):
         docs = render_chart(
@@ -154,9 +142,8 @@ class GitSyncSchedulerTest(unittest.TestCase):
             show_only=["templates/scheduler/scheduler-deployment.yaml"],
         )
 
-        self.assertIn(
-            {"name": "dags", "persistentVolumeClaim": {"claimName": "test-claim"}},
-            jmespath.search("spec.template.spec.volumes", docs[0]),
+        assert {"name": "dags", "persistentVolumeClaim": {"claimName": "test-claim"}} in jmespath.search(
+            "spec.template.spec.volumes", docs[0]
         )
 
     def test_should_add_extra_volume_and_extra_volume_mount(self):
@@ -176,10 +163,9 @@ class GitSyncSchedulerTest(unittest.TestCase):
             show_only=["templates/scheduler/scheduler-deployment.yaml"],
         )
 
-        self.assertIn(
-            {"name": "test-volume", "emptyDir": {}}, jmespath.search("spec.template.spec.volumes", docs[0])
+        assert {"name": "test-volume", "emptyDir": {}} in jmespath.search(
+            "spec.template.spec.volumes", docs[0]
         )
-        self.assertIn(
-            {"name": "test-volume", "mountPath": "/opt/test"},
-            jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0]),
+        assert {"name": "test-volume", "mountPath": "/opt/test"} in jmespath.search(
+            "spec.template.spec.containers[0].volumeMounts", docs[0]
         )
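
For reference, the membership asserts above all share one shape: jmespath.search() pulls a list out of the rendered manifest, and the expected dict must be an element of it. A minimal sketch of that pattern, separate from the patch itself (the manifest dict is invented for illustration):

    import jmespath

    # Illustrative stand-in for docs[0] as returned by render_chart():
    manifest = {
        "spec": {
            "template": {
                "spec": {
                    "volumes": [
                        {"name": "test-volume", "emptyDir": {}},
                    ]
                }
            }
        }
    }

    # jmespath.search() returns the matched subtree (here a list of dicts),
    # so a plain `in` assert replaces self.assertIn():
    assert {"name": "test-volume", "emptyDir": {}} in jmespath.search(
        "spec.template.spec.volumes", manifest
    )
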
diff --git a/chart/tests/test_git_sync_webserver.py b/chart/tests/test_git_sync_webserver.py
index 09c9aa3..a232287 100644
--- a/chart/tests/test_git_sync_webserver.py
+++ b/chart/tests/test_git_sync_webserver.py
@@ -29,7 +29,7 @@ class GitSyncWebserverTest(unittest.TestCase):
             show_only=["templates/webserver/webserver-deployment.yaml"],
         )
 
-        self.assertEqual("dags", jmespath.search("spec.template.spec.volumes[1].name", docs[0]))
+        assert "dags" == jmespath.search("spec.template.spec.volumes[1].name", docs[0])
 
     def test_should_add_dags_volume_to_the_webserver_if_git_sync_is_enabled_and_persistence_is_disabled(self):
         docs = render_chart(
@@ -37,7 +37,7 @@ class GitSyncWebserverTest(unittest.TestCase):
             show_only=["templates/webserver/webserver-deployment.yaml"],
         )
 
-        self.assertEqual("dags", jmespath.search("spec.template.spec.volumes[1].name", docs[0]))
+        assert "dags" == jmespath.search("spec.template.spec.volumes[1].name", docs[0])
 
     def test_should_add_git_sync_container_to_webserver_if_persistence_is_not_enabled_but_git_sync_is(self):
         docs = render_chart(
@@ -50,7 +50,7 @@ class GitSyncWebserverTest(unittest.TestCase):
             show_only=["templates/webserver/webserver-deployment.yaml"],
         )
 
-        self.assertEqual("git-sync", jmespath.search("spec.template.spec.containers[0].name", docs[0]))
+        assert "git-sync" == jmespath.search("spec.template.spec.containers[0].name", docs[0])
 
     def test_should_have_service_account_defined(self):
         docs = render_chart(
@@ -58,6 +58,4 @@ class GitSyncWebserverTest(unittest.TestCase):
             show_only=["templates/webserver/webserver-deployment.yaml"],
         )
 
-        self.assertEqual(
-            "RELEASE-NAME-webserver", jmespath.search("spec.template.spec.serviceAccountName", docs[0])
-        )
+        assert "RELEASE-NAME-webserver" == jmespath.search("spec.template.spec.serviceAccountName", docs[0])
diff --git a/chart/tests/test_git_sync_worker.py b/chart/tests/test_git_sync_worker.py
index a56b0dc..c48d001 100644
--- a/chart/tests/test_git_sync_worker.py
+++ b/chart/tests/test_git_sync_worker.py
@@ -32,8 +32,8 @@ class GitSyncWorkerTest(unittest.TestCase):
             show_only=["templates/workers/worker-deployment.yaml"],
         )
 
-        self.assertEqual("config", jmespath.search("spec.template.spec.volumes[0].name", docs[0]))
-        self.assertEqual("dags", jmespath.search("spec.template.spec.volumes[1].name", docs[0]))
+        assert "config" == jmespath.search("spec.template.spec.volumes[0].name", docs[0])
+        assert "dags" == jmespath.search("spec.template.spec.volumes[1].name", docs[0])
 
     def test_should_add_dags_volume_to_the_worker_if_git_sync_is_enabled_and_persistence_is_disabled(self):
         docs = render_chart(
@@ -44,8 +44,8 @@ class GitSyncWorkerTest(unittest.TestCase):
             show_only=["templates/workers/worker-deployment.yaml"],
         )
 
-        self.assertEqual("config", jmespath.search("spec.template.spec.volumes[0].name", docs[0]))
-        self.assertEqual("dags", jmespath.search("spec.template.spec.volumes[1].name", docs[0]))
+        assert "config" == jmespath.search("spec.template.spec.volumes[0].name", docs[0])
+        assert "dags" == jmespath.search("spec.template.spec.volumes[1].name", docs[0])
 
     def test_should_add_git_sync_container_to_worker_if_persistence_is_not_enabled_but_git_sync_is(self):
         docs = render_chart(
@@ -59,7 +59,7 @@ class GitSyncWorkerTest(unittest.TestCase):
             show_only=["templates/workers/worker-deployment.yaml"],
         )
 
-        self.assertEqual("git-sync", jmespath.search("spec.template.spec.containers[0].name", docs[0]))
+        assert "git-sync" == jmespath.search("spec.template.spec.containers[0].name", docs[0])
 
     def test_should_not_add_sync_container_to_worker_if_git_sync_and_persistence_are_enabled(self):
         docs = render_chart(
@@ -73,4 +73,4 @@ class GitSyncWorkerTest(unittest.TestCase):
             show_only=["templates/workers/worker-deployment.yaml"],
         )
 
-        self.assertNotEqual("git-sync", jmespath.search("spec.template.spec.containers[0].name", docs[0]))
+        assert "git-sync" != jmespath.search("spec.template.spec.containers[0].name", docs[0])
diff --git a/chart/tests/test_ingress_web.py b/chart/tests/test_ingress_web.py
index 7fcedaf..6d64b8b 100644
--- a/chart/tests/test_ingress_web.py
+++ b/chart/tests/test_ingress_web.py
@@ -34,4 +34,4 @@ class IngressWebTest(unittest.TestCase):
             values={"ingress": {"enabled": True, "web": {"annotations": {"aa": "bb", "cc": "dd"}}}},
             show_only=["templates/webserver/webserver-ingress.yaml"],
         )
-        self.assertEqual({"aa": "bb", "cc": "dd"}, jmespath.search("metadata.annotations", docs[0]))
+        assert {"aa": "bb", "cc": "dd"} == jmespath.search("metadata.annotations", docs[0])
diff --git a/chart/tests/test_keda.py b/chart/tests/test_keda.py
index 57da31a..132439d 100644
--- a/chart/tests/test_keda.py
+++ b/chart/tests/test_keda.py
@@ -30,7 +30,7 @@ class KedaTest(unittest.TestCase):
             show_only=["templates/workers/worker-kedaautoscaler.yaml"],
             validate_schema=False,
         )
-        self.assertListEqual(docs, [])
+        assert docs == []
 
     @parameterized.expand(
         [
@@ -52,6 +52,6 @@ class KedaTest(unittest.TestCase):
             validate_schema=False,
         )
         if is_created:
-            self.assertEqual("RELEASE-NAME-worker", jmespath.search("metadata.name", docs[0]))
+            assert "RELEASE-NAME-worker" == jmespath.search("metadata.name", docs[0])
         else:
-            self.assertListEqual(docs, [])
+            assert docs == []
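
The keda tests above generate one test per tuple via @parameterized.expand. A minimal sketch of that decorator, separate from the patch (the cases are invented):

    import unittest

    from parameterized import parameterized

    class ExampleTest(unittest.TestCase):
        # Each tuple becomes its own generated test method:
        @parameterized.expand([(2, True), (3, False)])
        def test_is_even(self, value, expected):
            assert (value % 2 == 0) == expected
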
diff --git a/chart/tests/test_kerberos.py b/chart/tests/test_kerberos.py
index b0cf88d..4676f65 100644
--- a/chart/tests/test_kerberos.py
+++ b/chart/tests/test_kerberos.py
@@ -29,4 +29,4 @@ class KerberosTest(unittest.TestCase):
             obj for obj in k8s_objects if obj["metadata"]["name"] != "NO-KERBEROS-airflow-config"
         ]
         k8s_objects_to_consider_str = json.dumps(k8s_objects_to_consider)
-        self.assertNotIn("kerberos", k8s_objects_to_consider_str)
+        assert "kerberos" not in k8s_objects_to_consider_str
diff --git a/chart/tests/test_migrate_database_job.py b/chart/tests/test_migrate_database_job.py
index 4b92aca..d1e9225 100644
--- a/chart/tests/test_migrate_database_job.py
+++ b/chart/tests/test_migrate_database_job.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
+import re
 import unittest
 
 import jmespath
@@ -29,7 +30,5 @@ class MigrateDatabaseJobTest(unittest.TestCase):
             show_only=["templates/migrate-database-job.yaml"],
         )
 
-        self.assertRegex(docs[0]["kind"], "Job")
-        self.assertEqual(
-            "run-airflow-migrations", jmespath.search("spec.template.spec.containers[0].name", docs[0])
-        )
+        assert re.search("Job", docs[0]["kind"])
+        assert "run-airflow-migrations" == jmespath.search("spec.template.spec.containers[0].name", docs[0])
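
Note the argument order flips in the assertRegex translation above: self.assertRegex(text, pattern) becomes re.search(pattern, text). A one-line sketch with an invented value:

    import re

    kind = "Job"  # illustrative; in the test this is docs[0]["kind"]
    # re.search() takes the pattern first and returns a truthy match object:
    assert re.search("Job", kind)
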
diff --git a/chart/tests/test_pod_launcher_role.py b/chart/tests/test_pod_launcher_role.py
index aeca27e..99bfe5e 100644
--- a/chart/tests/test_pod_launcher_role.py
+++ b/chart/tests/test_pod_launcher_role.py
@@ -45,6 +45,6 @@ class PodLauncherTest(unittest.TestCase):
         )
         if expected_accounts:
             for idx, suffix in enumerate(expected_accounts):
-                self.assertEqual(f"RELEASE-NAME-{suffix}", jmespath.search(f"subjects[{idx}].name", docs[0]))
+                assert f"RELEASE-NAME-{suffix}" == jmespath.search(f"subjects[{idx}].name", docs[0])
         else:
-            self.assertEqual([], docs)
+            assert [] == docs
diff --git a/chart/tests/test_pod_template_file.py b/chart/tests/test_pod_template_file.py
index bd0b7ac..f58fd3b 100644
--- a/chart/tests/test_pod_template_file.py
+++ b/chart/tests/test_pod_template_file.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
+import re
 import unittest
 from os import remove
 from os.path import dirname, realpath
@@ -43,9 +44,9 @@ class PodTemplateFileTest(unittest.TestCase):
             show_only=["templates/pod-template-file.yaml"],
         )
 
-        self.assertRegex(docs[0]["kind"], "Pod")
-        self.assertIsNotNone(jmespath.search("spec.containers[0].image", docs[0]))
-        self.assertEqual("base", jmespath.search("spec.containers[0].name", docs[0]))
+        assert re.search("Pod", docs[0]["kind"])
+        assert jmespath.search("spec.containers[0].image", docs[0]) is not None
+        assert "base" == jmespath.search("spec.containers[0].name", docs[0])
 
     def test_should_add_an_init_container_if_git_sync_is_true(self):
         docs = render_chart(
@@ -79,29 +80,26 @@ class PodTemplateFileTest(unittest.TestCase):
             show_only=["templates/pod-template-file.yaml"],
         )
 
-        self.assertRegex(docs[0]["kind"], "Pod")
-        self.assertEqual(
-            {
-                "name": "git-sync-test",
-                "securityContext": {"runAsUser": 65533},
-                "image": "test-registry/test-repo:test-tag",
-                "imagePullPolicy": "Allways",
-                "env": [
-                    {"name": "GIT_SYNC_REV", "value": "HEAD"},
-                    {"name": "GIT_SYNC_BRANCH", "value": "test-branch"},
-                    {"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"},
-                    {"name": "GIT_SYNC_DEPTH", "value": "1"},
-                    {"name": "GIT_SYNC_ROOT", "value": "/git-root"},
-                    {"name": "GIT_SYNC_DEST", "value": "test-dest"},
-                    {"name": "GIT_SYNC_ADD_USER", "value": "true"},
-                    {"name": "GIT_SYNC_WAIT", "value": "66"},
-                    {"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"},
-                    {"name": "GIT_SYNC_ONE_TIME", "value": "true"},
-                ],
-                "volumeMounts": [{"mountPath": "/git-root", "name": "dags"}],
-            },
-            jmespath.search("spec.initContainers[0]", docs[0]),
-        )
+        assert re.search("Pod", docs[0]["kind"])
+        assert {
+            "name": "git-sync-test",
+            "securityContext": {"runAsUser": 65533},
+            "image": "test-registry/test-repo:test-tag",
+            "imagePullPolicy": "Allways",
+            "env": [
+                {"name": "GIT_SYNC_REV", "value": "HEAD"},
+                {"name": "GIT_SYNC_BRANCH", "value": "test-branch"},
+                {"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"},
+                {"name": "GIT_SYNC_DEPTH", "value": "1"},
+                {"name": "GIT_SYNC_ROOT", "value": "/git-root"},
+                {"name": "GIT_SYNC_DEST", "value": "test-dest"},
+                {"name": "GIT_SYNC_ADD_USER", "value": "true"},
+                {"name": "GIT_SYNC_WAIT", "value": "66"},
+                {"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"},
+                {"name": "GIT_SYNC_ONE_TIME", "value": "true"},
+            ],
+            "volumeMounts": [{"mountPath": "/git-root", "name": "dags"}],
+        } == jmespath.search("spec.initContainers[0]", docs[0])
 
     def test_validate_if_ssh_params_are_added(self):
         docs = render_chart(
@@ -119,21 +117,19 @@ class PodTemplateFileTest(unittest.TestCase):
             show_only=["templates/pod-template-file.yaml"],
         )
 
-        self.assertIn(
-            {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"},
-            jmespath.search("spec.initContainers[0].env", docs[0]),
-        )
-        self.assertIn(
-            {"name": "GIT_SYNC_SSH", "value": "true"}, jmespath.search("spec.initContainers[0].env", docs[0])
+        assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search(
+            "spec.initContainers[0].env", docs[0]
         )
-        self.assertIn(
-            {"name": "GIT_KNOWN_HOSTS", "value": "false"},
-            jmespath.search("spec.initContainers[0].env", docs[0]),
+        assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search(
+            "spec.initContainers[0].env", docs[0]
         )
-        self.assertIn(
-            {"name": "git-sync-ssh-key", "secret": {"secretName": "ssh-secret", "defaultMode": 288}},
-            jmespath.search("spec.volumes", docs[0]),
+        assert {"name": "GIT_KNOWN_HOSTS", "value": "false"} in jmespath.search(
+            "spec.initContainers[0].env", docs[0]
         )
+        assert {
+            "name": "git-sync-ssh-key",
+            "secret": {"secretName": "ssh-secret", "defaultMode": 288},
+        } in jmespath.search("spec.volumes", docs[0])
 
     def test_validate_if_ssh_known_hosts_are_added(self):
         docs = render_chart(
@@ -150,25 +146,18 @@ class PodTemplateFileTest(unittest.TestCase):
             },
             show_only=["templates/pod-template-file.yaml"],
         )
-        self.assertIn(
-            {"name": "GIT_KNOWN_HOSTS", "value": "true"},
-            jmespath.search("spec.initContainers[0].env", docs[0]),
-        )
-        self.assertIn(
-            {
-                "name": "git-sync-known-hosts",
-                "configMap": {"defaultMode": 288, "name": "RELEASE-NAME-airflow-config"},
-            },
-            jmespath.search("spec.volumes", docs[0]),
-        )
-        self.assertIn(
-            {
-                "name": "git-sync-known-hosts",
-                "mountPath": "/etc/git-secret/known_hosts",
-                "subPath": "known_hosts",
-            },
-            jmespath.search("spec.containers[0].volumeMounts", docs[0]),
+        assert {"name": "GIT_KNOWN_HOSTS", "value": "true"} in jmespath.search(
+            "spec.initContainers[0].env", docs[0]
         )
+        assert {
+            "name": "git-sync-known-hosts",
+            "configMap": {"defaultMode": 288, "name": "RELEASE-NAME-airflow-config"},
+        } in jmespath.search("spec.volumes", docs[0])
+        assert {
+            "name": "git-sync-known-hosts",
+            "mountPath": "/etc/git-secret/known_hosts",
+            "subPath": "known_hosts",
+        } in jmespath.search("spec.containers[0].volumeMounts", docs[0])
 
     def test_should_set_username_and_pass_env_variables(self):
         docs = render_chart(
@@ -184,20 +173,14 @@ class PodTemplateFileTest(unittest.TestCase):
             show_only=["templates/pod-template-file.yaml"],
         )
 
-        self.assertIn(
-            {
-                "name": "GIT_SYNC_USERNAME",
-                "valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_USERNAME"}},
-            },
-            jmespath.search("spec.initContainers[0].env", docs[0]),
-        )
-        self.assertIn(
-            {
-                "name": "GIT_SYNC_PASSWORD",
-                "valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_PASSWORD"}},
-            },
-            jmespath.search("spec.initContainers[0].env", docs[0]),
-        )
+        assert {
+            "name": "GIT_SYNC_USERNAME",
+            "valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_USERNAME"}},
+        } in jmespath.search("spec.initContainers[0].env", docs[0])
+        assert {
+            "name": "GIT_SYNC_PASSWORD",
+            "valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_PASSWORD"}},
+        } in jmespath.search("spec.initContainers[0].env", docs[0])
 
     def test_should_set_the_volume_claim_correctly_when_using_an_existing_claim(self):
         docs = render_chart(
@@ -205,9 +188,8 @@ class PodTemplateFileTest(unittest.TestCase):
             show_only=["templates/pod-template-file.yaml"],
         )
 
-        self.assertIn(
-            {"name": "dags", "persistentVolumeClaim": {"claimName": "test-claim"}},
-            jmespath.search("spec.volumes", docs[0]),
+        assert {"name": "dags", "persistentVolumeClaim": {"claimName": "test-claim"}} in jmespath.search(
+            "spec.volumes", docs[0]
         )
 
     def test_should_set_a_custom_image_in_pod_template(self):
@@ -216,9 +198,9 @@ class PodTemplateFileTest(unittest.TestCase):
             show_only=["templates/pod-template-file.yaml"],
         )
 
-        self.assertRegex(docs[0]["kind"], "Pod")
-        self.assertEqual("dummy_image:latest", jmespath.search("spec.containers[0].image", docs[0]))
-        self.assertEqual("base", jmespath.search("spec.containers[0].name", docs[0]))
+        assert re.search("Pod", docs[0]["kind"])
+        assert "dummy_image:latest" == jmespath.search("spec.containers[0].image", docs[0])
+        assert "base" == jmespath.search("spec.containers[0].name", docs[0])
 
     def test_mount_airflow_cfg(self):
         docs = render_chart(
@@ -226,20 +208,16 @@ class PodTemplateFileTest(unittest.TestCase):
             show_only=["templates/pod-template-file.yaml"],
         )
 
-        self.assertRegex(docs[0]["kind"], "Pod")
-        self.assertDictEqual(
-            {'configMap': {'name': 'RELEASE-NAME-airflow-config'}, 'name': 'config'},
-            jmespath.search("spec.volumes[1]", docs[0]),
-        )
-        self.assertDictEqual(
-            {
-                'name': 'config',
-                'mountPath': '/opt/airflow/airflow.cfg',
-                'subPath': 'airflow.cfg',
-                'readOnly': True,
-            },
-            jmespath.search("spec.containers[0].volumeMounts[1]", docs[0]),
+        assert re.search("Pod", docs[0]["kind"])
+        assert {'configMap': {'name': 'RELEASE-NAME-airflow-config'}, 'name': 'config'} == jmespath.search(
+            "spec.volumes[1]", docs[0]
         )
+        assert {
+            'name': 'config',
+            'mountPath': '/opt/airflow/airflow.cfg',
+            'subPath': 'airflow.cfg',
+            'readOnly': True,
+        } == jmespath.search("spec.containers[0].volumeMounts[1]", docs[0])
 
     def test_should_create_valid_affinity_and_node_selector(self):
         docs = render_chart(
@@ -265,29 +243,20 @@ class PodTemplateFileTest(unittest.TestCase):
             show_only=["templates/pod-template-file.yaml"],
         )
 
-        self.assertRegex(docs[0]["kind"], "Pod")
-        self.assertEqual(
-            "foo",
-            jmespath.search(
-                "spec.affinity.nodeAffinity."
-                "requiredDuringSchedulingIgnoredDuringExecution."
-                "nodeSelectorTerms[0]."
-                "matchExpressions[0]."
-                "key",
-                docs[0],
-            ),
+        assert re.search("Pod", docs[0]["kind"])
+        assert "foo" == jmespath.search(
+            "spec.affinity.nodeAffinity."
+            "requiredDuringSchedulingIgnoredDuringExecution."
+            "nodeSelectorTerms[0]."
+            "matchExpressions[0]."
+            "key",
+            docs[0],
         )
-        self.assertEqual(
-            "ssd",
-            jmespath.search(
-                "spec.nodeSelector.diskType",
-                docs[0],
-            ),
+        assert "ssd" == jmespath.search(
+            "spec.nodeSelector.diskType",
+            docs[0],
         )
-        self.assertEqual(
-            "dynamic-pods",
-            jmespath.search(
-                "spec.tolerations[0].key",
-                docs[0],
-            ),
+        assert "dynamic-pods" == jmespath.search(
+            "spec.tolerations[0].key",
+            docs[0],
         )
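
The whole-spec comparisons in this file work as plain == asserts because pytest's assertion rewriting prints a key-by-key diff for unequal dicts, covering what assertEqual and assertDictEqual reported before. A tiny sketch with invented values:

    expected = {"name": "base", "image": "dummy_image:latest"}
    actual = {"name": "base", "image": "dummy_image:latest"}
    # On failure, pytest reports exactly which keys differ:
    assert expected == actual
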
diff --git a/chart/tests/test_redis.py b/chart/tests/test_redis.py
index d4c5db7..a3e1168 100644
--- a/chart/tests/test_redis.py
+++ b/chart/tests/test_redis.py
@@ -14,11 +14,13 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+import re
 import unittest
 from base64 import b64decode
 from subprocess import CalledProcessError
 from typing import Optional
 
+import pytest
 from parameterized import parameterized
 
 from tests.helm_template_generator import prepare_k8s_lookup_dict, render_chart
@@ -67,16 +69,16 @@ class RedisTest(unittest.TestCase):
     ):
         if expected_password_match is not None:
             redis_password_in_password_secret = self.get_redis_password_in_password_secret(k8s_obj_by_key)
-            self.assertRegex(redis_password_in_password_secret, expected_password_match)
+            assert re.search(expected_password_match, redis_password_in_password_secret)
         else:
-            self.assertNotIn(REDIS_OBJECTS["SECRET_PASSWORD"], k8s_obj_by_key.keys())
+            assert REDIS_OBJECTS["SECRET_PASSWORD"] not in k8s_obj_by_key.keys()
 
         if expected_broker_url_match is not None:
             # assert redis broker url in secret
             broker_url_in_broker_url_secret = self.get_broker_url_in_broker_url_secret(k8s_obj_by_key)
-            self.assertRegex(broker_url_in_broker_url_secret, expected_broker_url_match)
+            assert re.search(expected_broker_url_match, broker_url_in_broker_url_secret)
         else:
-            self.assertNotIn(REDIS_OBJECTS["SECRET_BROKER_URL"], k8s_obj_by_key.keys())
+            assert REDIS_OBJECTS["SECRET_BROKER_URL"] not in k8s_obj_by_key.keys()
 
     def assert_broker_url_env(
         self, k8s_obj_by_key, expected_broker_url_secret_name=REDIS_OBJECTS["SECRET_BROKER_URL"][1]
@@ -84,11 +86,11 @@ class RedisTest(unittest.TestCase):
         broker_url_secret_in_scheduler = self.get_broker_url_secret_in_deployment(
             k8s_obj_by_key, "StatefulSet", "worker"
         )
-        self.assertEqual(broker_url_secret_in_scheduler, expected_broker_url_secret_name)
+        assert broker_url_secret_in_scheduler == expected_broker_url_secret_name
         broker_url_secret_in_worker = self.get_broker_url_secret_in_deployment(
             k8s_obj_by_key, "Deployment", "scheduler"
         )
-        self.assertEqual(broker_url_secret_in_worker, expected_broker_url_secret_name)
+        assert broker_url_secret_in_worker == expected_broker_url_secret_name
 
     @parameterized.expand(CELERY_EXECUTORS_PARAMS)
     def test_redis_by_chart_default(self, executor):
@@ -103,7 +105,7 @@ class RedisTest(unittest.TestCase):
         k8s_obj_by_key = prepare_k8s_lookup_dict(k8s_objects)
 
         created_redis_objects = SET_POSSIBLE_REDIS_OBJECT_KEYS & set(k8s_obj_by_key.keys())
-        self.assertEqual(created_redis_objects, SET_POSSIBLE_REDIS_OBJECT_KEYS)
+        assert created_redis_objects == SET_POSSIBLE_REDIS_OBJECT_KEYS
 
         self.assert_password_and_broker_url_secrets(
             k8s_obj_by_key,
@@ -126,7 +128,7 @@ class RedisTest(unittest.TestCase):
         k8s_obj_by_key = prepare_k8s_lookup_dict(k8s_objects)
 
         created_redis_objects = SET_POSSIBLE_REDIS_OBJECT_KEYS & set(k8s_obj_by_key.keys())
-        self.assertEqual(created_redis_objects, SET_POSSIBLE_REDIS_OBJECT_KEYS)
+        assert created_redis_objects == SET_POSSIBLE_REDIS_OBJECT_KEYS
 
         self.assert_password_and_broker_url_secrets(
             k8s_obj_by_key,
@@ -138,7 +140,7 @@ class RedisTest(unittest.TestCase):
 
     @parameterized.expand(CELERY_EXECUTORS_PARAMS)
     def test_redis_by_chart_password_secret_name_missing_broker_url_secret_name(self, executor):
-        with self.assertRaises(CalledProcessError):
+        with pytest.raises(CalledProcessError):
             render_chart(
                 RELEASE_NAME_REDIS,
                 {
@@ -168,11 +170,10 @@ class RedisTest(unittest.TestCase):
         k8s_obj_by_key = prepare_k8s_lookup_dict(k8s_objects)
 
         created_redis_objects = SET_POSSIBLE_REDIS_OBJECT_KEYS & set(k8s_obj_by_key.keys())
-        self.assertEqual(
-            created_redis_objects,
-            SET_POSSIBLE_REDIS_OBJECT_KEYS
-            - {REDIS_OBJECTS["SECRET_PASSWORD"], REDIS_OBJECTS["SECRET_BROKER_URL"]},
-        )
+        assert created_redis_objects == SET_POSSIBLE_REDIS_OBJECT_KEYS - {
+            REDIS_OBJECTS["SECRET_PASSWORD"],
+            REDIS_OBJECTS["SECRET_BROKER_URL"],
+        }
 
         self.assert_password_and_broker_url_secrets(
             k8s_obj_by_key, expected_password_match=None, expected_broker_url_match=None
@@ -196,7 +197,7 @@ class RedisTest(unittest.TestCase):
         k8s_obj_by_key = prepare_k8s_lookup_dict(k8s_objects)
 
         created_redis_objects = SET_POSSIBLE_REDIS_OBJECT_KEYS & set(k8s_obj_by_key.keys())
-        self.assertEqual(created_redis_objects, {REDIS_OBJECTS["SECRET_BROKER_URL"]})
+        assert created_redis_objects == {REDIS_OBJECTS["SECRET_BROKER_URL"]}
 
         self.assert_password_and_broker_url_secrets(
             k8s_obj_by_key,
@@ -221,7 +222,7 @@ class RedisTest(unittest.TestCase):
         k8s_obj_by_key = prepare_k8s_lookup_dict(k8s_objects)
 
         created_redis_objects = SET_POSSIBLE_REDIS_OBJECT_KEYS & set(k8s_obj_by_key.keys())
-        self.assertEqual(created_redis_objects, set())
+        assert created_redis_objects == set()
 
         self.assert_password_and_broker_url_secrets(
             k8s_obj_by_key, expected_password_match=None, expected_broker_url_match=None
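
self.assertRaises becomes pytest.raises throughout, used as the same kind of context manager. A self-contained sketch, separate from the patch (the divide helper is invented):

    import pytest

    def divide(a, b):
        return a / b

    def test_divide_by_zero():
        # The test passes only if the block raises the given exception:
        with pytest.raises(ZeroDivisionError):
            divide(1, 0)
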
diff --git a/chart/tests/test_scheduler.py b/chart/tests/test_scheduler.py
index eb5225e..4621ce2 100644
--- a/chart/tests/test_scheduler.py
+++ b/chart/tests/test_scheduler.py
@@ -35,7 +35,7 @@ class SchedulerTest(unittest.TestCase):
             show_only=["templates/scheduler/scheduler-deployment.yaml"],
         )
 
-        self.assertEqual("test-volume", jmespath.search("spec.template.spec.volumes[1].name", docs[0]))
-        self.assertEqual(
-            "test-volume", jmespath.search("spec.template.spec.containers[0].volumeMounts[3].name", docs[0])
+        assert "test-volume" == jmespath.search("spec.template.spec.volumes[1].name", docs[0])
+        assert "test-volume" == jmespath.search(
+            "spec.template.spec.containers[0].volumeMounts[3].name", docs[0]
         )
diff --git a/chart/tests/test_worker.py b/chart/tests/test_worker.py
index 9b3515e..4146589 100644
--- a/chart/tests/test_worker.py
+++ b/chart/tests/test_worker.py
@@ -35,7 +35,7 @@ class WorkerTest(unittest.TestCase):
             show_only=["templates/workers/worker-deployment.yaml"],
         )
 
-        self.assertEqual("test-volume", jmespath.search("spec.template.spec.volumes[0].name", docs[0]))
-        self.assertEqual(
-            "test-volume", jmespath.search("spec.template.spec.containers[0].volumeMounts[0].name", docs[0])
+        assert "test-volume" == jmespath.search("spec.template.spec.volumes[0].name", docs[0])
+        assert "test-volume" == jmespath.search(
+            "spec.template.spec.containers[0].volumeMounts[0].name", docs[0]
         )
diff --git a/docs/apache-airflow/best-practices.rst b/docs/apache-airflow/best-practices.rst
index 2289269..2d5b409 100644
--- a/docs/apache-airflow/best-practices.rst
+++ b/docs/apache-airflow/best-practices.rst
@@ -145,9 +145,9 @@ Unit tests ensure that there is no incorrect code in your DAG. You can write uni
 
     def test_dag_loaded(self):
         dag = self.dagbag.get_dag(dag_id='hello_world')
-        self.assertDictEqual(self.dagbag.import_errors, {})
-        self.assertIsNotNone(dag)
-        self.assertEqual(len(dag.tasks), 1)
+        assert self.dagbag.import_errors == {}
+        assert dag is not None
+        assert len(dag.tasks) == 1
 
 **Unit test a DAG structure:**
 This is an example test that verifies the structure of a code-generated DAG against a dict object
@@ -157,12 +157,11 @@ This is an example test want to verify the structure of a code-generated DAG aga
  import unittest
  class testClass(unittest.TestCase):
      def assertDagDictEqual(self,source,dag):
-         self.assertEqual(dag.task_dict.keys(),source.keys())
-         for task_id,downstream_list in source.items():
-             self.assertTrue(dag.has_task(task_id), msg="Missing task_id: {} in dag".format(task_id))
+         assert dag.task_dict.keys() == source.keys()
+         for task_id, downstream_list in source.items():
+             assert dag.has_task(task_id)
              task = dag.get_task(task_id)
-             self.assertEqual(task.downstream_task_ids, set(downstream_list),
-                              msg="unexpected downstream link in {}".format(task_id))
+             assert task.downstream_task_ids == set(downstream_list)
      def test_dag(self):
          self.assertDagDictEqual({
            "DummyInstruction_0": ["DummyInstruction_1"],
@@ -193,8 +192,8 @@ This is an example test want to verify the structure of a code-generated DAG aga
 
     def test_execute_no_trigger(self):
         self.ti.run(ignore_ti_state=True)
-        self.assertEqual(self.ti.state, State.SUCCESS)
-        #Assert something related to tasks results
+        assert self.ti.state == State.SUCCESS
+        # Assert something related to tasks results
 
 Self-Checks
 ------------
@@ -247,7 +246,7 @@ For variable, use :envvar:`AIRFLOW_VAR_{KEY}`.
 .. code-block:: python
 
     with mock.patch.dict('os.environ', AIRFLOW_VAR_KEY="env-value"):
-        self.assertEqual("env-value", Variable.get("key"))
+        assert "env-value" == Variable.get("key")
 
 For connection, use :envvar:`AIRFLOW_CONN_{CONN_ID}`.
 
@@ -260,4 +259,4 @@ For connection, use :envvar:`AIRFLOW_CONN_{CONN_ID}`.
     )
     conn_uri = conn.get_uri()
     with mock.patch.dict("os.environ", AIRFLOW_CONN_MY_CONN=conn_uri):
-      self.assertEqual("cat", Connection.get("my_conn").login)
+      assert "cat" == Connection.get("my_conn").login
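
The mock.patch.dict pattern above is independent of Airflow; a minimal sketch of the same environment-variable scoping, assuming AIRFLOW_VAR_KEY is not otherwise set:

    import os
    from unittest import mock

    with mock.patch.dict("os.environ", AIRFLOW_VAR_KEY="env-value"):
        assert os.environ["AIRFLOW_VAR_KEY"] == "env-value"
    # The patch is reverted when the block exits:
    assert "AIRFLOW_VAR_KEY" not in os.environ
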
diff --git a/kubernetes_tests/test_kubernetes_executor.py b/kubernetes_tests/test_kubernetes_executor.py
index 45f77eb..bd16aef 100644
--- a/kubernetes_tests/test_kubernetes_executor.py
+++ b/kubernetes_tests/test_kubernetes_executor.py
@@ -84,7 +84,7 @@ class TestKubernetesExecutor(unittest.TestCase):
             timeout=1,
         )
 
-        self.assertEqual(response.status_code, 200)
+        assert response.status_code == 200
 
     def setUp(self):
         self.session = self._get_session_with_retries()
@@ -112,7 +112,7 @@ class TestKubernetesExecutor(unittest.TestCase):
                     check_call(["echo", "api returned 404."])
                     tries += 1
                     continue
-                self.assertEqual(result.status_code, 200, "Could not get the status")
+                assert result.status_code == 200, "Could not get the status"
                 result_json = result.json()
                 print(f"Received [monitor_task]#2: {result_json}")
                 state = result_json['state']
@@ -127,7 +127,7 @@ class TestKubernetesExecutor(unittest.TestCase):
                 check_call(["echo", f"api call failed. trying again. error {e}"])
         if state != expected_final_state:
             print(f"The expected state is wrong {state} != {expected_final_state} (expected)!")
-        self.assertEqual(state, expected_final_state)
+        assert state == expected_final_state
 
     def ensure_dag_expected_state(self, host, execution_date, dag_id, expected_final_state, timeout):
         tries = 0
@@ -140,7 +140,7 @@ class TestKubernetesExecutor(unittest.TestCase):
             print(f"Calling {get_string}")
             # Trigger a new dagrun
             result = self.session.get(get_string)
-            self.assertEqual(result.status_code, 200, "Could not get the status")
+            assert result.status_code == 200, "Could not get the status"
             result_json = result.json()
             print(f"Received: {result}")
             state = result_json['state']
@@ -152,7 +152,7 @@ class TestKubernetesExecutor(unittest.TestCase):
             self._describe_resources("airflow")
             self._describe_resources("default")
             tries += 1
-        self.assertEqual(state, expected_final_state)
+        assert state == expected_final_state
 
         # Maybe check if we can retrieve the logs, but then we need to extend the API
 
@@ -165,7 +165,7 @@ class TestKubernetesExecutor(unittest.TestCase):
         except ValueError:
             result_json = str(result)
         print(f"Received [start_dag]#1 {result_json}")
-        self.assertEqual(result.status_code, 200, f"Could not enable DAG: {result_json}")
+        assert result.status_code == 200, f"Could not enable DAG: {result_json}"
         post_string = f'http://{host}/api/experimental/' f'dags/{dag_id}/dag_runs'
         print(f"Calling [start_dag]#2 {post_string}")
         # Trigger a new dagrun
@@ -175,17 +175,15 @@ class TestKubernetesExecutor(unittest.TestCase):
         except ValueError:
             result_json = str(result)
         print(f"Received [start_dag]#2 {result_json}")
-        self.assertEqual(result.status_code, 200, f"Could not trigger a DAG-run: {result_json}")
+        assert result.status_code == 200, f"Could not trigger a DAG-run: {result_json}"
 
         time.sleep(1)
 
         get_string = f'http://{host}/api/experimental/latest_runs'
         print(f"Calling [start_dag]#3 {get_string}")
         result = self.session.get(get_string)
-        self.assertEqual(
-            result.status_code,
-            200,
-            "Could not get the latest DAG-run:" " {result}".format(result=result.json()),
+        assert result.status_code == 200, "Could not get the latest DAG-run:" " {result}".format(
+            result=result.json()
         )
         result_json = result.json()
         print(f"Received: [start_dag]#3 {result_json}")
@@ -193,13 +191,13 @@ class TestKubernetesExecutor(unittest.TestCase):
 
     def start_job_in_kubernetes(self, dag_id, host):
         result_json = self.start_dag(dag_id=dag_id, host=host)
-        self.assertGreater(len(result_json['items']), 0)
+        assert len(result_json['items']) > 0
         execution_date = None
         for dag_run in result_json['items']:
             if dag_run['dag_id'] == dag_id:
                 execution_date = dag_run['execution_date']
                 break
-        self.assertIsNotNone(execution_date, f"No execution_date can be found for the dag with {dag_id}")
+        assert execution_date is not None, f"No execution_date can be found for the dag with {dag_id}"
         return execution_date
 
     def test_integration_run_dag(self):
@@ -264,6 +262,4 @@ class TestKubernetesExecutor(unittest.TestCase):
             timeout=300,
         )
 
-        self.assertEqual(
-            self._num_pods_in_namespace('test-namespace'), 0, "failed to delete pods in other namespace"
-        )
+        assert self._num_pods_in_namespace('test-namespace') == 0, "failed to delete pods in other namespace"
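
The msg argument of the unittest helpers survives the conversion as the second operand of the assert statement, which becomes the AssertionError message. A one-line sketch with an invented value:

    status_code = 200  # illustrative; the tests read this from an HTTP response
    assert status_code == 200, "Could not get the status"
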
diff --git a/kubernetes_tests/test_kubernetes_pod_operator.py b/kubernetes_tests/test_kubernetes_pod_operator.py
index 49754a2..574f7ff 100644
--- a/kubernetes_tests/test_kubernetes_pod_operator.py
+++ b/kubernetes_tests/test_kubernetes_pod_operator.py
@@ -27,6 +27,7 @@ from unittest import mock
 from unittest.mock import ANY
 
 import pendulum
+import pytest
 from kubernetes.client import models as k8s
 from kubernetes.client.api_client import ApiClient
 from kubernetes.client.rest import ApiException
@@ -136,7 +137,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             do_xcom_push=False,
             config_file=new_config_path,
         )
-        self.assertFalse(k.do_xcom_push)
+        assert not k.do_xcom_push
 
     def test_config_path_move(self):
         new_config_path = '/tmp/kube_config'
@@ -158,7 +159,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         context = create_context(k)
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_working_pod(self):
         k = KubernetesPodOperator(
@@ -175,8 +176,8 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         context = create_context(k)
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
-        self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
-        self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
+        assert self.expected_pod['spec'] == actual_pod['spec']
+        assert self.expected_pod['metadata']['labels'] == actual_pod['metadata']['labels']
 
     def test_delete_operator_pod(self):
         k = KubernetesPodOperator(
@@ -194,8 +195,8 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         context = create_context(k)
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
-        self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
-        self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
+        assert self.expected_pod['spec'] == actual_pod['spec']
+        assert self.expected_pod['metadata']['labels'] == actual_pod['metadata']['labels']
 
     def test_pod_hostnetwork(self):
         k = KubernetesPodOperator(
@@ -214,8 +215,8 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['hostNetwork'] = True
-        self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
-        self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
+        assert self.expected_pod['spec'] == actual_pod['spec']
+        assert self.expected_pod['metadata']['labels'] == actual_pod['metadata']['labels']
 
     def test_pod_dnspolicy(self):
         dns_policy = "ClusterFirstWithHostNet"
@@ -237,8 +238,8 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['hostNetwork'] = True
         self.expected_pod['spec']['dnsPolicy'] = dns_policy
-        self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
-        self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
+        assert self.expected_pod['spec'] == actual_pod['spec']
+        assert self.expected_pod['metadata']['labels'] == actual_pod['metadata']['labels']
 
     def test_pod_schedulername(self):
         scheduler_name = "default-scheduler"
@@ -258,7 +259,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['schedulerName'] = scheduler_name
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_pod_node_selectors(self):
         node_selectors = {'beta.kubernetes.io/os': 'linux'}
@@ -278,7 +279,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['nodeSelector'] = node_selectors
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_pod_resources(self):
         resources = k8s.V1ResourceRequirements(
@@ -304,7 +305,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             'requests': {'memory': '64Mi', 'cpu': '250m', 'ephemeral-storage': '1Gi'},
             'limits': {'memory': '64Mi', 'cpu': 0.25, 'nvidia.com/gpu': None, 'ephemeral-storage': '2Gi'},
         }
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_pod_affinity(self):
         affinity = {
@@ -336,7 +337,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context=context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['affinity'] = affinity
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_port(self):
         port = k8s.V1ContainerPort(
@@ -360,7 +361,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context=context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['containers'][0]['ports'] = [{'name': 'http', 'containerPort': 80}]
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_volume_mount(self):
         with mock.patch.object(PodLauncher, 'log') as mock_logger:
@@ -401,7 +402,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             self.expected_pod['spec']['volumes'] = [
                 {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}
             ]
-            self.assertEqual(self.expected_pod, actual_pod)
+            assert self.expected_pod == actual_pod
 
     def test_run_as_user_root(self):
         security_context = {
@@ -425,7 +426,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['securityContext'] = security_context
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_run_as_user_non_root(self):
         security_context = {
@@ -450,7 +451,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['securityContext'] = security_context
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_fs_group(self):
         security_context = {
@@ -475,7 +476,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['securityContext'] = security_context
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_faulty_image(self):
         bad_image_name = "foobar"
@@ -491,12 +492,12 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             do_xcom_push=False,
             startup_timeout_seconds=5,
         )
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             context = create_context(k)
             k.execute(context)
             actual_pod = self.api_client.sanitize_for_serialization(k.pod)
             self.expected_pod['spec']['containers'][0]['image'] = bad_image_name
-            self.assertEqual(self.expected_pod, actual_pod)
+            assert self.expected_pod == actual_pod
 
     def test_faulty_service_account(self):
         bad_service_account_name = "foobar"
@@ -513,12 +514,12 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             startup_timeout_seconds=5,
             service_account_name=bad_service_account_name,
         )
-        with self.assertRaises(ApiException):
+        with pytest.raises(ApiException):
             context = create_context(k)
             k.execute(context)
             actual_pod = self.api_client.sanitize_for_serialization(k.pod)
             self.expected_pod['spec']['serviceAccountName'] = bad_service_account_name
-            self.assertEqual(self.expected_pod, actual_pod)
+            assert self.expected_pod == actual_pod
 
     def test_pod_failure(self):
         """
@@ -536,12 +537,12 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             in_cluster=False,
             do_xcom_push=False,
         )
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             context = create_context(k)
             k.execute(context)
             actual_pod = self.api_client.sanitize_for_serialization(k.pod)
             self.expected_pod['spec']['containers'][0]['args'] = bad_internal_command
-            self.assertEqual(self.expected_pod, actual_pod)
+            assert self.expected_pod == actual_pod
 
     def test_xcom_push(self):
         return_value = '{"foo": "bar"\n, "buzz": 2}'
@@ -558,7 +559,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             do_xcom_push=True,
         )
         context = create_context(k)
-        self.assertEqual(k.execute(context), json.loads(return_value))
+        assert k.execute(context) == json.loads(return_value)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         volume = self.api_client.sanitize_for_serialization(PodDefaults.VOLUME)
         volume_mount = self.api_client.sanitize_for_serialization(PodDefaults.VOLUME_MOUNT)
@@ -567,7 +568,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         self.expected_pod['spec']['containers'][0]['volumeMounts'].insert(0, volume_mount)  # noqa
         self.expected_pod['spec']['volumes'].insert(0, volume)
         self.expected_pod['spec']['containers'].append(container)
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@@ -595,7 +596,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         mock_monitor.return_value = (State.SUCCESS, None)
         context = create_context(k)
         k.execute(context)
-        self.assertEqual(mock_start.call_args[0][0].spec.containers[0].env_from, env_from)
+        assert mock_start.call_args[0][0].spec.containers[0].env_from == env_from
 
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@@ -623,10 +624,9 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         monitor_mock.return_value = (State.SUCCESS, None)
         context = create_context(k)
         k.execute(context)
-        self.assertEqual(
-            start_mock.call_args[0][0].spec.containers[0].env_from,
-            [k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name=secret_ref))],
-        )
+        assert start_mock.call_args[0][0].spec.containers[0].env_from == [
+            k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name=secret_ref))
+        ]
 
     def test_env_vars(self):
         # WHEN
@@ -662,7 +662,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             {'name': 'ENV2', 'value': 'val2'},
             {'name': 'ENV3', 'valueFrom': {'fieldRef': {'fieldPath': 'status.podIP'}}},
         ]
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_pod_template_file_system(self):
         fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
@@ -675,8 +675,8 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
 
         context = create_context(k)
         result = k.execute(context)
-        self.assertIsNotNone(result)
-        self.assertDictEqual(result, {"hello": "world"})
+        assert result is not None
+        assert result == {"hello": "world"}
 
     def test_pod_template_file_with_overrides_system(self):
         fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
@@ -691,10 +691,10 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
 
         context = create_context(k)
         result = k.execute(context)
-        self.assertIsNotNone(result)
-        self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
-        self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
-        self.assertDictEqual(result, {"hello": "world"})
+        assert result is not None
+        assert k.pod.metadata.labels == {'fizz': 'buzz', 'foo': 'bar'}
+        assert k.pod.spec.containers[0].env == [k8s.V1EnvVar(name="env_name", value="value")]
+        assert result == {"hello": "world"}
 
     def test_pod_template_file_with_full_pod_spec(self):
         fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
@@ -721,10 +721,10 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
 
         context = create_context(k)
         result = k.execute(context)
-        self.assertIsNotNone(result)
-        self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
-        self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
-        self.assertDictEqual(result, {"hello": "world"})
+        assert result is not None
+        assert k.pod.metadata.labels == {'fizz': 'buzz', 'foo': 'bar'}
+        assert k.pod.spec.containers[0].env == [k8s.V1EnvVar(name="env_name", value="value")]
+        assert result == {"hello": "world"}
 
     def test_full_pod_spec(self):
         pod_spec = k8s.V1Pod(
@@ -753,10 +753,10 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
 
         context = create_context(k)
         result = k.execute(context)
-        self.assertIsNotNone(result)
-        self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
-        self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
-        self.assertDictEqual(result, {"hello": "world"})
+        assert result is not None
+        assert k.pod.metadata.labels == {'fizz': 'buzz', 'foo': 'bar'}
+        assert k.pod.spec.containers[0].env == [k8s.V1EnvVar(name="env_name", value="value")]
+        assert result == {"hello": "world"}
 
     def test_init_container(self):
         # GIVEN
@@ -811,7 +811,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         self.expected_pod['spec']['volumes'] = [
             {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}
         ]
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@@ -842,7 +842,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
               deletion_grace_period_seconds: null\
             """
             ).strip()
-            self.assertTrue(any(line.startswith(expected_line) for line in cm.output))
+            assert any(line.startswith(expected_line) for line in cm.output)
 
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         expected_dict = {
@@ -882,7 +882,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
                 'volumes': [{'emptyDir': {}, 'name': 'xcom'}],
             },
         }
-        self.assertEqual(expected_dict, actual_pod)
+        assert expected_dict == actual_pod
 
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@@ -912,11 +912,11 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['priorityClassName'] = priority_class_name
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_pod_name(self):
         pod_name_too_long = "a" * 221
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             KubernetesPodOperator(
                 namespace='default',
                 image="ubuntu:16.04",
@@ -953,9 +953,9 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         name = k.pod.metadata.name
         pod = client.read_namespaced_pod(name=name, namespace=namespace)
-        self.assertEqual(pod.status.phase, "Running")
+        assert pod.status.phase == "Running"
         k.on_kill()
-        with self.assertRaises(ApiException):
+        with pytest.raises(ApiException):
             pod = client.read_namespaced_pod(name=name, namespace=namespace)
 
     def test_reattach_failing_pod_once(self):
@@ -987,10 +987,10 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             pod = client.read_namespaced_pod(name=name, namespace=namespace)
             while pod.status.phase != "Failed":
                 pod = client.read_namespaced_pod(name=name, namespace=namespace)
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             k.execute(context)
         pod = client.read_namespaced_pod(name=name, namespace=namespace)
-        self.assertEqual(pod.metadata.labels["already_checked"], "True")
+        assert pod.metadata.labels["already_checked"] == "True"
         with mock.patch(
             "airflow.providers.cncf.kubernetes"
             ".operators.kubernetes_pod.KubernetesPodOperator"
diff --git a/kubernetes_tests/test_kubernetes_pod_operator_backcompat.py b/kubernetes_tests/test_kubernetes_pod_operator_backcompat.py
index daefc15..88c7f3e 100644
--- a/kubernetes_tests/test_kubernetes_pod_operator_backcompat.py
+++ b/kubernetes_tests/test_kubernetes_pod_operator_backcompat.py
@@ -24,6 +24,7 @@ from unittest.mock import patch
 
 import kubernetes.client.models as k8s
 import pendulum
+import pytest
 from kubernetes.client.api_client import ApiClient
 from kubernetes.client.rest import ApiException
 
@@ -147,10 +148,9 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         monitor_mock.return_value = (State.SUCCESS, None)
         context = self.create_context(k)
         k.execute(context=context)
-        self.assertEqual(
-            start_mock.call_args[0][0].spec.image_pull_secrets,
-            [k8s.V1LocalObjectReference(name=fake_pull_secrets)],
-        )
+        assert start_mock.call_args[0][0].spec.image_pull_secrets == [
+            k8s.V1LocalObjectReference(name=fake_pull_secrets)
+        ]
 
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@@ -173,7 +173,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             is_delete_operator_pod=True,
         )
         monitor_pod_mock.side_effect = AirflowException('fake failure')
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             context = self.create_context(k)
             k.execute(context=context)
         assert delete_pod_mock.called
@@ -193,8 +193,8 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         context = create_context(k)
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
-        self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
-        self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
+        assert self.expected_pod['spec'] == actual_pod['spec']
+        assert self.expected_pod['metadata']['labels'] == actual_pod['metadata']['labels']
 
     def test_pod_node_selectors(self):
         node_selectors = {'beta.kubernetes.io/os': 'linux'}
@@ -214,7 +214,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['nodeSelector'] = node_selectors
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_pod_resources(self):
         resources = {
@@ -244,7 +244,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             'requests': {'memory': '64Mi', 'cpu': '250m', 'ephemeral-storage': '1Gi'},
             'limits': {'memory': '64Mi', 'cpu': 0.25, 'ephemeral-storage': '2Gi'},
         }
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_pod_affinity(self):
         affinity = {
@@ -276,7 +276,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context=context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['affinity'] = affinity
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_port(self):
         port = Port('http', 80)
@@ -297,7 +297,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context=context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['containers'][0]['ports'] = [{'name': 'http', 'containerPort': 80}]
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_volume_mount(self):
         with patch.object(PodLauncher, 'log') as mock_logger:
@@ -336,7 +336,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             self.expected_pod['spec']['volumes'] = [
                 {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}
             ]
-            self.assertEqual(self.expected_pod, actual_pod)
+            assert self.expected_pod == actual_pod
 
     def test_run_as_user_root(self):
         security_context = {
@@ -360,7 +360,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['securityContext'] = security_context
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_run_as_user_non_root(self):
         security_context = {
@@ -385,7 +385,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['securityContext'] = security_context
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_fs_group(self):
         security_context = {
@@ -410,7 +410,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['securityContext'] = security_context
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_faulty_service_account(self):
         bad_service_account_name = "foobar"
@@ -427,12 +427,12 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             startup_timeout_seconds=5,
             service_account_name=bad_service_account_name,
         )
-        with self.assertRaises(ApiException):
+        with pytest.raises(ApiException):
             context = create_context(k)
             k.execute(context)
             actual_pod = self.api_client.sanitize_for_serialization(k.pod)
             self.expected_pod['spec']['serviceAccountName'] = bad_service_account_name
-            self.assertEqual(self.expected_pod, actual_pod)
+            assert self.expected_pod == actual_pod
 
     def test_pod_failure(self):
         """
@@ -450,12 +450,12 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             in_cluster=False,
             do_xcom_push=False,
         )
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             context = create_context(k)
             k.execute(context)
             actual_pod = self.api_client.sanitize_for_serialization(k.pod)
             self.expected_pod['spec']['containers'][0]['args'] = bad_internal_command
-            self.assertEqual(self.expected_pod, actual_pod)
+            assert self.expected_pod == actual_pod
 
     def test_xcom_push(self):
         return_value = '{"foo": "bar"\n, "buzz": 2}'
@@ -472,7 +472,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             do_xcom_push=True,
         )
         context = create_context(k)
-        self.assertEqual(k.execute(context), json.loads(return_value))
+        assert k.execute(context) == json.loads(return_value)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         volume = self.api_client.sanitize_for_serialization(PodDefaults.VOLUME)
         volume_mount = self.api_client.sanitize_for_serialization(PodDefaults.VOLUME_MOUNT)
@@ -481,7 +481,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         self.expected_pod['spec']['containers'][0]['volumeMounts'].insert(0, volume_mount)  # noqa
         self.expected_pod['spec']['volumes'].insert(0, volume)
         self.expected_pod['spec']['containers'].append(container)
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@@ -506,10 +506,9 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         mock_monitor.return_value = (State.SUCCESS, None)
         context = self.create_context(k)
         k.execute(context)
-        self.assertEqual(
-            mock_start.call_args[0][0].spec.containers[0].env_from,
-            [k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name=configmap))],
-        )
+        assert mock_start.call_args[0][0].spec.containers[0].env_from == [
+            k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name=configmap))
+        ]
 
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@@ -535,10 +534,9 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         monitor_mock.return_value = (State.SUCCESS, None)
         context = self.create_context(k)
         k.execute(context)
-        self.assertEqual(
-            start_mock.call_args[0][0].spec.containers[0].env_from,
-            [k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name=secret_ref))],
-        )
+        assert start_mock.call_args[0][0].spec.containers[0].env_from == [
+            k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name=secret_ref))
+        ]
 
     def test_env_vars(self):
         # WHEN
@@ -569,7 +567,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
             {'name': 'ENV2', 'value': 'val2'},
             {'name': 'ENV3', 'valueFrom': {'fieldRef': {'fieldPath': 'status.podIP'}}},
         ]
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_pod_template_file_with_overrides_system(self):
         fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
@@ -584,10 +582,10 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
 
         context = create_context(k)
         result = k.execute(context)
-        self.assertIsNotNone(result)
-        self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
-        self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
-        self.assertDictEqual(result, {"hello": "world"})
+        assert result is not None
+        assert k.pod.metadata.labels == {'fizz': 'buzz', 'foo': 'bar'}
+        assert k.pod.spec.containers[0].env == [k8s.V1EnvVar(name="env_name", value="value")]
+        assert result == {"hello": "world"}
 
     def test_init_container(self):
         # GIVEN
@@ -641,7 +639,7 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         self.expected_pod['spec']['volumes'] = [
             {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}
         ]
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
     @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
@@ -669,11 +667,11 @@ class TestKubernetesPodOperatorSystem(unittest.TestCase):
         k.execute(context)
         actual_pod = self.api_client.sanitize_for_serialization(k.pod)
         self.expected_pod['spec']['priorityClassName'] = priority_class_name
-        self.assertEqual(self.expected_pod, actual_pod)
+        assert self.expected_pod == actual_pod
 
     def test_pod_name(self):
         pod_name_too_long = "a" * 221
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             KubernetesPodOperator(
                 namespace='default',
                 image="ubuntu:16.04",
diff --git a/pylintrc b/pylintrc
index ae2e34a..cd9a060 100644
--- a/pylintrc
+++ b/pylintrc
@@ -154,7 +154,8 @@ disable=print-statement,
         ungrouped-imports,  # Disabled to avoid conflict with isort import order rules, which is enabled in the project.
         missing-module-docstring,
         import-outside-toplevel,    # We import outside toplevel to avoid cyclic imports
-        raise-missing-from  # We don't use raise...from
+        raise-missing-from,  # We don't use raise...from
+        misplaced-comparison-constant
 
 # Enable the message, report, category or checker with the given id(s). You can
 # either give multiple identifier separated by comma (,) or put this option
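
Disabling misplaced-comparison-constant is what lets the rewritten tests
keep the expected constant on the left-hand side of a comparison, the
order that mechanical assertEqual(expected, actual) conversions produce.
An illustrative pair:

    assert 401 == response.status_code  # constant first: would trip the check
    assert response.status_code == 401  # the order the checker prefers
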
diff --git a/tests/always/test_example_dags.py b/tests/always/test_example_dags.py
index 339a42a..a21056d 100644
--- a/tests/always/test_example_dags.py
+++ b/tests/always/test_example_dags.py
@@ -32,7 +32,7 @@ NO_DB_QUERY_EXCEPTION = ["/airflow/example_dags/example_subdag_operator.py"]
 class TestExampleDags(unittest.TestCase):
     def test_should_be_importable(self):
         example_dags = list(glob(f"{ROOT_FOLDER}/airflow/**/example_dags/example_*.py", recursive=True))
-        self.assertNotEqual(0, len(example_dags))
+        assert 0 != len(example_dags)
         for filepath in example_dags:
             relative_filepath = os.path.relpath(filepath, ROOT_FOLDER)
             with self.subTest(f"File {relative_filepath} should contain dags"):
@@ -40,8 +40,8 @@ class TestExampleDags(unittest.TestCase):
                     dag_folder=filepath,
                     include_examples=False,
                 )
-                self.assertEqual(0, len(dagbag.import_errors), f"import_errors={str(dagbag.import_errors)}")
-                self.assertGreaterEqual(len(dagbag.dag_ids), 1)
+                assert 0 == len(dagbag.import_errors), f"import_errors={str(dagbag.import_errors)}"
+                assert len(dagbag.dag_ids) >= 1
 
     def test_should_not_do_database_queries(self):
         example_dags = glob(f"{ROOT_FOLDER}/airflow/**/example_dags/example_*.py", recursive=True)
@@ -50,7 +50,7 @@ class TestExampleDags(unittest.TestCase):
             for dag_file in example_dags
             if any(not dag_file.endswith(e) for e in NO_DB_QUERY_EXCEPTION)
         ]
-        self.assertNotEqual(0, len(example_dags))
+        assert 0 != len(example_dags)
         for filepath in example_dags:
             relative_filepath = os.path.relpath(filepath, ROOT_FOLDER)
             with self.subTest(f"File {relative_filepath} shouldn't do database queries"):
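
A plain assert carries its failure message as a second expression after a
comma, replacing assertEqual's msg argument; pytest's assertion rewriting
still reports both operands on failure. Roughly:

    # unittest style: message as a trailing argument
    self.assertEqual(0, len(dagbag.import_errors), f"import_errors={dagbag.import_errors}")
    # plain assert: message after a comma
    assert 0 == len(dagbag.import_errors), f"import_errors={dagbag.import_errors}"
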
diff --git a/tests/always/test_project_structure.py b/tests/always/test_project_structure.py
index d111eb3..66fafc9 100644
--- a/tests/always/test_project_structure.py
+++ b/tests/always/test_project_structure.py
@@ -100,7 +100,7 @@ class TestProjectStructure(unittest.TestCase):
         with self.subTest("Detect missing tests in providers module"):
             expected_missing_test_modules = {pair[1] for pair in expected_missing_providers_modules}
             missing_tests_files = missing_tests_files - set(expected_missing_test_modules)
-            self.assertEqual(set(), missing_tests_files)
+            assert set() == missing_tests_files
 
         with self.subTest("Verify removed deprecated module also removed from deprecated list"):
             expected_missing_modules = {pair[0] for pair in expected_missing_providers_modules}
@@ -247,7 +247,7 @@ class TestGoogleProviderProjectStructure(unittest.TestCase):
         with self.subTest("Detect missing example dags"):
             missing_example = {s for s in operator_sets if not has_example_dag(s)}
             missing_example -= self.MISSING_EXAMPLE_DAGS
-            self.assertEqual(set(), missing_example)
+            assert set() == missing_example
 
         with self.subTest("Keep update missing example dags list"):
             new_example_dag = set(example_sets).intersection(set(self.MISSING_EXAMPLE_DAGS))
@@ -299,7 +299,7 @@ class TestGoogleProviderProjectStructure(unittest.TestCase):
                 print("example_paths=", example_paths)
                 operators_paths = set(get_classes_from_file(f"{ROOT_FOLDER}/{filepath}"))
                 missing_operators.extend(operators_paths - example_paths)
-        self.assertEqual(set(missing_operators), self.MISSING_EXAMPLES_FOR_OPERATORS)
+        assert set(missing_operators) == self.MISSING_EXAMPLES_FOR_OPERATORS
 
     @parameterized.expand(
         itertools.product(["_system.py", "_system_helper.py"], ["operators", "sensors", "transfers"])
@@ -314,7 +314,7 @@ class TestGoogleProviderProjectStructure(unittest.TestCase):
         expected_files = (f.replace(".py", filename_suffix).replace("/test_", "/") for f in expected_files)
         expected_files = {f'{f.rpartition("/")[0]}/test_{f.rpartition("/")[2]}' for f in expected_files}
 
-        self.assertEqual(set(), files - expected_files)
+        assert set() == files - expected_files
 
     @staticmethod
     def find_resource_files(
@@ -346,4 +346,4 @@ class TestOperatorsHooks(unittest.TestCase):
 
         invalid_files = [f for f in files if any(f.endswith(suffix) for suffix in illegal_suffixes)]
 
-        self.assertEqual([], invalid_files)
+        assert [] == invalid_files
diff --git a/tests/api/auth/backend/test_kerberos_auth.py b/tests/api/auth/backend/test_kerberos_auth.py
index ef31679..90113ad 100644
--- a/tests/api/auth/backend/test_kerberos_auth.py
+++ b/tests/api/auth/backend/test_kerberos_auth.py
@@ -64,7 +64,7 @@ class TestApiKerberos(unittest.TestCase):
                 data=json.dumps(dict(run_id='my_run' + datetime.now().isoformat())),
                 content_type="application/json",
             )
-            self.assertEqual(401, response.status_code)
+            assert 401 == response.status_code
 
             response.url = f'http://{socket.getfqdn()}'
 
@@ -81,7 +81,7 @@ class TestApiKerberos(unittest.TestCase):
             CLIENT_AUTH.mutual_authentication = 3
 
             CLIENT_AUTH.handle_response(response)
-            self.assertIn('Authorization', response.request.headers)
+            assert 'Authorization' in response.request.headers
 
             response2 = client.post(
                 url_template.format('example_bash_operator'),
@@ -89,7 +89,7 @@ class TestApiKerberos(unittest.TestCase):
                 content_type="application/json",
                 headers=response.request.headers,
             )
-            self.assertEqual(200, response2.status_code)
+            assert 200 == response2.status_code
 
     def test_unauthorized(self):
         with self.app.test_client() as client:
@@ -100,4 +100,4 @@ class TestApiKerberos(unittest.TestCase):
                 content_type="application/json",
             )
 
-            self.assertEqual(401, response.status_code)
+            assert 401 == response.status_code
diff --git a/tests/api/auth/test_client.py b/tests/api/auth/test_client.py
index 8652b12..cf3c7b5 100644
--- a/tests/api/auth/test_client.py
+++ b/tests/api/auth/test_client.py
@@ -38,7 +38,7 @@ class TestGetCurrentApiClient(unittest.TestCase):
         mock_client.assert_called_once_with(
             api_base_url='http://localhost:1234', auth='CLIENT_AUTH', session=None
         )
-        self.assertEqual(mock_client.return_value, result)
+        assert mock_client.return_value == result
 
     @mock.patch("airflow.api.client.json_client.Client")
     @mock.patch("airflow.providers.google.common.auth_backend.google_openid.create_client_session")
@@ -55,4 +55,4 @@ class TestGetCurrentApiClient(unittest.TestCase):
         mock_client.assert_called_once_with(
             api_base_url='http://localhost:1234', auth=None, session=mock_create_client_session.return_value
         )
-        self.assertEqual(mock_client.return_value, result)
+        assert mock_client.return_value == result
diff --git a/tests/api/client/test_local_client.py b/tests/api/client/test_local_client.py
index d574615..c20a1b9 100644
--- a/tests/api/client/test_local_client.py
+++ b/tests/api/client/test_local_client.py
@@ -20,6 +20,7 @@ import json
 import unittest
 from unittest.mock import ANY, patch
 
+import pytest
 from freezegun import freeze_time
 
 from airflow.api.client.local_client import Client
@@ -60,7 +61,7 @@ class TestLocalClient(unittest.TestCase):
         DagBag(include_examples=True)
 
         # non existent
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             self.client.trigger_dag(dag_id="blablabla")
 
         with freeze_time(EXECDATE):
@@ -118,36 +119,36 @@ class TestLocalClient(unittest.TestCase):
         key = "my_dag_id"
 
         with create_session() as session:
-            self.assertEqual(session.query(DagModel).filter(DagModel.dag_id == key).count(), 0)
+            assert session.query(DagModel).filter(DagModel.dag_id == key).count() == 0
             session.add(DagModel(dag_id=key))
 
         with create_session() as session:
-            self.assertEqual(session.query(DagModel).filter(DagModel.dag_id == key).count(), 1)
+            assert session.query(DagModel).filter(DagModel.dag_id == key).count() == 1
 
             self.client.delete_dag(dag_id=key)
-            self.assertEqual(session.query(DagModel).filter(DagModel.dag_id == key).count(), 0)
+            assert session.query(DagModel).filter(DagModel.dag_id == key).count() == 0
 
     def test_get_pool(self):
         self.client.create_pool(name='foo', slots=1, description='')
         pool = self.client.get_pool(name='foo')
-        self.assertEqual(pool, ('foo', 1, ''))
+        assert pool == ('foo', 1, '')
 
     def test_get_pools(self):
         self.client.create_pool(name='foo1', slots=1, description='')
         self.client.create_pool(name='foo2', slots=2, description='')
         pools = sorted(self.client.get_pools(), key=lambda p: p[0])
-        self.assertEqual(pools, [('default_pool', 128, 'Default pool'), ('foo1', 1, ''), ('foo2', 2, '')])
+        assert pools == [('default_pool', 128, 'Default pool'), ('foo1', 1, ''), ('foo2', 2, '')]
 
     def test_create_pool(self):
         pool = self.client.create_pool(name='foo', slots=1, description='')
-        self.assertEqual(pool, ('foo', 1, ''))
+        assert pool == ('foo', 1, '')
         with create_session() as session:
-            self.assertEqual(session.query(Pool).count(), 2)
+            assert session.query(Pool).count() == 2
 
     def test_delete_pool(self):
         self.client.create_pool(name='foo', slots=1, description='')
         with create_session() as session:
-            self.assertEqual(session.query(Pool).count(), 2)
+            assert session.query(Pool).count() == 2
         self.client.delete_pool(name='foo')
         with create_session() as session:
-            self.assertEqual(session.query(Pool).count(), 1)
+            assert session.query(Pool).count() == 1
diff --git a/tests/api/common/experimental/test_delete_dag.py b/tests/api/common/experimental/test_delete_dag.py
index 4d5b47f..7570cb8 100644
--- a/tests/api/common/experimental/test_delete_dag.py
+++ b/tests/api/common/experimental/test_delete_dag.py
@@ -18,6 +18,8 @@
 
 import unittest
 
+import pytest
+
 from airflow import models
 from airflow.api.common.experimental.delete_dag import delete_dag
 from airflow.exceptions import DagNotFound
@@ -46,7 +48,7 @@ class TestDeleteDAGCatchError(unittest.TestCase):
         self.dag.clear()
 
     def test_delete_dag_non_existent_dag(self):
-        with self.assertRaises(DagNotFound):
+        with pytest.raises(DagNotFound):
             delete_dag("non-existent DAG")
 
 
@@ -112,23 +114,23 @@ class TestDeleteDAGSuccessfulDelete(unittest.TestCase):
 
     def check_dag_models_exists(self):
         with create_session() as session:
-            self.assertEqual(session.query(DM).filter(DM.dag_id == self.key).count(), 1)
-            self.assertEqual(session.query(DR).filter(DR.dag_id == self.key).count(), 1)
-            self.assertEqual(session.query(TI).filter(TI.dag_id == self.key).count(), 1)
-            self.assertEqual(session.query(TF).filter(TF.dag_id == self.key).count(), 1)
-            self.assertEqual(session.query(TR).filter(TR.dag_id == self.key).count(), 1)
-            self.assertEqual(session.query(LOG).filter(LOG.dag_id == self.key).count(), 1)
-            self.assertEqual(session.query(IE).filter(IE.filename == self.dag_file_path).count(), 1)
+            assert session.query(DM).filter(DM.dag_id == self.key).count() == 1
+            assert session.query(DR).filter(DR.dag_id == self.key).count() == 1
+            assert session.query(TI).filter(TI.dag_id == self.key).count() == 1
+            assert session.query(TF).filter(TF.dag_id == self.key).count() == 1
+            assert session.query(TR).filter(TR.dag_id == self.key).count() == 1
+            assert session.query(LOG).filter(LOG.dag_id == self.key).count() == 1
+            assert session.query(IE).filter(IE.filename == self.dag_file_path).count() == 1
 
     def check_dag_models_removed(self, expect_logs=1):
         with create_session() as session:
-            self.assertEqual(session.query(DM).filter(DM.dag_id == self.key).count(), 0)
-            self.assertEqual(session.query(DR).filter(DR.dag_id == self.key).count(), 0)
-            self.assertEqual(session.query(TI).filter(TI.dag_id == self.key).count(), 0)
-            self.assertEqual(session.query(TF).filter(TF.dag_id == self.key).count(), 0)
-            self.assertEqual(session.query(TR).filter(TR.dag_id == self.key).count(), 0)
-            self.assertEqual(session.query(LOG).filter(LOG.dag_id == self.key).count(), expect_logs)
-            self.assertEqual(session.query(IE).filter(IE.filename == self.dag_file_path).count(), 0)
+            assert session.query(DM).filter(DM.dag_id == self.key).count() == 0
+            assert session.query(DR).filter(DR.dag_id == self.key).count() == 0
+            assert session.query(TI).filter(TI.dag_id == self.key).count() == 0
+            assert session.query(TF).filter(TF.dag_id == self.key).count() == 0
+            assert session.query(TR).filter(TR.dag_id == self.key).count() == 0
+            assert session.query(LOG).filter(LOG.dag_id == self.key).count() == expect_logs
+            assert session.query(IE).filter(IE.filename == self.dag_file_path).count() == 0
 
     def test_delete_dag_successful_delete(self):
         self.setup_dag_models()
diff --git a/tests/api/common/experimental/test_mark_tasks.py b/tests/api/common/experimental/test_mark_tasks.py
index 4ff0931..a9f1c86 100644
--- a/tests/api/common/experimental/test_mark_tasks.py
+++ b/tests/api/common/experimental/test_mark_tasks.py
@@ -104,18 +104,18 @@ class TestMarkTasks(unittest.TestCase):
 
         tis = session.query(TI).filter(TI.dag_id == dag.dag_id, TI.execution_date.in_(execution_dates)).all()
 
-        self.assertTrue(len(tis) > 0)
+        assert len(tis) > 0
 
         for ti in tis:  # pylint: disable=too-many-nested-blocks
-            self.assertEqual(ti.operator, dag.get_task(ti.task_id).task_type)
+            assert ti.operator == dag.get_task(ti.task_id).task_type
             if ti.task_id in task_ids and ti.execution_date in execution_dates:
-                self.assertEqual(ti.state, state)
+                assert ti.state == state
                 if state in State.finished:
-                    self.assertIsNotNone(ti.end_date)
+                    assert ti.end_date is not None
             else:
                 for old_ti in old_tis:
                     if old_ti.task_id == ti.task_id and old_ti.execution_date == ti.execution_date:
-                        self.assertEqual(ti.state, old_ti.state)
+                        assert ti.state == old_ti.state
 
     def test_mark_tasks_now(self):
         # set one task to success but do not commit
@@ -131,7 +131,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.SUCCESS,
             commit=False,
         )
-        self.assertEqual(len(altered), 1)
+        assert len(altered) == 1
         self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], None, snapshot)
 
         # set one and only one task to success
@@ -145,7 +145,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.SUCCESS,
             commit=True,
         )
-        self.assertEqual(len(altered), 1)
+        assert len(altered) == 1
         self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot)
 
         # set no tasks
@@ -159,7 +159,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.SUCCESS,
             commit=True,
         )
-        self.assertEqual(len(altered), 0)
+        assert len(altered) == 0
         self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot)
 
         # set task to other than success
@@ -173,7 +173,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.FAILED,
             commit=True,
         )
-        self.assertEqual(len(altered), 1)
+        assert len(altered) == 1
         self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.FAILED, snapshot)
 
         # don't alter other tasks
@@ -189,7 +189,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.SUCCESS,
             commit=True,
         )
-        self.assertEqual(len(altered), 1)
+        assert len(altered) == 1
         self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot)
 
         # set one task as FAILED. dag3 has schedule_interval None
@@ -206,7 +206,7 @@ class TestMarkTasks(unittest.TestCase):
             commit=True,
         )
         # exactly one TaskInstance should have been altered
-        self.assertEqual(len(altered), 1)
+        assert len(altered) == 1
         # task should have been marked as failed
         self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[1]], State.FAILED, snapshot)
         # tasks on other days should be unchanged
@@ -231,7 +231,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.SUCCESS,
             commit=True,
         )
-        self.assertEqual(len(altered), 3)
+        assert len(altered) == 3
         self.verify_state(self.dag1, task_ids, [self.execution_dates[0]], State.SUCCESS, snapshot)
 
     def test_mark_upstream(self):
@@ -252,7 +252,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.SUCCESS,
             commit=True,
         )
-        self.assertEqual(len(altered), 4)
+        assert len(altered) == 4
         self.verify_state(self.dag1, task_ids, [self.execution_dates[0]], State.SUCCESS, snapshot)
 
     def test_mark_tasks_future(self):
@@ -269,7 +269,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.SUCCESS,
             commit=True,
         )
-        self.assertEqual(len(altered), 2)
+        assert len(altered) == 2
         self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot)
 
         snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
@@ -284,7 +284,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.FAILED,
             commit=True,
         )
-        self.assertEqual(len(altered), 2)
+        assert len(altered) == 2
         self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[0]], None, snapshot)
         self.verify_state(self.dag3, [task.task_id], self.dag3_execution_dates[1:], State.FAILED, snapshot)
 
@@ -302,7 +302,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.SUCCESS,
             commit=True,
         )
-        self.assertEqual(len(altered), 2)
+        assert len(altered) == 2
         self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot)
 
         snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
@@ -317,7 +317,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.FAILED,
             commit=True,
         )
-        self.assertEqual(len(altered), 2)
+        assert len(altered) == 2
         self.verify_state(self.dag3, [task.task_id], self.dag3_execution_dates[:2], State.FAILED, snapshot)
         self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[2]], None, snapshot)
 
@@ -335,7 +335,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.SUCCESS,
             commit=True,
         )
-        self.assertEqual(len(altered), 2)
+        assert len(altered) == 2
         self.verify_state(
             self.dag1, [task.task_id for task in tasks], [self.execution_dates[0]], State.SUCCESS, snapshot
         )
@@ -361,7 +361,7 @@ class TestMarkTasks(unittest.TestCase):
             state=State.SUCCESS,
             commit=True,
         )
-        self.assertEqual(len(altered), 14)
+        assert len(altered) == 14
 
         # cannot use snapshot here as that will require drilling down the
         # sub dag tree essentially recreating the same code as in the
@@ -397,19 +397,19 @@ class TestMarkDAGRun(unittest.TestCase):
         dr.get_task_instance('run_this_last').set_state(State.FAILED)
 
     def _verify_task_instance_states_remain_default(self, dr):
-        self.assertEqual(dr.get_task_instance('runme_0').state, State.SUCCESS)
-        self.assertEqual(dr.get_task_instance('runme_1').state, State.SKIPPED)
-        self.assertEqual(dr.get_task_instance('runme_2').state, State.UP_FOR_RETRY)
-        self.assertEqual(dr.get_task_instance('also_run_this').state, State.QUEUED)
-        self.assertEqual(dr.get_task_instance('run_after_loop').state, State.RUNNING)
-        self.assertEqual(dr.get_task_instance('run_this_last').state, State.FAILED)
+        assert dr.get_task_instance('runme_0').state == State.SUCCESS
+        assert dr.get_task_instance('runme_1').state == State.SKIPPED
+        assert dr.get_task_instance('runme_2').state == State.UP_FOR_RETRY
+        assert dr.get_task_instance('also_run_this').state == State.QUEUED
+        assert dr.get_task_instance('run_after_loop').state == State.RUNNING
+        assert dr.get_task_instance('run_this_last').state == State.FAILED
 
     @provide_session
     def _verify_task_instance_states(self, dag, date, state, session=None):
         TI = models.TaskInstance
         tis = session.query(TI).filter(TI.dag_id == dag.dag_id, TI.execution_date == date)
         for ti in tis:
-            self.assertEqual(ti.state, state)
+            assert ti.state == state
 
     def _create_test_dag_run(self, state, date):
         return self.dag1.create_dagrun(run_type=DagRunType.MANUAL, state=state, execution_date=date)
@@ -418,7 +418,7 @@ class TestMarkDAGRun(unittest.TestCase):
         drs = models.DagRun.find(dag_id=dag.dag_id, execution_date=date)
         dr = drs[0]
 
-        self.assertEqual(dr.get_state(), state)
+        assert dr.get_state() == state
 
     @provide_session
     def _verify_dag_run_dates(self, dag, date, state, middle_time, session=None):
@@ -428,13 +428,13 @@ class TestMarkDAGRun(unittest.TestCase):
         dr = session.query(DR).filter(DR.dag_id == dag.dag_id, DR.execution_date == date).one()
         if state == State.RUNNING:
             # Since the DAG is running, the start_date must be updated after creation
-            self.assertGreater(dr.start_date, middle_time)
+            assert dr.start_date > middle_time
             # If the dag is still running, we don't have an end date
-            self.assertIsNone(dr.end_date)
+            assert dr.end_date is None
         else:
             # If the dag is not running, there must be an end time
-            self.assertLess(dr.start_date, middle_time)
-            self.assertGreater(dr.end_date, middle_time)
+            assert dr.start_date < middle_time
+            assert dr.end_date > middle_time
 
     def test_set_running_dag_run_to_success(self):
         date = self.execution_dates[0]
@@ -445,7 +445,7 @@ class TestMarkDAGRun(unittest.TestCase):
         altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
 
         # All except the SUCCESS task should be altered.
-        self.assertEqual(len(altered), 5)
+        assert len(altered) == 5
         self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
         self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
         self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
@@ -459,9 +459,9 @@ class TestMarkDAGRun(unittest.TestCase):
         altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
 
         # Only running task should be altered.
-        self.assertEqual(len(altered), 1)
+        assert len(altered) == 1
         self._verify_dag_run_state(self.dag1, date, State.FAILED)
-        self.assertEqual(dr.get_task_instance('run_after_loop').state, State.FAILED)
+        assert dr.get_task_instance('run_after_loop').state == State.FAILED
         self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
 
     def test_set_running_dag_run_to_running(self):
@@ -473,7 +473,7 @@ class TestMarkDAGRun(unittest.TestCase):
         altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
 
         # None of the tasks should be altered, only the dag itself
-        self.assertEqual(len(altered), 0)
+        assert len(altered) == 0
         self._verify_dag_run_state(self.dag1, date, State.RUNNING)
         self._verify_task_instance_states_remain_default(dr)
         self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
@@ -487,7 +487,7 @@ class TestMarkDAGRun(unittest.TestCase):
         altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
 
         # All except the SUCCESS task should be altered.
-        self.assertEqual(len(altered), 5)
+        assert len(altered) == 5
         self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
         self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
         self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
@@ -501,9 +501,9 @@ class TestMarkDAGRun(unittest.TestCase):
         altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
 
         # Only running task should be altered.
-        self.assertEqual(len(altered), 1)
+        assert len(altered) == 1
         self._verify_dag_run_state(self.dag1, date, State.FAILED)
-        self.assertEqual(dr.get_task_instance('run_after_loop').state, State.FAILED)
+        assert dr.get_task_instance('run_after_loop').state == State.FAILED
         self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
 
     def test_set_success_dag_run_to_running(self):
@@ -515,7 +515,7 @@ class TestMarkDAGRun(unittest.TestCase):
         altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
 
         # None of the tasks should be altered, but only the dag object should be changed
-        self.assertEqual(len(altered), 0)
+        assert len(altered) == 0
         self._verify_dag_run_state(self.dag1, date, State.RUNNING)
         self._verify_task_instance_states_remain_default(dr)
         self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
@@ -529,7 +529,7 @@ class TestMarkDAGRun(unittest.TestCase):
         altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
 
         # All except the SUCCESS task should be altered.
-        self.assertEqual(len(altered), 5)
+        assert len(altered) == 5
         self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
         self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
         self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
@@ -543,9 +543,9 @@ class TestMarkDAGRun(unittest.TestCase):
         altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
 
         # Only running task should be altered.
-        self.assertEqual(len(altered), 1)
+        assert len(altered) == 1
         self._verify_dag_run_state(self.dag1, date, State.FAILED)
-        self.assertEqual(dr.get_task_instance('run_after_loop').state, State.FAILED)
+        assert dr.get_task_instance('run_after_loop').state == State.FAILED
         self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
 
     def test_set_failed_dag_run_to_running(self):
@@ -559,7 +559,7 @@ class TestMarkDAGRun(unittest.TestCase):
         altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
 
         # None of the tasks should be altered, since we've only altered the DAG itself
-        self.assertEqual(len(altered), 0)
+        assert len(altered) == 0
         self._verify_dag_run_state(self.dag1, date, State.RUNNING)
         self._verify_task_instance_states_remain_default(dr)
         self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
@@ -572,21 +572,21 @@ class TestMarkDAGRun(unittest.TestCase):
         will_be_altered = set_dag_run_state_to_running(self.dag1, date, commit=False)
 
         # None of the tasks will be altered.
-        self.assertEqual(len(will_be_altered), 0)
+        assert len(will_be_altered) == 0
         self._verify_dag_run_state(self.dag1, date, State.RUNNING)
         self._verify_task_instance_states_remain_default(dr)
 
         will_be_altered = set_dag_run_state_to_failed(self.dag1, date, commit=False)
 
         # Only the running task will be altered.
-        self.assertEqual(len(will_be_altered), 1)
+        assert len(will_be_altered) == 1
         self._verify_dag_run_state(self.dag1, date, State.RUNNING)
         self._verify_task_instance_states_remain_default(dr)
 
         will_be_altered = set_dag_run_state_to_success(self.dag1, date, commit=False)
 
         # All except the SUCCESS task should be altered.
-        self.assertEqual(len(will_be_altered), 5)
+        assert len(will_be_altered) == 5
         self._verify_dag_run_state(self.dag1, date, State.RUNNING)
         self._verify_task_instance_states_remain_default(dr)
 
@@ -620,7 +620,7 @@ class TestMarkDAGRun(unittest.TestCase):
             count += sum(subdag_counts)
             return count
 
-        self.assertEqual(len(altered), count_dag_tasks(self.dag2))
+        assert len(altered) == count_dag_tasks(self.dag2)
         self._verify_dag_run_state(self.dag2, self.execution_dates[1], State.SUCCESS)
 
         # Make sure other dag status are not changed
@@ -632,29 +632,29 @@ class TestMarkDAGRun(unittest.TestCase):
     def test_set_dag_run_state_edge_cases(self):
         # Dag does not exist
         altered = set_dag_run_state_to_success(None, self.execution_dates[0])
-        self.assertEqual(len(altered), 0)
+        assert len(altered) == 0
         altered = set_dag_run_state_to_failed(None, self.execution_dates[0])
-        self.assertEqual(len(altered), 0)
+        assert len(altered) == 0
         altered = set_dag_run_state_to_running(None, self.execution_dates[0])
-        self.assertEqual(len(altered), 0)
+        assert len(altered) == 0
 
         # Invalid execution date
         altered = set_dag_run_state_to_success(self.dag1, None)
-        self.assertEqual(len(altered), 0)
+        assert len(altered) == 0
         altered = set_dag_run_state_to_failed(self.dag1, None)
-        self.assertEqual(len(altered), 0)
+        assert len(altered) == 0
         altered = set_dag_run_state_to_running(self.dag1, None)
-        self.assertEqual(len(altered), 0)
+        assert len(altered) == 0
 
         # This will throw ValueError since dag.latest_execution_date
         # does not exist.
-        self.assertRaises(
-            ValueError, set_dag_run_state_to_success, self.dag2, timezone.make_naive(self.execution_dates[0])
-        )
+        with pytest.raises(ValueError):
+            set_dag_run_state_to_success(self.dag2, timezone.make_naive(self.execution_dates[0]))
         # altered = set_dag_run_state_to_success(self.dag1, self.execution_dates[0])
         # DagRun does not exist
         # This will throw ValueError since dag.latest_execution_date does not exist
-        self.assertRaises(ValueError, set_dag_run_state_to_success, self.dag2, self.execution_dates[0])
+        with pytest.raises(ValueError):
+            set_dag_run_state_to_success(self.dag2, self.execution_dates[0])
 
     def test_set_dag_run_state_to_failed_no_running_tasks(self):
         """
diff --git a/tests/api/common/experimental/test_pool.py b/tests/api/common/experimental/test_pool.py
index 4338239..ae00226 100644
--- a/tests/api/common/experimental/test_pool.py
+++ b/tests/api/common/experimental/test_pool.py
@@ -20,6 +20,8 @@ import random
 import string
 import unittest
 
+import pytest
+
 from airflow import models
 from airflow.api.common.experimental import pool as pool_api
 from airflow.exceptions import AirflowBadRequest, PoolNotFound
@@ -49,88 +51,82 @@ class TestPool(unittest.TestCase):
 
     def test_get_pool(self):
         pool = pool_api.get_pool(name=self.pools[0].pool)
-        self.assertEqual(pool.pool, self.pools[0].pool)
+        assert pool.pool == self.pools[0].pool
 
     def test_get_pool_non_existing(self):
-        self.assertRaisesRegex(PoolNotFound, "^Pool 'test' doesn't exist$", pool_api.get_pool, name='test')
+        with pytest.raises(PoolNotFound, match="^Pool 'test' doesn't exist$"):
+            pool_api.get_pool(name='test')
 
     def test_get_pool_bad_name(self):
         for name in ('', '    '):
-            self.assertRaisesRegex(
-                AirflowBadRequest, "^Pool name shouldn't be empty$", pool_api.get_pool, name=name
-            )
+            with pytest.raises(AirflowBadRequest, match="^Pool name shouldn't be empty$"):
+                pool_api.get_pool(name=name)
 
     def test_get_pools(self):
         pools = sorted(pool_api.get_pools(), key=lambda p: p.pool)
-        self.assertEqual(pools[0].pool, self.pools[0].pool)
-        self.assertEqual(pools[1].pool, self.pools[1].pool)
+        assert pools[0].pool == self.pools[0].pool
+        assert pools[1].pool == self.pools[1].pool
 
     def test_create_pool(self):
         pool = pool_api.create_pool(name='foo', slots=5, description='')
-        self.assertEqual(pool.pool, 'foo')
-        self.assertEqual(pool.slots, 5)
-        self.assertEqual(pool.description, '')
+        assert pool.pool == 'foo'
+        assert pool.slots == 5
+        assert pool.description == ''
         with create_session() as session:
-            self.assertEqual(session.query(models.Pool).count(), self.TOTAL_POOL_COUNT + 1)
+            assert session.query(models.Pool).count() == self.TOTAL_POOL_COUNT + 1
 
     def test_create_pool_existing(self):
         pool = pool_api.create_pool(name=self.pools[0].pool, slots=5, description='')
-        self.assertEqual(pool.pool, self.pools[0].pool)
-        self.assertEqual(pool.slots, 5)
-        self.assertEqual(pool.description, '')
+        assert pool.pool == self.pools[0].pool
+        assert pool.slots == 5
+        assert pool.description == ''
         with create_session() as session:
-            self.assertEqual(session.query(models.Pool).count(), self.TOTAL_POOL_COUNT)
+            assert session.query(models.Pool).count() == self.TOTAL_POOL_COUNT
 
     def test_create_pool_bad_name(self):
         for name in ('', '    '):
-            self.assertRaisesRegex(
-                AirflowBadRequest,
-                "^Pool name shouldn't be empty$",
-                pool_api.create_pool,
-                name=name,
-                slots=5,
-                description='',
-            )
+            with pytest.raises(AirflowBadRequest, match="^Pool name shouldn't be empty$"):
+                pool_api.create_pool(
+                    name=name,
+                    slots=5,
+                    description='',
+                )
 
     def test_create_pool_name_too_long(self):
         long_name = ''.join(random.choices(string.ascii_lowercase, k=300))
         column_length = models.Pool.pool.property.columns[0].type.length
-        self.assertRaisesRegex(
-            AirflowBadRequest,
-            "^Pool name can't be more than %d characters$" % column_length,
-            pool_api.create_pool,
-            name=long_name,
-            slots=5,
-            description='',
-        )
+        with pytest.raises(
+            AirflowBadRequest, match="^Pool name can't be more than %d characters$" % column_length
+        ):
+            pool_api.create_pool(
+                name=long_name,
+                slots=5,
+                description='',
+            )
 
     def test_create_pool_bad_slots(self):
-        self.assertRaisesRegex(
-            AirflowBadRequest,
-            "^Bad value for `slots`: foo$",
-            pool_api.create_pool,
-            name='foo',
-            slots='foo',
-            description='',
-        )
+        with pytest.raises(AirflowBadRequest, match="^Bad value for `slots`: foo$"):
+            pool_api.create_pool(
+                name='foo',
+                slots='foo',
+                description='',
+            )
 
     def test_delete_pool(self):
         pool = pool_api.delete_pool(name=self.pools[-1].pool)
-        self.assertEqual(pool.pool, self.pools[-1].pool)
+        assert pool.pool == self.pools[-1].pool
         with create_session() as session:
-            self.assertEqual(session.query(models.Pool).count(), self.TOTAL_POOL_COUNT - 1)
+            assert session.query(models.Pool).count() == self.TOTAL_POOL_COUNT - 1
 
     def test_delete_pool_non_existing(self):
-        self.assertRaisesRegex(
-            pool_api.PoolNotFound, "^Pool 'test' doesn't exist$", pool_api.delete_pool, name='test'
-        )
+        with pytest.raises(pool_api.PoolNotFound, match="^Pool 'test' doesn't exist$"):
+            pool_api.delete_pool(name='test')
 
     def test_delete_pool_bad_name(self):
         for name in ('', '    '):
-            self.assertRaisesRegex(
-                AirflowBadRequest, "^Pool name shouldn't be empty$", pool_api.delete_pool, name=name
-            )
+            with pytest.raises(AirflowBadRequest, match="^Pool name shouldn't be empty$"):
+                pool_api.delete_pool(name=name)
 
     def test_delete_default_pool_not_allowed(self):
-        with self.assertRaisesRegex(AirflowBadRequest, "^default_pool cannot be deleted$"):
+        with pytest.raises(AirflowBadRequest, match="^default_pool cannot be deleted$"):
             pool_api.delete_pool(Pool.DEFAULT_POOL_NAME)
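
pytest.raises treats match as a regular expression checked with re.search
against the string form of the exception, mirroring assertRaisesRegex; the
^...$ anchors above therefore still pin the whole message. Roughly:

    with pytest.raises(AirflowBadRequest, match="^Pool name shouldn't be empty$"):
        pool_api.create_pool(name='', slots=5, description='')
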
diff --git a/tests/api/common/experimental/test_trigger_dag.py b/tests/api/common/experimental/test_trigger_dag.py
index 9fb772d..cbca935 100644
--- a/tests/api/common/experimental/test_trigger_dag.py
+++ b/tests/api/common/experimental/test_trigger_dag.py
@@ -19,6 +19,7 @@
 import unittest
 from unittest import mock
 
+import pytest
 from parameterized import parameterized
 
 from airflow.api.common.experimental.trigger_dag import _trigger_dag
@@ -38,7 +39,7 @@ class TestTriggerDag(unittest.TestCase):
     @mock.patch('airflow.models.DagBag')
     def test_trigger_dag_dag_not_found(self, dag_bag_mock):
         dag_bag_mock.dags = {}
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             _trigger_dag('dag_not_found', dag_bag_mock)
 
     @mock.patch('airflow.api.common.experimental.trigger_dag.DagRun', spec=DagRun)
@@ -49,7 +50,7 @@ class TestTriggerDag(unittest.TestCase):
         dag_bag_mock.dags = [dag_id]
         dag_bag_mock.get_dag.return_value = dag
         dag_run_mock.find.return_value = DagRun()
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             _trigger_dag(dag_id, dag_bag_mock)
 
     @mock.patch('airflow.models.DAG')
@@ -66,7 +67,7 @@ class TestTriggerDag(unittest.TestCase):
 
         triggers = _trigger_dag(dag_id, dag_bag_mock)
 
-        self.assertEqual(3, len(triggers))
+        assert 3 == len(triggers)
 
     @mock.patch('airflow.models.DAG')
     @mock.patch('airflow.api.common.experimental.trigger_dag.DagRun', spec=DagRun)
@@ -82,7 +83,7 @@ class TestTriggerDag(unittest.TestCase):
 
         triggers = _trigger_dag(dag_id, dag_bag_mock)
 
-        self.assertEqual(3, len(triggers))
+        assert 3 == len(triggers)
 
     @mock.patch('airflow.models.DagBag')
     def test_trigger_dag_with_too_early_start_date(self, dag_bag_mock):
@@ -91,7 +92,7 @@ class TestTriggerDag(unittest.TestCase):
         dag_bag_mock.dags = [dag_id]
         dag_bag_mock.get_dag.return_value = dag
 
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             _trigger_dag(dag_id, dag_bag_mock, execution_date=timezone.datetime(2015, 7, 5, 10, 10, 0))
 
     @mock.patch('airflow.models.DagBag')
@@ -124,4 +125,4 @@ class TestTriggerDag(unittest.TestCase):
 
         triggers = _trigger_dag(dag_id, dag_bag_mock, conf=conf)
 
-        self.assertEqual(triggers[0].conf, expected_conf)
+        assert triggers[0].conf == expected_conf
diff --git a/tests/api_connexion/endpoints/test_connection_endpoint.py b/tests/api_connexion/endpoints/test_connection_endpoint.py
index 4ca560e..d092e42 100644
--- a/tests/api_connexion/endpoints/test_connection_endpoint.py
+++ b/tests/api_connexion/endpoints/test_connection_endpoint.py
@@ -87,15 +87,12 @@ class TestDeleteConnection(TestConnectionEndpoint):
             "/api/v1/connections/test-connection", environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 404
-        self.assertEqual(
-            response.json,
-            {
-                'detail': "The Connection with connection_id: `test-connection` was not found",
-                'status': 404,
-                'title': 'Connection not found',
-                'type': EXCEPTIONS_LINK_MAP[404],
-            },
-        )
+        assert response.json == {
+            'detail': "The Connection with connection_id: `test-connection` was not found",
+            'status': 404,
+            'title': 'Connection not found',
+            'type': EXCEPTIONS_LINK_MAP[404],
+        }
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.delete("/api/v1/connections/test-connection")
@@ -128,32 +125,26 @@ class TestGetConnection(TestConnectionEndpoint):
             "/api/v1/connections/test-connection-id", environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 200
-        self.assertEqual(
-            response.json,
-            {
-                "connection_id": "test-connection-id",
-                "conn_type": 'mysql',
-                "host": 'mysql',
-                "login": 'login',
-                'schema': 'testschema',
-                'port': 80,
-            },
-        )
+        assert response.json == {
+            "connection_id": "test-connection-id",
+            "conn_type": 'mysql',
+            "host": 'mysql',
+            "login": 'login',
+            'schema': 'testschema',
+            'port': 80,
+        }
 
     def test_should_respond_404(self):
         response = self.client.get(
             "/api/v1/connections/invalid-connection", environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 404
-        self.assertEqual(
-            {
-                'detail': "The Connection with connection_id: `invalid-connection` was not found",
-                'status': 404,
-                'title': 'Connection not found',
-                'type': EXCEPTIONS_LINK_MAP[404],
-            },
-            response.json,
-        )
+        assert {
+            'detail': "The Connection with connection_id: `invalid-connection` was not found",
+            'status': 404,
+            'title': 'Connection not found',
+            'type': EXCEPTIONS_LINK_MAP[404],
+        } == response.json
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.get("/api/v1/connections/test-connection-id")
@@ -173,30 +164,27 @@ class TestGetConnections(TestConnectionEndpoint):
         assert len(result) == 2
         response = self.client.get("/api/v1/connections", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(
-            response.json,
-            {
-                'connections': [
-                    {
-                        "connection_id": "test-connection-id-1",
-                        "conn_type": 'test_type',
-                        "host": None,
-                        "login": None,
-                        'schema': None,
-                        'port': None,
-                    },
-                    {
-                        "connection_id": "test-connection-id-2",
-                        "conn_type": 'test_type',
-                        "host": None,
-                        "login": None,
-                        'schema': None,
-                        'port': None,
-                    },
-                ],
-                'total_entries': 2,
-            },
-        )
+        assert response.json == {
+            'connections': [
+                {
+                    "connection_id": "test-connection-id-1",
+                    "conn_type": 'test_type',
+                    "host": None,
+                    "login": None,
+                    'schema': None,
+                    'port': None,
+                },
+                {
+                    "connection_id": "test-connection-id-2",
+                    "conn_type": 'test_type',
+                    "host": None,
+                    "login": None,
+                    'schema': None,
+                    'port': None,
+                },
+            ],
+            'total_entries': 2,
+        }
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.get("/api/v1/connections")
@@ -249,9 +237,9 @@ class TestGetConnectionsPagination(TestConnectionEndpoint):
         session.commit()
         response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(response.json["total_entries"], 10)
+        assert response.json["total_entries"] == 10
         conn_ids = [conn["connection_id"] for conn in response.json["connections"] if conn]
-        self.assertEqual(conn_ids, expected_conn_ids)
+        assert conn_ids == expected_conn_ids
 
     @provide_session
     def test_should_respect_page_size_limit_default(self, session):
@@ -262,8 +250,8 @@ class TestGetConnectionsPagination(TestConnectionEndpoint):
         response = self.client.get("/api/v1/connections", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
 
-        self.assertEqual(response.json["total_entries"], 200)
-        self.assertEqual(len(response.json["connections"]), 100)
+        assert response.json["total_entries"] == 200
+        assert len(response.json["connections"]) == 100
 
     @provide_session
     def test_limit_of_zero_should_return_default(self, session):
@@ -274,8 +262,8 @@ class TestGetConnectionsPagination(TestConnectionEndpoint):
         response = self.client.get("/api/v1/connections?limit=0", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
 
-        self.assertEqual(response.json["total_entries"], 200)
-        self.assertEqual(len(response.json["connections"]), 100)
+        assert response.json["total_entries"] == 200
+        assert len(response.json["connections"]) == 100
 
     @provide_session
     @conf_vars({("api", "maximum_page_limit"): "150"})
@@ -286,7 +274,7 @@ class TestGetConnectionsPagination(TestConnectionEndpoint):
 
         response = self.client.get("/api/v1/connections?limit=180", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(len(response.json['connections']), 150)
+        assert len(response.json['connections']) == 150
 
     def _create_connections(self, count):
         return [
@@ -329,19 +317,16 @@ class TestPatchConnection(TestConnectionEndpoint):
         )
         assert response.status_code == 200
         connection = session.query(Connection).filter_by(conn_id=test_connection).first()
-        self.assertEqual(connection.password, None)
-        self.assertEqual(
-            response.json,
-            {
-                "connection_id": test_connection,  # not updated
-                "conn_type": 'test_type',  # Not updated
-                "extra": None,  # Not updated
-                'login': "login",  # updated
-                "port": 80,  # updated
-                "schema": None,
-                "host": None,
-            },
-        )
+        assert connection.password is None
+        assert response.json == {
+            "connection_id": test_connection,  # not updated
+            "conn_type": 'test_type',  # Not updated
+            "extra": None,  # Not updated
+            'login': "login",  # updated
+            "port": 80,  # updated
+            "schema": None,
+            "host": None,
+        }
 
     @parameterized.expand(
         [
@@ -400,7 +385,7 @@ class TestPatchConnection(TestConnectionEndpoint):
             environ_overrides={'REMOTE_USER': "test"},
         )
         assert response.status_code == 400
-        self.assertEqual(response.json['detail'], error_message)
+        assert response.json['detail'] == error_message
 
     @parameterized.expand(
         [
@@ -438,7 +423,7 @@ class TestPatchConnection(TestConnectionEndpoint):
             "/api/v1/connections/test-connection-id", json=payload, environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 400
-        self.assertIn(error_message, response.json['detail'])
+        assert error_message in response.json['detail']
 
     def test_patch_should_respond_404_not_found(self):
         payload = {"connection_id": "test-connection-id", "conn_type": "test-type", "port": 90}
@@ -446,15 +431,12 @@ class TestPatchConnection(TestConnectionEndpoint):
             "/api/v1/connections/test-connection-id", json=payload, environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 404
-        self.assertEqual(
-            {
-                'detail': "The Connection with connection_id: `test-connection-id` was not found",
-                'status': 404,
-                'title': 'Connection not found',
-                'type': EXCEPTIONS_LINK_MAP[404],
-            },
-            response.json,
-        )
+        assert response.json == {
+            'detail': "The Connection with connection_id: `test-connection-id` was not found",
+            'status': 404,
+            'title': 'Connection not found',
+            'type': EXCEPTIONS_LINK_MAP[404],
+        }
 
     @provide_session
     def test_should_raises_401_unauthenticated(self, session):
@@ -478,7 +460,7 @@ class TestPostConnection(TestConnectionEndpoint):
         assert response.status_code == 200
         connection = session.query(Connection).all()
         assert len(connection) == 1
-        self.assertEqual(connection[0].conn_id, 'test-connection-id')
+        assert connection[0].conn_id == 'test-connection-id'
 
     def test_post_should_respond_400_for_invalid_payload(self):
         payload = {
@@ -488,15 +470,12 @@ class TestPostConnection(TestConnectionEndpoint):
             "/api/v1/connections", json=payload, environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 400
-        self.assertEqual(
-            response.json,
-            {
-                'detail': "{'conn_type': ['Missing data for required field.']}",
-                'status': 400,
-                'title': 'Bad Request',
-                'type': EXCEPTIONS_LINK_MAP[400],
-            },
-        )
+        assert response.json == {
+            'detail': "{'conn_type': ['Missing data for required field.']}",
+            'status': 400,
+            'title': 'Bad Request',
+            'type': EXCEPTIONS_LINK_MAP[400],
+        }
 
     def test_post_should_respond_409_already_exist(self):
         payload = {"connection_id": "test-connection-id", "conn_type": 'test_type'}
@@ -509,15 +488,12 @@ class TestPostConnection(TestConnectionEndpoint):
             "/api/v1/connections", json=payload, environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 409
-        self.assertEqual(
-            response.json,
-            {
-                'detail': 'Connection already exist. ID: test-connection-id',
-                'status': 409,
-                'title': 'Conflict',
-                'type': EXCEPTIONS_LINK_MAP[409],
-            },
-        )
+        assert response.json == {
+            'detail': 'Connection already exist. ID: test-connection-id',
+            'status': 409,
+            'title': 'Conflict',
+            'type': EXCEPTIONS_LINK_MAP[409],
+        }
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.post(
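
These conversions rely on pytest's assertion rewriting: plain assert statements are
rewritten on import, so a failing comparison reports a per-key diff of both dicts,
matching what self.assertEqual printed. A minimal sketch, assuming a pytest runner
(which the Airflow suite uses); names and values here are illustrative, not from the
commit:

    def test_plain_assert_shows_a_dict_diff():
        actual = {"status": 404, "title": "Connection not found"}
        expected = {"status": 404, "title": "Not Found"}
        # On failure pytest introspects both operands and prints which
        # keys differ, so dropping self.assertEqual loses no detail.
        assert actual == expected
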
diff --git a/tests/api_connexion/endpoints/test_dag_endpoint.py b/tests/api_connexion/endpoints/test_dag_endpoint.py
index 614e8c2..cf75435 100644
--- a/tests/api_connexion/endpoints/test_dag_endpoint.py
+++ b/tests/api_connexion/endpoints/test_dag_endpoint.py
@@ -116,21 +116,18 @@ class TestGetDag(TestDagEndpoint):
         self._create_dag_models(1)
         response = self.client.get("/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(
-            {
-                "dag_id": "TEST_DAG_1",
-                "description": None,
-                "fileloc": "/tmp/dag_1.py",
-                "file_token": 'Ii90bXAvZGFnXzEucHki.EnmIdPaUPo26lHQClbWMbDFD1Pk',
-                "is_paused": False,
-                "is_subdag": False,
-                "owners": [],
-                "root_dag_id": None,
-                "schedule_interval": {"__type": "CronExpression", "value": "2 2 * * *"},
-                "tags": [],
-            },
-            response.json,
-        )
+        assert response.json == {
+            "dag_id": "TEST_DAG_1",
+            "description": None,
+            "fileloc": "/tmp/dag_1.py",
+            "file_token": 'Ii90bXAvZGFnXzEucHki.EnmIdPaUPo26lHQClbWMbDFD1Pk',
+            "is_paused": False,
+            "is_subdag": False,
+            "owners": [],
+            "root_dag_id": None,
+            "schedule_interval": {"__type": "CronExpression", "value": "2 2 * * *"},
+            "tags": [],
+        }
 
     @conf_vars({("webserver", "secret_key"): "mysecret"})
     @provide_session
@@ -144,21 +141,18 @@ class TestGetDag(TestDagEndpoint):
         session.commit()
         response = self.client.get("/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(
-            {
-                "dag_id": "TEST_DAG_1",
-                "description": None,
-                "fileloc": "/tmp/dag_1.py",
-                "file_token": 'Ii90bXAvZGFnXzEucHki.EnmIdPaUPo26lHQClbWMbDFD1Pk',
-                "is_paused": False,
-                "is_subdag": False,
-                "owners": [],
-                "root_dag_id": None,
-                "schedule_interval": None,
-                "tags": [],
-            },
-            response.json,
-        )
+        assert response.json == {
+            "dag_id": "TEST_DAG_1",
+            "description": None,
+            "fileloc": "/tmp/dag_1.py",
+            "file_token": 'Ii90bXAvZGFnXzEucHki.EnmIdPaUPo26lHQClbWMbDFD1Pk',
+            "is_paused": False,
+            "is_subdag": False,
+            "owners": [],
+            "root_dag_id": None,
+            "schedule_interval": None,
+            "tags": [],
+        }
 
     def test_should_respond_200_with_granular_dag_access(self):
         self._create_dag_models(1)
@@ -331,15 +325,12 @@ class TestGetDagDetails(TestDagEndpoint):
             "/api/v1/dags/non_existing_dag_id/details", environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 404
-        self.assertEqual(
-            response.json,
-            {
-                'detail': 'The DAG with dag_id: non_existing_dag_id was not found',
-                'status': 404,
-                'title': 'DAG not found',
-                'type': EXCEPTIONS_LINK_MAP[404],
-            },
-        )
+        assert response.json == {
+            'detail': 'The DAG with dag_id: non_existing_dag_id was not found',
+            'status': 404,
+            'title': 'DAG not found',
+            'type': EXCEPTIONS_LINK_MAP[404],
+        }
 
 
 class TestGetDags(TestDagEndpoint):
@@ -350,44 +341,41 @@ class TestGetDags(TestDagEndpoint):
         file_token = SERIALIZER.dumps("/tmp/dag_1.py")
         file_token2 = SERIALIZER.dumps("/tmp/dag_2.py")
         assert response.status_code == 200
-        self.assertEqual(
-            {
-                "dags": [
-                    {
-                        "dag_id": "TEST_DAG_1",
-                        "description": None,
-                        "fileloc": "/tmp/dag_1.py",
-                        "file_token": file_token,
-                        "is_paused": False,
-                        "is_subdag": False,
-                        "owners": [],
-                        "root_dag_id": None,
-                        "schedule_interval": {
-                            "__type": "CronExpression",
-                            "value": "2 2 * * *",
-                        },
-                        "tags": [],
+        assert response.json == {
+            "dags": [
+                {
+                    "dag_id": "TEST_DAG_1",
+                    "description": None,
+                    "fileloc": "/tmp/dag_1.py",
+                    "file_token": file_token,
+                    "is_paused": False,
+                    "is_subdag": False,
+                    "owners": [],
+                    "root_dag_id": None,
+                    "schedule_interval": {
+                        "__type": "CronExpression",
+                        "value": "2 2 * * *",
                     },
-                    {
-                        "dag_id": "TEST_DAG_2",
-                        "description": None,
-                        "fileloc": "/tmp/dag_2.py",
-                        "file_token": file_token2,
-                        "is_paused": False,
-                        "is_subdag": False,
-                        "owners": [],
-                        "root_dag_id": None,
-                        "schedule_interval": {
-                            "__type": "CronExpression",
-                            "value": "2 2 * * *",
-                        },
-                        "tags": [],
+                    "tags": [],
+                },
+                {
+                    "dag_id": "TEST_DAG_2",
+                    "description": None,
+                    "fileloc": "/tmp/dag_2.py",
+                    "file_token": file_token2,
+                    "is_paused": False,
+                    "is_subdag": False,
+                    "owners": [],
+                    "root_dag_id": None,
+                    "schedule_interval": {
+                        "__type": "CronExpression",
+                        "value": "2 2 * * *",
                     },
-                ],
-                "total_entries": 2,
-            },
-            response.json,
-        )
+                    "tags": [],
+                },
+            ],
+            "total_entries": 2,
+        }
 
     def test_should_respond_200_with_granular_dag_access(self):
         self._create_dag_models(3)
@@ -435,8 +423,8 @@ class TestGetDags(TestDagEndpoint):
 
         dag_ids = [dag["dag_id"] for dag in response.json["dags"]]
 
-        self.assertEqual(expected_dag_ids, dag_ids)
-        self.assertEqual(10, response.json["total_entries"])
+        assert expected_dag_ids == dag_ids
+        assert 10 == response.json["total_entries"]
 
     def test_should_respond_200_default_limit(self):
         self._create_dag_models(101)
@@ -445,8 +433,8 @@ class TestGetDags(TestDagEndpoint):
 
         assert response.status_code == 200
 
-        self.assertEqual(100, len(response.json["dags"]))
-        self.assertEqual(101, response.json["total_entries"])
+        assert 100 == len(response.json["dags"])
+        assert 101 == response.json["total_entries"]
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.get("api/v1/dags")
@@ -474,7 +462,7 @@ class TestPatchDag(TestDagEndpoint):
             },
             environ_overrides={'REMOTE_USER': "test"},
         )
-        self.assertEqual(response.status_code, 200)
+        assert response.status_code == 200
 
         expected_response = {
             "dag_id": "TEST_DAG_1",
@@ -491,7 +479,7 @@ class TestPatchDag(TestDagEndpoint):
             },
             "tags": [],
         }
-        self.assertEqual(response.json, expected_response)
+        assert response.json == expected_response
 
     def test_should_respond_200_on_patch_with_granular_dag_access(self):
         self._create_dag_models(1)
@@ -514,20 +502,17 @@ class TestPatchDag(TestDagEndpoint):
         }
         dag_model = self._create_dag_model()
         response = self.client.patch(f"/api/v1/dags/{dag_model.dag_id}", json=patch_body)
-        self.assertEqual(response.status_code, 400)
-        self.assertEqual(
-            response.json,
-            {
-                'detail': "Property is read-only - 'schedule_interval'",
-                'status': 400,
-                'title': 'Bad Request',
-                'type': EXCEPTIONS_LINK_MAP[400],
-            },
-        )
+        assert response.status_code == 400
+        assert response.json == {
+            'detail': "Property is read-only - 'schedule_interval'",
+            'status': 400,
+            'title': 'Bad Request',
+            'type': EXCEPTIONS_LINK_MAP[400],
+        }
 
     def test_should_respond_404(self):
         response = self.client.get("/api/v1/dags/INVALID_DAG", environ_overrides={'REMOTE_USER': "test"})
-        self.assertEqual(response.status_code, 404)
+        assert response.status_code == 404
 
     @provide_session
     def _create_dag_model(self, session=None):
@@ -559,7 +544,7 @@ class TestPatchDag(TestDagEndpoint):
             environ_overrides={'REMOTE_USER': "test"},
         )
 
-        self.assertEqual(response.status_code, 200)
+        assert response.status_code == 200
         expected_response = {
             "dag_id": "TEST_DAG_1",
             "description": None,
@@ -575,7 +560,7 @@ class TestPatchDag(TestDagEndpoint):
             },
             "tags": [],
         }
-        self.assertEqual(response.json, expected_response)
+        assert response.json == expected_response
 
     @parameterized.expand(
         [
@@ -603,8 +588,8 @@ class TestPatchDag(TestDagEndpoint):
             json=payload,
             environ_overrides={'REMOTE_USER': "test"},
         )
-        self.assertEqual(response.status_code, 400)
-        self.assertEqual(response.json['detail'], error_message)
+        assert response.status_code == 400
+        assert response.json['detail'] == error_message
 
     def test_should_respond_403_unauthorized(self):
         dag_model = self._create_dag_model()
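
Unlike assertEqual(first, second), a plain comparison has no dedicated
expected-versus-actual slot: "assert expected_dag_ids == dag_ids" above fails with
exactly the same report as the reversed spelling, so one-line hunks are free to keep
unittest's expected-first order. A sketch with illustrative names:

    def test_operand_order_is_symmetric():
        dag_ids = ["TEST_DAG_1", "TEST_DAG_2"]
        expected_dag_ids = ["TEST_DAG_1", "TEST_DAG_2"]
        # pytest prints both sides of a failing comparison, so operand
        # order is a readability choice rather than a semantic one.
        assert dag_ids == expected_dag_ids
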
diff --git a/tests/api_connexion/endpoints/test_dag_run_endpoint.py b/tests/api_connexion/endpoints/test_dag_run_endpoint.py
index 794b7f2..48960fa 100644
--- a/tests/api_connexion/endpoints/test_dag_run_endpoint.py
+++ b/tests/api_connexion/endpoints/test_dag_run_endpoint.py
@@ -129,27 +129,24 @@ class TestDeleteDagRun(TestDagRunEndpoint):
         response = self.client.delete(
             "api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID_1", environ_overrides={'REMOTE_USER': "test"}
         )
-        self.assertEqual(response.status_code, 204)
+        assert response.status_code == 204
         # Check if the Dag Run is deleted from the database
         response = self.client.get(
             "api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID_1", environ_overrides={'REMOTE_USER': "test"}
         )
-        self.assertEqual(response.status_code, 404)
+        assert response.status_code == 404
 
     def test_should_respond_404(self):
         response = self.client.delete(
             "api/v1/dags/INVALID_DAG_RUN/dagRuns/INVALID_DAG_RUN", environ_overrides={'REMOTE_USER': "test"}
         )
-        self.assertEqual(response.status_code, 404)
-        self.assertEqual(
-            response.json,
-            {
-                "detail": "DAGRun with DAG ID: 'INVALID_DAG_RUN' and DagRun ID: 'INVALID_DAG_RUN' not found",
-                "status": 404,
-                "title": "Not Found",
-                "type": EXCEPTIONS_LINK_MAP[404],
-            },
-        )
+        assert response.status_code == 404
+        assert response.json == {
+            "detail": "DAGRun with DAG ID: 'INVALID_DAG_RUN' and DagRun ID: 'INVALID_DAG_RUN' not found",
+            "status": 404,
+            "title": "Not Found",
+            "type": EXCEPTIONS_LINK_MAP[404],
+        }
 
     @provide_session
     def test_should_raises_401_unauthenticated(self, session):
@@ -361,7 +358,7 @@ class TestGetDagRunsPagination(TestDagRunEndpoint):
             "api/v1/dags/TEST_DAG_ID/dagRuns?limit=180", environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 200
-        self.assertEqual(len(response.json["dag_runs"]), 150)
+        assert len(response.json["dag_runs"]) == 150
 
     def _create_dag_runs(self, count):
         dag_runs = [
@@ -770,7 +767,7 @@ class TestGetDagRunBatchDateFilters(TestDagRunEndpoint):
             "api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 400
-        self.assertEqual(response.json['detail'], expected_response)
+        assert response.json['detail'] == expected_response
 
     @parameterized.expand(
         [
@@ -818,20 +815,17 @@ class TestPostDagRun(TestDagRunEndpoint):
         response = self.client.post(
             "api/v1/dags/TEST_DAG_ID/dagRuns", json=request_json, environ_overrides={'REMOTE_USER': "test"}
         )
-        self.assertEqual(response.status_code, 200)
-        self.assertEqual(
-            {
-                "conf": {},
-                "dag_id": "TEST_DAG_ID",
-                "dag_run_id": response.json["dag_run_id"],
-                "end_date": None,
-                "execution_date": response.json["execution_date"],
-                "external_trigger": True,
-                "start_date": response.json["start_date"],
-                "state": "running",
-            },
-            response.json,
-        )
+        assert response.status_code == 200
+        assert response.json == {
+            "conf": {},
+            "dag_id": "TEST_DAG_ID",
+            "dag_run_id": response.json["dag_run_id"],
+            "end_date": None,
+            "execution_date": response.json["execution_date"],
+            "external_trigger": True,
+            "start_date": response.json["start_date"],
+            "state": "running",
+        }
 
     @parameterized.expand(
         [
@@ -847,8 +841,8 @@ class TestPostDagRun(TestDagRunEndpoint):
         response = self.client.post(
             "api/v1/dags/TEST_DAG_ID/dagRuns", json=data, environ_overrides={'REMOTE_USER': "test"}
         )
-        self.assertEqual(response.status_code, 400)
-        self.assertEqual(response.json['detail'], expected)
+        assert response.status_code == 400
+        assert response.json['detail'] == expected
 
     def test_response_404(self):
         response = self.client.post(
@@ -856,16 +850,13 @@ class TestPostDagRun(TestDagRunEndpoint):
             json={"dag_run_id": "TEST_DAG_RUN", "execution_date": self.default_time},
             environ_overrides={'REMOTE_USER': "test"},
         )
-        self.assertEqual(response.status_code, 404)
-        self.assertEqual(
-            {
-                "detail": "DAG with dag_id: 'TEST_DAG_ID' not found",
-                "status": 404,
-                "title": "DAG not found",
-                "type": EXCEPTIONS_LINK_MAP[404],
-            },
-            response.json,
-        )
+        assert response.status_code == 404
+        assert response.json == {
+            "detail": "DAG with dag_id: 'TEST_DAG_ID' not found",
+            "status": 404,
+            "title": "DAG not found",
+            "type": EXCEPTIONS_LINK_MAP[404],
+        }
 
     @parameterized.expand(
         [
@@ -903,8 +894,8 @@ class TestPostDagRun(TestDagRunEndpoint):
         session.add(dag_instance)
         session.commit()
         response = self.client.post(url, json=request_json, environ_overrides={'REMOTE_USER': "test"})
-        self.assertEqual(response.status_code, 400, response.data)
-        self.assertEqual(expected_response, response.json)
+        assert response.status_code == 400, response.data
+        assert expected_response == response.json
 
     def test_response_409(self):
         self._create_test_dag_run()
@@ -916,17 +907,14 @@ class TestPostDagRun(TestDagRunEndpoint):
             },
             environ_overrides={'REMOTE_USER': "test"},
         )
-        self.assertEqual(response.status_code, 409, response.data)
-        self.assertEqual(
-            response.json,
-            {
-                "detail": "DAGRun with DAG ID: 'TEST_DAG_ID' and "
-                "DAGRun ID: 'TEST_DAG_RUN_ID_1' already exists",
-                "status": 409,
-                "title": "Conflict",
-                "type": EXCEPTIONS_LINK_MAP[409],
-            },
-        )
+        assert response.status_code == 409, response.data
+        assert response.json == {
+            "detail": "DAGRun with DAG ID: 'TEST_DAG_ID' and "
+            "DAGRun ID: 'TEST_DAG_RUN_ID_1' already exists",
+            "status": 409,
+            "title": "Conflict",
+            "type": EXCEPTIONS_LINK_MAP[409],
+        }
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.post(
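
The "assert response.status_code == 400, response.data" lines above show how
assertEqual's msg argument carries over: the expression after the comma is evaluated
only when the assertion fails and is printed next to the introspected comparison. A
sketch with illustrative values:

    def test_assert_with_diagnostic_message():
        status_code, body = 409, b'{"title": "Conflict"}'
        # Were this assert to fail, pytest would print body alongside
        # the comparison, as unittest appended msg to its report.
        assert status_code == 409, body
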
diff --git a/tests/api_connexion/endpoints/test_dag_source_endpoint.py b/tests/api_connexion/endpoints/test_dag_source_endpoint.py
index baee620..4ad8236 100644
--- a/tests/api_connexion/endpoints/test_dag_source_endpoint.py
+++ b/tests/api_connexion/endpoints/test_dag_source_endpoint.py
@@ -91,9 +91,9 @@ class TestGetSource(unittest.TestCase):
                 url, headers={"Accept": "text/plain"}, environ_overrides={'REMOTE_USER': "test"}
             )
 
-            self.assertEqual(200, response.status_code)
-            self.assertIn(dag_docstring, response.data.decode())
-            self.assertEqual('text/plain', response.headers['Content-Type'])
+            assert 200 == response.status_code
+            assert dag_docstring in response.data.decode()
+            assert 'text/plain' == response.headers['Content-Type']
 
     @parameterized.expand([(True,), (False,)])
     def test_should_respond_200_json(self, store_dag_code):
@@ -111,9 +111,9 @@ class TestGetSource(unittest.TestCase):
                 url, headers={"Accept": 'application/json'}, environ_overrides={'REMOTE_USER': "test"}
             )
 
-            self.assertEqual(200, response.status_code)
-            self.assertIn(dag_docstring, response.json['content'])
-            self.assertEqual('application/json', response.headers['Content-Type'])
+            assert 200 == response.status_code
+            assert dag_docstring in response.json['content']
+            assert 'application/json' == response.headers['Content-Type']
 
     @parameterized.expand([(True,), (False,)])
     def test_should_respond_406(self, store_dag_code):
@@ -130,7 +130,7 @@ class TestGetSource(unittest.TestCase):
                 url, headers={"Accept": 'image/webp'}, environ_overrides={'REMOTE_USER': "test"}
             )
 
-            self.assertEqual(406, response.status_code)
+            assert 406 == response.status_code
 
     @parameterized.expand([(True,), (False,)])
     def test_should_respond_404(self, store_dag_code):
@@ -143,7 +143,7 @@ class TestGetSource(unittest.TestCase):
                 url, headers={"Accept": 'application/json'}, environ_overrides={'REMOTE_USER': "test"}
             )
 
-            self.assertEqual(404, response.status_code)
+            assert 404 == response.status_code
 
     def test_should_raises_401_unauthenticated(self):
         serializer = URLSafeSerializer(conf.get('webserver', 'SECRET_KEY'))
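
assertIn and assertNotIn map onto the in operator, as in the docstring checks above;
a failing membership test still prints the container, so no diagnostic detail is
lost. A sketch with illustrative values:

    def test_membership_checks_read_naturally():
        content = '"""A docstring for an example DAG."""'
        # self.assertIn(sub, s) becomes a plain membership test.
        assert "docstring" in content
        assert "should never appear" not in content
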
diff --git a/tests/api_connexion/endpoints/test_event_log_endpoint.py b/tests/api_connexion/endpoints/test_event_log_endpoint.py
index 11cf155..32020cf 100644
--- a/tests/api_connexion/endpoints/test_event_log_endpoint.py
+++ b/tests/api_connexion/endpoints/test_event_log_endpoint.py
@@ -89,27 +89,26 @@ class TestGetEventLog(TestEventLogEndpoint):
             f"/api/v1/eventLogs/{event_log_id}", environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 200
-        self.assertEqual(
-            response.json,
-            {
-                "event_log_id": event_log_id,
-                "event": "TEST_EVENT",
-                "dag_id": "TEST_DAG_ID",
-                "task_id": "TEST_TASK_ID",
-                "execution_date": self.default_time,
-                "owner": 'airflow',
-                "when": self.default_time,
-                "extra": None,
-            },
-        )
+        assert response.json == {
+            "event_log_id": event_log_id,
+            "event": "TEST_EVENT",
+            "dag_id": "TEST_DAG_ID",
+            "task_id": "TEST_TASK_ID",
+            "execution_date": self.default_time,
+            "owner": 'airflow',
+            "when": self.default_time,
+            "extra": None,
+        }
 
     def test_should_respond_404(self):
         response = self.client.get("/api/v1/eventLogs/1", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 404
-        self.assertEqual(
-            {'detail': None, 'status': 404, 'title': 'Event Log not found', 'type': EXCEPTIONS_LINK_MAP[404]},
-            response.json,
-        )
+        assert response.json == {
+            'detail': None,
+            'status': 404,
+            'title': 'Event Log not found',
+            'type': EXCEPTIONS_LINK_MAP[404],
+        }
 
     @provide_session
     def test_should_raises_401_unauthenticated(self, session):
@@ -152,44 +151,41 @@ class TestGetEventLogs(TestEventLogEndpoint):
         session.commit()
         response = self.client.get("/api/v1/eventLogs", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(
-            response.json,
-            {
-                "event_logs": [
-                    {
-                        "event_log_id": log_model_1.id,
-                        "event": "TEST_EVENT_1",
-                        "dag_id": "TEST_DAG_ID",
-                        "task_id": "TEST_TASK_ID",
-                        "execution_date": self.default_time,
-                        "owner": 'airflow',
-                        "when": self.default_time,
-                        "extra": None,
-                    },
-                    {
-                        "event_log_id": log_model_2.id,
-                        "event": "TEST_EVENT_2",
-                        "dag_id": "TEST_DAG_ID",
-                        "task_id": "TEST_TASK_ID",
-                        "execution_date": self.default_time,
-                        "owner": 'airflow',
-                        "when": self.default_time_2,
-                        "extra": None,
-                    },
-                    {
-                        "event_log_id": log_model_3.id,
-                        "event": "cli_scheduler",
-                        "dag_id": None,
-                        "task_id": None,
-                        "execution_date": None,
-                        "owner": 'root',
-                        "when": self.default_time_2,
-                        "extra": '{"host_name": "e24b454f002a"}',
-                    },
-                ],
-                "total_entries": 3,
-            },
-        )
+        assert response.json == {
+            "event_logs": [
+                {
+                    "event_log_id": log_model_1.id,
+                    "event": "TEST_EVENT_1",
+                    "dag_id": "TEST_DAG_ID",
+                    "task_id": "TEST_TASK_ID",
+                    "execution_date": self.default_time,
+                    "owner": 'airflow',
+                    "when": self.default_time,
+                    "extra": None,
+                },
+                {
+                    "event_log_id": log_model_2.id,
+                    "event": "TEST_EVENT_2",
+                    "dag_id": "TEST_DAG_ID",
+                    "task_id": "TEST_TASK_ID",
+                    "execution_date": self.default_time,
+                    "owner": 'airflow',
+                    "when": self.default_time_2,
+                    "extra": None,
+                },
+                {
+                    "event_log_id": log_model_3.id,
+                    "event": "cli_scheduler",
+                    "dag_id": None,
+                    "task_id": None,
+                    "execution_date": None,
+                    "owner": 'root',
+                    "when": self.default_time_2,
+                    "extra": '{"host_name": "e24b454f002a"}',
+                },
+            ],
+            "total_entries": 3,
+        }
 
     @provide_session
     def test_should_raises_401_unauthenticated(self, session):
@@ -258,9 +254,9 @@ class TestGetEventLogPagination(TestEventLogEndpoint):
         response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
 
-        self.assertEqual(response.json["total_entries"], 10)
+        assert response.json["total_entries"] == 10
         events = [event_log["event"] for event_log in response.json["event_logs"]]
-        self.assertEqual(events, expected_events)
+        assert events == expected_events
 
     @provide_session
     def test_should_respect_page_size_limit_default(self, session):
@@ -271,8 +267,8 @@ class TestGetEventLogPagination(TestEventLogEndpoint):
         response = self.client.get("/api/v1/eventLogs", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
 
-        self.assertEqual(response.json["total_entries"], 200)
-        self.assertEqual(len(response.json["event_logs"]), 100)  # default 100
+        assert response.json["total_entries"] == 200
+        assert len(response.json["event_logs"]) == 100  # default 100
 
     @provide_session
     @conf_vars({("api", "maximum_page_limit"): "150"})
@@ -283,7 +279,7 @@ class TestGetEventLogPagination(TestEventLogEndpoint):
 
         response = self.client.get("/api/v1/eventLogs?limit=180", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(len(response.json['event_logs']), 150)
+        assert len(response.json['event_logs']) == 150
 
     def _create_event_logs(self, count):
         return [
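
Comparing the whole JSON payload in one assert, as above, keeps the expected response
readable and fails with a single consolidated diff. pytest truncates long assertion
output at default verbosity, so large payload diffs are best inspected with -vv. A
sketch with illustrative values:

    def test_whole_payload_comparison():
        response_json = {"event_logs": [{"event": "TEST_EVENT_1"}], "total_entries": 1}
        # One assert over the full payload instead of per-field checks.
        assert response_json == {
            "event_logs": [{"event": "TEST_EVENT_1"}],
            "total_entries": 1,
        }
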
diff --git a/tests/api_connexion/endpoints/test_extra_link_endpoint.py b/tests/api_connexion/endpoints/test_extra_link_endpoint.py
index d67e3cc..3864d1f 100644
--- a/tests/api_connexion/endpoints/test_extra_link_endpoint.py
+++ b/tests/api_connexion/endpoints/test_extra_link_endpoint.py
@@ -129,16 +129,13 @@ class TestGetExtraLinks(unittest.TestCase):
         del name
         response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
 
-        self.assertEqual(404, response.status_code)
-        self.assertEqual(
-            {
-                "detail": expected_detail,
-                "status": 404,
-                "title": expected_title,
-                "type": EXCEPTIONS_LINK_MAP[404],
-            },
-            response.json,
-        )
+        assert 404 == response.status_code
+        assert response.json == {
+            "detail": expected_detail,
+            "status": 404,
+            "title": expected_title,
+            "type": EXCEPTIONS_LINK_MAP[404],
+        }
 
     def test_should_raise_403_forbidden(self):
         response = self.client.get(
@@ -161,10 +158,10 @@ class TestGetExtraLinks(unittest.TestCase):
             environ_overrides={'REMOTE_USER': "test"},
         )
 
-        self.assertEqual(200, response.status_code, response.data)
-        self.assertEqual(
-            {"BigQuery Console": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID"}, response.json
-        )
+        assert 200 == response.status_code, response.data
+        assert response.json == {
+            "BigQuery Console": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID"
+        }
 
     @mock_plugin_manager(plugins=[])
     def test_should_respond_200_missing_xcom(self):
@@ -173,11 +170,8 @@ class TestGetExtraLinks(unittest.TestCase):
             environ_overrides={'REMOTE_USER': "test"},
         )
 
-        self.assertEqual(200, response.status_code, response.data)
-        self.assertEqual(
-            {"BigQuery Console": None},
-            response.json,
-        )
+        assert 200 == response.status_code, response.data
+        assert {"BigQuery Console": None} == response.json
 
     @mock_plugin_manager(plugins=[])
     def test_should_respond_200_multiple_links(self):
@@ -193,14 +187,11 @@ class TestGetExtraLinks(unittest.TestCase):
             environ_overrides={'REMOTE_USER': "test"},
         )
 
-        self.assertEqual(200, response.status_code, response.data)
-        self.assertEqual(
-            {
-                "BigQuery Console #1": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID_1",
-                "BigQuery Console #2": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID_2",
-            },
-            response.json,
-        )
+        assert 200 == response.status_code, response.data
+        assert response.json == {
+            "BigQuery Console #1": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID_1",
+            "BigQuery Console #2": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID_2",
+        }
 
     @mock_plugin_manager(plugins=[])
     def test_should_respond_200_multiple_links_missing_xcom(self):
@@ -209,11 +200,8 @@ class TestGetExtraLinks(unittest.TestCase):
             environ_overrides={'REMOTE_USER': "test"},
         )
 
-        self.assertEqual(200, response.status_code, response.data)
-        self.assertEqual(
-            {"BigQuery Console #1": None, "BigQuery Console #2": None},
-            response.json,
-        )
+        assert 200 == response.status_code, response.data
+        assert {"BigQuery Console #1": None, "BigQuery Console #2": None} == response.json
 
     def test_should_respond_200_support_plugins(self):
         class GoogleLink(BaseOperatorLink):
@@ -248,15 +236,12 @@ class TestGetExtraLinks(unittest.TestCase):
                 environ_overrides={'REMOTE_USER': "test"},
             )
 
-            self.assertEqual(200, response.status_code, response.data)
-            self.assertEqual(
-                {
-                    "BigQuery Console": None,
-                    "Google": "https://www.google.com",
-                    "S3": (
-                        "https://s3.amazonaws.com/airflow-logs/"
-                        "TEST_DAG_ID/TEST_SINGLE_QUERY/2020-01-01T00%3A00%3A00%2B00%3A00"
-                    ),
-                },
-                response.json,
-            )
+            assert 200 == response.status_code, response.data
+            assert response.json == {
+                "BigQuery Console": None,
+                "Google": "https://www.google.com",
+                "S3": (
+                    "https://s3.amazonaws.com/airflow-logs/"
+                    "TEST_DAG_ID/TEST_SINGLE_QUERY/2020-01-01T00%3A00%3A00%2B00%3A00"
+                ),
+            }
diff --git a/tests/api_connexion/endpoints/test_health_endpoint.py b/tests/api_connexion/endpoints/test_health_endpoint.py
index 9bc6006..defb97b 100644
--- a/tests/api_connexion/endpoints/test_health_endpoint.py
+++ b/tests/api_connexion/endpoints/test_health_endpoint.py
@@ -58,11 +58,11 @@ class TestGetHeath(TestHealthTestBase):
         )
         session.commit()
         resp_json = self.client.get("/api/v1/health").json
-        self.assertEqual("healthy", resp_json["metadatabase"]["status"])
-        self.assertEqual("healthy", resp_json["scheduler"]["status"])
-        self.assertEqual(
-            last_scheduler_heartbeat_for_testing_1.isoformat(),
-            resp_json["scheduler"]["latest_scheduler_heartbeat"],
+        assert "healthy" == resp_json["metadatabase"]["status"]
+        assert "healthy" == resp_json["scheduler"]["status"]
+        assert (
+            last_scheduler_heartbeat_for_testing_1.isoformat()
+            == resp_json["scheduler"]["latest_scheduler_heartbeat"]
         )
 
     @provide_session
@@ -77,22 +77,22 @@ class TestGetHeath(TestHealthTestBase):
         )
         session.commit()
         resp_json = self.client.get("/api/v1/health").json
-        self.assertEqual("healthy", resp_json["metadatabase"]["status"])
-        self.assertEqual("unhealthy", resp_json["scheduler"]["status"])
-        self.assertEqual(
-            last_scheduler_heartbeat_for_testing_2.isoformat(),
-            resp_json["scheduler"]["latest_scheduler_heartbeat"],
+        assert "healthy" == resp_json["metadatabase"]["status"]
+        assert "unhealthy" == resp_json["scheduler"]["status"]
+        assert (
+            last_scheduler_heartbeat_for_testing_2.isoformat()
+            == resp_json["scheduler"]["latest_scheduler_heartbeat"]
         )
 
     def test_unhealthy_scheduler_no_job(self):
         resp_json = self.client.get("/api/v1/health").json
-        self.assertEqual("healthy", resp_json["metadatabase"]["status"])
-        self.assertEqual("unhealthy", resp_json["scheduler"]["status"])
-        self.assertIsNone(resp_json["scheduler"]["latest_scheduler_heartbeat"])
+        assert "healthy" == resp_json["metadatabase"]["status"]
+        assert "unhealthy" == resp_json["scheduler"]["status"]
+        assert resp_json["scheduler"]["latest_scheduler_heartbeat"] is None
 
     @mock.patch("airflow.api_connexion.endpoints.health_endpoint.SchedulerJob.most_recent_job")
     def test_unhealthy_metadatabase_status(self, mock_scheduler_most_recent_job):
         mock_scheduler_most_recent_job.side_effect = Exception
         resp_json = self.client.get("/api/v1/health").json
-        self.assertEqual("unhealthy", resp_json["metadatabase"]["status"])
-        self.assertIsNone(resp_json["scheduler"]["latest_scheduler_heartbeat"])
+        assert "unhealthy" == resp_json["metadatabase"]["status"]
+        assert resp_json["scheduler"]["latest_scheduler_heartbeat"] is None
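
Singleton checks become identity tests: assertIsNone(x) converts to "assert x is
None", the comparison PEP 8 prescribes for None. A sketch with an illustrative name:

    def test_identity_check_for_none():
        latest_heartbeat = None
        # "is None" tests identity; "== None" would invoke __eq__ and
        # can be fooled by objects that overload equality.
        assert latest_heartbeat is None
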
diff --git a/tests/api_connexion/endpoints/test_import_error_endpoint.py b/tests/api_connexion/endpoints/test_import_error_endpoint.py
index 66eef60..c8c144b 100644
--- a/tests/api_connexion/endpoints/test_import_error_endpoint.py
+++ b/tests/api_connexion/endpoints/test_import_error_endpoint.py
@@ -81,28 +81,22 @@ class TestGetImportErrorEndpoint(TestBaseImportError):
         assert response.status_code == 200
         response_data = response.json
         response_data["import_error_id"] = 1
-        self.assertEqual(
-            {
-                "filename": "Lorem_ipsum.py",
-                "import_error_id": 1,
-                "stack_trace": "Lorem ipsum",
-                "timestamp": "2020-06-10T12:00:00+00:00",
-            },
-            response_data,
-        )
+        assert response_data == {
+            "filename": "Lorem_ipsum.py",
+            "import_error_id": 1,
+            "stack_trace": "Lorem ipsum",
+            "timestamp": "2020-06-10T12:00:00+00:00",
+        }
 
     def test_response_404(self):
         response = self.client.get("/api/v1/importErrors/2", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 404
-        self.assertEqual(
-            {
-                "detail": "The ImportError with import_error_id: `2` was not found",
-                "status": 404,
-                "title": "Import error not found",
-                "type": EXCEPTIONS_LINK_MAP[404],
-            },
-            response.json,
-        )
+        assert response.json == {
+            "detail": "The ImportError with import_error_id: `2` was not found",
+            "status": 404,
+            "title": "Import error not found",
+            "type": EXCEPTIONS_LINK_MAP[404],
+        }
 
     @provide_session
     def test_should_raises_401_unauthenticated(self, session):
@@ -144,26 +138,23 @@ class TestGetImportErrorsEndpoint(TestBaseImportError):
         assert response.status_code == 200
         response_data = response.json
         self._normalize_import_errors(response_data['import_errors'])
-        self.assertEqual(
-            {
-                "import_errors": [
-                    {
-                        "filename": "Lorem_ipsum.py",
-                        "import_error_id": 1,
-                        "stack_trace": "Lorem ipsum",
-                        "timestamp": "2020-06-10T12:00:00+00:00",
-                    },
-                    {
-                        "filename": "Lorem_ipsum.py",
-                        "import_error_id": 2,
-                        "stack_trace": "Lorem ipsum",
-                        "timestamp": "2020-06-10T12:00:00+00:00",
-                    },
-                ],
-                "total_entries": 2,
-            },
-            response_data,
-        )
+        assert response_data == {
+            "import_errors": [
+                {
+                    "filename": "Lorem_ipsum.py",
+                    "import_error_id": 1,
+                    "stack_trace": "Lorem ipsum",
+                    "timestamp": "2020-06-10T12:00:00+00:00",
+                },
+                {
+                    "filename": "Lorem_ipsum.py",
+                    "import_error_id": 2,
+                    "stack_trace": "Lorem ipsum",
+                    "timestamp": "2020-06-10T12:00:00+00:00",
+                },
+            ],
+            "total_entries": 2,
+        }
 
     @provide_session
     def test_should_raises_401_unauthenticated(self, session):
@@ -213,7 +204,7 @@ class TestGetImportErrorsEndpointPagination(TestBaseImportError):
 
         assert response.status_code == 200
         import_ids = [pool["filename"] for pool in response.json["import_errors"]]
-        self.assertEqual(import_ids, expected_import_error_ids)
+        assert import_ids == expected_import_error_ids
 
     @provide_session
     def test_should_respect_page_size_limit_default(self, session):
@@ -229,7 +220,7 @@ class TestGetImportErrorsEndpointPagination(TestBaseImportError):
         session.commit()
         response = self.client.get("/api/v1/importErrors", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(len(response.json['import_errors']), 100)
+        assert len(response.json['import_errors']) == 100
 
     @provide_session
     @conf_vars({("api", "maximum_page_limit"): "150"})
@@ -248,4 +239,4 @@ class TestGetImportErrorsEndpointPagination(TestBaseImportError):
             "/api/v1/importErrors?limit=180", environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 200
-        self.assertEqual(len(response.json['import_errors']), 150)
+        assert len(response.json['import_errors']) == 150
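
The import-error tests above pin the auto-incremented import_error_id to a known
value before the whole-payload assert, because database-generated ids vary between
runs. A sketch of that pattern with illustrative values:

    def test_normalize_before_comparing():
        response_data = {"import_error_id": 837, "filename": "Lorem_ipsum.py"}
        # Overwrite the nondeterministic field, then compare everything.
        response_data["import_error_id"] = 1
        assert response_data == {"import_error_id": 1, "filename": "Lorem_ipsum.py"}
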
diff --git a/tests/api_connexion/endpoints/test_log_endpoint.py b/tests/api_connexion/endpoints/test_log_endpoint.py
index 094fc5f..509cbe7 100644
--- a/tests/api_connexion/endpoints/test_log_endpoint.py
+++ b/tests/api_connexion/endpoints/test_log_endpoint.py
@@ -167,13 +167,13 @@ class TestGetLog(unittest.TestCase):
         expected_filename = "{}/{}/{}/{}/1.log".format(
             self.log_dir, self.DAG_ID, self.TASK_ID, self.default_time.replace(":", ".")
         )
-        self.assertEqual(
-            response.json['content'],
-            f"[('', '*** Reading local file: {expected_filename}\\nLog for testing.')]",
+        assert (
+            response.json['content']
+            == f"[('', '*** Reading local file: {expected_filename}\\nLog for testing.')]"
         )
         info = serializer.loads(response.json['continuation_token'])
-        self.assertEqual(info, {'end_of_log': True})
-        self.assertEqual(200, response.status_code)
+        assert info == {'end_of_log': True}
+        assert 200 == response.status_code
 
     @provide_session
     def test_should_respond_200_text_plain(self, session):
@@ -191,10 +191,10 @@ class TestGetLog(unittest.TestCase):
         expected_filename = "{}/{}/{}/{}/1.log".format(
             self.log_dir, self.DAG_ID, self.TASK_ID, self.default_time.replace(':', '.')
         )
-        self.assertEqual(200, response.status_code)
-        self.assertEqual(
-            response.data.decode('utf-8'),
-            f"\n*** Reading local file: {expected_filename}\nLog for testing.\n",
+        assert 200 == response.status_code
+        assert (
+            response.data.decode('utf-8')
+            == f"\n*** Reading local file: {expected_filename}\nLog for testing.\n"
         )
 
     @provide_session
@@ -209,8 +209,8 @@ class TestGetLog(unittest.TestCase):
             f"taskInstances/Invalid-Task-ID/logs/1?token={token}",
             environ_overrides={'REMOTE_USER': "test"},
         )
-        self.assertEqual(response.status_code, 400)
-        self.assertEqual(response.json['detail'], "Task instance did not exist in the DB")
+        assert response.status_code == 400
+        assert response.json['detail'] == "Task instance did not exist in the DB"
 
     @provide_session
     def test_get_logs_with_metadata_as_download_large_file(self, session):
@@ -229,10 +229,10 @@ class TestGetLog(unittest.TestCase):
                 environ_overrides={'REMOTE_USER': "test"},
             )
 
-            self.assertIn('1st line', response.data.decode('utf-8'))
-            self.assertIn('2nd line', response.data.decode('utf-8'))
-            self.assertIn('3rd line', response.data.decode('utf-8'))
-            self.assertNotIn('should never be read', response.data.decode('utf-8'))
+            assert '1st line' in response.data.decode('utf-8')
+            assert '2nd line' in response.data.decode('utf-8')
+            assert '3rd line' in response.data.decode('utf-8')
+            assert 'should never be read' not in response.data.decode('utf-8')
 
     @mock.patch("airflow.api_connexion.endpoints.log_endpoint.TaskLogReader")
     def test_get_logs_for_handler_without_read_method(self, mock_log_reader):
@@ -249,8 +249,8 @@ class TestGetLog(unittest.TestCase):
             headers={'Content-Type': 'application/jso'},
             environ_overrides={'REMOTE_USER': "test"},
         )
-        self.assertEqual(400, response.status_code)
-        self.assertIn('Task log handler does not support read logs.', response.data.decode('utf-8'))
+        assert 400 == response.status_code
+        assert 'Task log handler does not support read logs.' in response.data.decode('utf-8')
 
     @provide_session
     def test_bad_signature_raises(self, session):
@@ -263,15 +263,12 @@ class TestGetLog(unittest.TestCase):
             headers={'Accept': 'application/json'},
             environ_overrides={'REMOTE_USER': "test"},
         )
-        self.assertEqual(
-            response.json,
-            {
-                'detail': None,
-                'status': 400,
-                'title': "Bad Signature. Please use only the tokens provided by the API.",
-                'type': EXCEPTIONS_LINK_MAP[400],
-            },
-        )
+        assert response.json == {
+            'detail': None,
+            'status': 400,
+            'title': "Bad Signature. Please use only the tokens provided by the API.",
+            'type': EXCEPTIONS_LINK_MAP[400],
+        }
 
     def test_raises_404_for_invalid_dag_run_id(self):
         response = self.client.get(
@@ -280,10 +277,12 @@ class TestGetLog(unittest.TestCase):
             headers={'Accept': 'application/json'},
             environ_overrides={'REMOTE_USER': "test"},
         )
-        self.assertEqual(
-            response.json,
-            {'detail': None, 'status': 404, 'title': "DAG Run not found", 'type': EXCEPTIONS_LINK_MAP[404]},
-        )
+        assert response.json == {
+            'detail': None,
+            'status': 404,
+            'title': "DAG Run not found",
+            'type': EXCEPTIONS_LINK_MAP[404],
+        }
 
     def test_should_raises_401_unauthenticated(self):
         key = self.app.config["SECRET_KEY"]
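
When a comparison no longer fits on one line, the converted asserts wrap the whole
expression in parentheses, as in the log-content checks above: an assert statement
cannot be broken across lines on its own. A sketch with illustrative values:

    def test_long_asserts_wrap_in_parentheses():
        content = "*** Reading local file: /logs/1.log\nLog for testing."
        expected_filename = "/logs/1.log"
        # Parentheses let the comparison span lines without backslashes.
        assert (
            content
            == f"*** Reading local file: {expected_filename}\nLog for testing."
        )
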
diff --git a/tests/api_connexion/endpoints/test_pool_endpoint.py b/tests/api_connexion/endpoints/test_pool_endpoint.py
index 16e8607..7bf7071 100644
--- a/tests/api_connexion/endpoints/test_pool_endpoint.py
+++ b/tests/api_connexion/endpoints/test_pool_endpoint.py
@@ -72,30 +72,27 @@ class TestGetPools(TestBasePoolEndpoints):
         assert len(result) == 2  # accounts for the default pool as well
         response = self.client.get("/api/v1/pools", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(
-            {
-                "pools": [
-                    {
-                        "name": "default_pool",
-                        "slots": 128,
-                        "occupied_slots": 0,
-                        "running_slots": 0,
-                        "queued_slots": 0,
-                        "open_slots": 128,
-                    },
-                    {
-                        "name": "test_pool_a",
-                        "slots": 3,
-                        "occupied_slots": 0,
-                        "running_slots": 0,
-                        "queued_slots": 0,
-                        "open_slots": 3,
-                    },
-                ],
-                "total_entries": 2,
-            },
-            response.json,
-        )
+        assert response.json == {
+            "pools": [
+                {
+                    "name": "default_pool",
+                    "slots": 128,
+                    "occupied_slots": 0,
+                    "running_slots": 0,
+                    "queued_slots": 0,
+                    "open_slots": 128,
+                },
+                {
+                    "name": "test_pool_a",
+                    "slots": 3,
+                    "occupied_slots": 0,
+                    "running_slots": 0,
+                    "queued_slots": 0,
+                    "open_slots": 3,
+                },
+            ],
+            "total_entries": 2,
+        }
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.get("/api/v1/pools")
@@ -134,11 +131,11 @@ class TestGetPoolsPagination(TestBasePoolEndpoints):
         session.add_all(pools)
         session.commit()
         result = session.query(Pool).count()
-        self.assertEqual(result, 121)  # accounts for default pool as well
+        assert result == 121  # accounts for default pool as well
         response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
         pool_ids = [pool["name"] for pool in response.json["pools"]]
-        self.assertEqual(pool_ids, expected_pool_ids)
+        assert pool_ids == expected_pool_ids
 
     @provide_session
     def test_should_respect_page_size_limit_default(self, session):
@@ -146,10 +143,10 @@ class TestGetPoolsPagination(TestBasePoolEndpoints):
         session.add_all(pools)
         session.commit()
         result = session.query(Pool).count()
-        self.assertEqual(result, 121)
+        assert result == 121
         response = self.client.get("/api/v1/pools", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(len(response.json['pools']), 100)
+        assert len(response.json['pools']) == 100
 
     @provide_session
     @conf_vars({("api", "maximum_page_limit"): "150"})
@@ -158,10 +155,10 @@ class TestGetPoolsPagination(TestBasePoolEndpoints):
         session.add_all(pools)
         session.commit()
         result = session.query(Pool).count()
-        self.assertEqual(result, 200)
+        assert result == 200
         response = self.client.get("/api/v1/pools?limit=180", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(len(response.json['pools']), 150)
+        assert len(response.json['pools']) == 150
 
 
 class TestGetPool(TestBasePoolEndpoints):
@@ -172,30 +169,24 @@ class TestGetPool(TestBasePoolEndpoints):
         session.commit()
         response = self.client.get("/api/v1/pools/test_pool_a", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(
-            {
-                "name": "test_pool_a",
-                "slots": 3,
-                "occupied_slots": 0,
-                "running_slots": 0,
-                "queued_slots": 0,
-                "open_slots": 3,
-            },
-            response.json,
-        )
+        assert response.json == {
+            "name": "test_pool_a",
+            "slots": 3,
+            "occupied_slots": 0,
+            "running_slots": 0,
+            "queued_slots": 0,
+            "open_slots": 3,
+        }
 
     def test_response_404(self):
         response = self.client.get("/api/v1/pools/invalid_pool", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 404
-        self.assertEqual(
-            {
-                "detail": "Pool with name:'invalid_pool' not found",
-                "status": 404,
-                "title": "Not Found",
-                "type": EXCEPTIONS_LINK_MAP[404],
-            },
-            response.json,
-        )
+        assert {
+            "detail": "Pool with name:'invalid_pool' not found",
+            "status": 404,
+            "title": "Not Found",
+            "type": EXCEPTIONS_LINK_MAP[404],
+        } == response.json
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.get("/api/v1/pools/default_pool")
@@ -215,20 +206,17 @@ class TestDeletePool(TestBasePoolEndpoints):
         assert response.status_code == 204
         # Check if the pool is deleted from the db
         response = self.client.get(f"api/v1/pools/{pool_name}", environ_overrides={'REMOTE_USER': "test"})
-        self.assertEqual(response.status_code, 404)
+        assert response.status_code == 404
 
     def test_response_404(self):
         response = self.client.delete("api/v1/pools/invalid_pool", environ_overrides={'REMOTE_USER': "test"})
-        self.assertEqual(response.status_code, 404)
-        self.assertEqual(
-            {
-                "detail": "Pool with name:'invalid_pool' not found",
-                "status": 404,
-                "title": "Not Found",
-                "type": EXCEPTIONS_LINK_MAP[404],
-            },
-            response.json,
-        )
+        assert response.status_code == 404
+        assert {
+            "detail": "Pool with name:'invalid_pool' not found",
+            "status": 404,
+            "title": "Not Found",
+            "type": EXCEPTIONS_LINK_MAP[404],
+        } == response.json
 
     @provide_session
     def test_should_raises_401_unauthenticated(self, session):
@@ -254,17 +242,14 @@ class TestPostPool(TestBasePoolEndpoints):
             environ_overrides={'REMOTE_USER': "test"},
         )
         assert response.status_code == 200
-        self.assertEqual(
-            {
-                "name": "test_pool_a",
-                "slots": 3,
-                "occupied_slots": 0,
-                "running_slots": 0,
-                "queued_slots": 0,
-                "open_slots": 3,
-            },
-            response.json,
-        )
+        assert {
+            "name": "test_pool_a",
+            "slots": 3,
+            "occupied_slots": 0,
+            "running_slots": 0,
+            "queued_slots": 0,
+            "open_slots": 3,
+        } == response.json
 
     @provide_session
     def test_response_409(self, session):
@@ -278,15 +263,12 @@ class TestPostPool(TestBasePoolEndpoints):
             environ_overrides={'REMOTE_USER': "test"},
         )
         assert response.status_code == 409
-        self.assertEqual(
-            {
-                "detail": f"Pool: {pool_name} already exists",
-                "status": 409,
-                "title": "Conflict",
-                "type": EXCEPTIONS_LINK_MAP[409],
-            },
-            response.json,
-        )
+        assert {
+            "detail": f"Pool: {pool_name} already exists",
+            "status": 409,
+            "title": "Conflict",
+            "type": EXCEPTIONS_LINK_MAP[409],
+        } == response.json
 
     @parameterized.expand(
         [
@@ -318,15 +300,12 @@ class TestPostPool(TestBasePoolEndpoints):
             "api/v1/pools", json=request_json, environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 400
-        self.assertDictEqual(
-            {
-                "detail": error_detail,
-                "status": 400,
-                "title": "Bad Request",
-                "type": EXCEPTIONS_LINK_MAP[400],
-            },
-            response.json,
-        )
+        assert {
+            "detail": error_detail,
+            "status": 400,
+            "title": "Bad Request",
+            "type": EXCEPTIONS_LINK_MAP[400],
+        } == response.json
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.post("api/v1/pools", json={"name": "test_pool_a", "slots": 3})
@@ -345,18 +324,15 @@ class TestPatchPool(TestBasePoolEndpoints):
             json={"name": "test_pool_a", "slots": 3},
             environ_overrides={'REMOTE_USER': "test"},
         )
-        self.assertEqual(response.status_code, 200)
-        self.assertEqual(
-            {
-                "occupied_slots": 0,
-                "queued_slots": 0,
-                "name": "test_pool_a",
-                "open_slots": 3,
-                "running_slots": 0,
-                "slots": 3,
-            },
-            response.json,
-        )
+        assert response.status_code == 200
+        assert {
+            "occupied_slots": 0,
+            "queued_slots": 0,
+            "name": "test_pool_a",
+            "open_slots": 3,
+            "running_slots": 0,
+            "slots": 3,
+        } == response.json
 
     @parameterized.expand(
         [
@@ -380,15 +356,12 @@ class TestPatchPool(TestBasePoolEndpoints):
             "api/v1/pools/test_pool", json=request_json, environ_overrides={'REMOTE_USER': "test"}
         )
         assert response.status_code == 400
-        self.assertEqual(
-            {
-                "detail": error_detail,
-                "status": 400,
-                "title": "Bad Request",
-                "type": EXCEPTIONS_LINK_MAP[400],
-            },
-            response.json,
-        )
+        assert {
+            "detail": error_detail,
+            "status": 400,
+            "title": "Bad Request",
+            "type": EXCEPTIONS_LINK_MAP[400],
+        } == response.json
 
     @provide_session
     def test_should_raises_401_unauthenticated(self, session):
@@ -408,15 +381,12 @@ class TestModifyDefaultPool(TestBasePoolEndpoints):
     def test_delete_400(self):
         response = self.client.delete("api/v1/pools/default_pool", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 400
-        self.assertEqual(
-            {
-                "detail": "Default Pool can't be deleted",
-                "status": 400,
-                "title": "Bad Request",
-                "type": EXCEPTIONS_LINK_MAP[400],
-            },
-            response.json,
-        )
+        assert {
+            "detail": "Default Pool can't be deleted",
+            "status": 400,
+            "title": "Bad Request",
+            "type": EXCEPTIONS_LINK_MAP[400],
+        } == response.json
 
     @parameterized.expand(
         [
@@ -492,7 +462,7 @@ class TestModifyDefaultPool(TestBasePoolEndpoints):
         del name
         response = self.client.patch(url, json=json, environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == status_code
-        self.assertEqual(response.json, expected_response)
+        assert response.json == expected_response
 
 
 class TestPatchPoolWithUpdateMask(TestBasePoolEndpoints):
@@ -531,17 +501,14 @@ class TestPatchPoolWithUpdateMask(TestBasePoolEndpoints):
         session.commit()
         response = self.client.patch(url, json=patch_json, environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(
-            {
-                "name": expected_name,
-                "slots": expected_slots,
-                "occupied_slots": 0,
-                "running_slots": 0,
-                "queued_slots": 0,
-                "open_slots": expected_slots,
-            },
-            response.json,
-        )
+        assert {
+            "name": expected_name,
+            "slots": expected_slots,
+            "occupied_slots": 0,
+            "running_slots": 0,
+            "queued_slots": 0,
+            "open_slots": expected_slots,
+        } == response.json
 
     @parameterized.expand(
         [
@@ -579,12 +546,9 @@ class TestPatchPoolWithUpdateMask(TestBasePoolEndpoints):
         session.commit()
         response = self.client.patch(url, json=patch_json, environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 400
-        self.assertEqual(
-            {
-                "detail": error_detail,
-                "status": 400,
-                "title": "Bad Request",
-                "type": EXCEPTIONS_LINK_MAP[400],
-            },
-            response.json,
-        )
+        assert {
+            "detail": error_detail,
+            "status": 400,
+            "title": "Bad Request",
+            "type": EXCEPTIONS_LINK_MAP[400],
+        } == response.json
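
A note on the pattern above: pytest rewrites plain assert statements, so a
failing "assert actual == expected" on a dict already prints a key-by-key
diff, which is what assertDictEqual/assertEqual previously provided. A
minimal, self-contained sketch of the idiom (hypothetical file and values,
not part of this patch):

    # sketch_plain_assert.py -- run with: pytest sketch_plain_assert.py
    def test_dict_comparison():
        actual = {"name": "test_pool_a", "slots": 3, "open_slots": 3}
        expected = {"name": "test_pool_a", "slots": 3, "open_slots": 3}
        # pytest rewrites this assert; if the dicts ever differ, the
        # failure output shows a per-key diff, much like assertDictEqual.
        assert actual == expected
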
diff --git a/tests/api_connexion/endpoints/test_task_instance_endpoint.py b/tests/api_connexion/endpoints/test_task_instance_endpoint.py
index 2fb0dc0..84c957f 100644
--- a/tests/api_connexion/endpoints/test_task_instance_endpoint.py
+++ b/tests/api_connexion/endpoints/test_task_instance_endpoint.py
@@ -142,32 +142,29 @@ class TestGetTaskInstance(TestTaskInstanceEndpoint):
             "/api/v1/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context",
             environ_overrides={"REMOTE_USER": "test"},
         )
-        self.assertEqual(response.status_code, 200)
-        self.assertDictEqual(
-            response.json,
-            {
-                "dag_id": "example_python_operator",
-                "duration": 10000.0,
-                "end_date": "2020-01-03T00:00:00+00:00",
-                "execution_date": "2020-01-01T00:00:00+00:00",
-                "executor_config": "{}",
-                "hostname": "",
-                "max_tries": 0,
-                "operator": "PythonOperator",
-                "pid": 100,
-                "pool": "default_pool",
-                "pool_slots": 1,
-                "priority_weight": 6,
-                "queue": "default_queue",
-                "queued_when": None,
-                "sla_miss": None,
-                "start_date": "2020-01-02T00:00:00+00:00",
-                "state": "running",
-                "task_id": "print_the_context",
-                "try_number": 0,
-                "unixname": getpass.getuser(),
-            },
-        )
+        assert response.status_code == 200
+        assert response.json == {
+            "dag_id": "example_python_operator",
+            "duration": 10000.0,
+            "end_date": "2020-01-03T00:00:00+00:00",
+            "execution_date": "2020-01-01T00:00:00+00:00",
+            "executor_config": "{}",
+            "hostname": "",
+            "max_tries": 0,
+            "operator": "PythonOperator",
+            "pid": 100,
+            "pool": "default_pool",
+            "pool_slots": 1,
+            "priority_weight": 6,
+            "queue": "default_queue",
+            "queued_when": None,
+            "sla_miss": None,
+            "start_date": "2020-01-02T00:00:00+00:00",
+            "state": "running",
+            "task_id": "print_the_context",
+            "try_number": 0,
+            "unixname": getpass.getuser(),
+        }
 
     @provide_session
     def test_should_respond_200_task_instance_with_sla(self, session):
@@ -185,41 +182,38 @@ class TestGetTaskInstance(TestTaskInstanceEndpoint):
             "/api/v1/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context",
             environ_overrides={"REMOTE_USER": "test"},
         )
-        self.assertEqual(response.status_code, 200)
+        assert response.status_code == 200
 
-        self.assertDictEqual(
-            response.json,
-            {
+        assert response.json == {
+            "dag_id": "example_python_operator",
+            "duration": 10000.0,
+            "end_date": "2020-01-03T00:00:00+00:00",
+            "execution_date": "2020-01-01T00:00:00+00:00",
+            "executor_config": "{}",
+            "hostname": "",
+            "max_tries": 0,
+            "operator": "PythonOperator",
+            "pid": 100,
+            "pool": "default_pool",
+            "pool_slots": 1,
+            "priority_weight": 6,
+            "queue": "default_queue",
+            "queued_when": None,
+            "sla_miss": {
                 "dag_id": "example_python_operator",
-                "duration": 10000.0,
-                "end_date": "2020-01-03T00:00:00+00:00",
+                "description": None,
+                "email_sent": False,
                 "execution_date": "2020-01-01T00:00:00+00:00",
-                "executor_config": "{}",
-                "hostname": "",
-                "max_tries": 0,
-                "operator": "PythonOperator",
-                "pid": 100,
-                "pool": "default_pool",
-                "pool_slots": 1,
-                "priority_weight": 6,
-                "queue": "default_queue",
-                "queued_when": None,
-                "sla_miss": {
-                    "dag_id": "example_python_operator",
-                    "description": None,
-                    "email_sent": False,
-                    "execution_date": "2020-01-01T00:00:00+00:00",
-                    "notification_sent": False,
-                    "task_id": "print_the_context",
-                    "timestamp": "2020-01-01T00:00:00+00:00",
-                },
-                "start_date": "2020-01-02T00:00:00+00:00",
-                "state": "running",
+                "notification_sent": False,
                 "task_id": "print_the_context",
-                "try_number": 0,
-                "unixname": getpass.getuser(),
+                "timestamp": "2020-01-01T00:00:00+00:00",
             },
-        )
+            "start_date": "2020-01-02T00:00:00+00:00",
+            "state": "running",
+            "task_id": "print_the_context",
+            "try_number": 0,
+            "unixname": getpass.getuser(),
+        }
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.get(
@@ -407,9 +401,9 @@ class TestGetTaskInstances(TestTaskInstanceEndpoint):
             task_instances=task_instances,
         )
         response = self.client.get(url, environ_overrides={"REMOTE_USER": "test"})
-        self.assertEqual(response.status_code, 200)
-        self.assertEqual(response.json["total_entries"], expected_ti)
-        self.assertEqual(len(response.json["task_instances"]), expected_ti)
+        assert response.status_code == 200
+        assert response.json["total_entries"] == expected_ti
+        assert len(response.json["task_instances"]) == expected_ti
 
     @provide_session
     def test_should_respond_200_for_dag_id_filter(self, session):
@@ -420,10 +414,10 @@ class TestGetTaskInstances(TestTaskInstanceEndpoint):
             environ_overrides={"REMOTE_USER": "test"},
         )
 
-        self.assertEqual(response.status_code, 200)
+        assert response.status_code == 200
         count = session.query(TaskInstance).filter(TaskInstance.dag_id == "example_python_operator").count()
-        self.assertEqual(count, response.json["total_entries"])
-        self.assertEqual(count, len(response.json["task_instances"]))
+        assert count == response.json["total_entries"]
+        assert count == len(response.json["task_instances"])
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.get(
@@ -555,9 +549,9 @@ class TestGetTaskInstancesBatch(TestTaskInstanceEndpoint):
             environ_overrides={"REMOTE_USER": "test"},
             json=payload,
         )
-        self.assertEqual(response.status_code, 200, response.json)
-        self.assertEqual(expected_ti_count, response.json["total_entries"])
-        self.assertEqual(expected_ti_count, len(response.json["task_instances"]))
+        assert response.status_code == 200, response.json
+        assert expected_ti_count == response.json["total_entries"]
+        assert expected_ti_count == len(response.json["task_instances"])
 
     @parameterized.expand(
         [
@@ -595,9 +589,9 @@ class TestGetTaskInstancesBatch(TestTaskInstanceEndpoint):
             environ_overrides={"REMOTE_USER": "test"},
             json=payload,
         )
-        self.assertEqual(response.status_code, 200, response.json)
-        self.assertEqual(expected_ti_count, response.json["total_entries"])
-        self.assertEqual(expected_ti_count, len(response.json["task_instances"]))
+        assert response.status_code == 200, response.json
+        assert expected_ti_count == response.json["total_entries"]
+        assert expected_ti_count == len(response.json["task_instances"])
 
     @parameterized.expand(
         [
@@ -618,9 +612,9 @@ class TestGetTaskInstancesBatch(TestTaskInstanceEndpoint):
             environ_overrides={"REMOTE_USER": "test"},
             json=payload,
         )
-        self.assertEqual(response.status_code, 200)
-        self.assertEqual(len(response.json["task_instances"]), expected_ti)
-        self.assertEqual(response.json["total_entries"], total_ti)
+        assert response.status_code == 200
+        assert len(response.json["task_instances"]) == expected_ti
+        assert response.json["total_entries"] == total_ti
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.post(
@@ -815,8 +809,8 @@ class TestPostClearTaskInstances(TestTaskInstanceEndpoint):
             environ_overrides={"REMOTE_USER": "test"},
             json=payload,
         )
-        self.assertEqual(response.status_code, 200)
-        self.assertEqual(len(response.json["task_instances"]), expected_ti)
+        assert response.status_code == 200
+        assert len(response.json["task_instances"]) == expected_ti
 
     @provide_session
     def test_should_respond_200_with_reset_dag_run(self, session):
@@ -865,7 +859,7 @@ class TestPostClearTaskInstances(TestTaskInstanceEndpoint):
         failed_dag_runs = (
             session.query(DagRun).filter(DagRun.state == "failed").count()  # pylint: disable=W0143
         )
-        self.assertEqual(200, response.status_code)
+        assert 200 == response.status_code
         expected_response = [
             {
                 'dag_id': 'example_python_operator',
@@ -899,9 +893,9 @@ class TestPostClearTaskInstances(TestTaskInstanceEndpoint):
             },
         ]
         for task_instance in expected_response:
-            self.assertIn(task_instance, response.json["task_instances"])
-        self.assertEqual(5, len(response.json["task_instances"]))
-        self.assertEqual(0, failed_dag_runs, 0)
+            assert task_instance in response.json["task_instances"]
+        assert 5 == len(response.json["task_instances"])
+        assert 0 == failed_dag_runs
 
     def test_should_raises_401_unauthenticated(self):
         response = self.client.post(
@@ -961,7 +955,7 @@ class TestPostClearTaskInstances(TestTaskInstanceEndpoint):
             json=payload,
         )
         assert response.status_code == 400
-        self.assertEqual(response.json['detail'], expected)
+        assert response.json['detail'] == expected
 
 
 class TestPostSetTaskInstanceState(TestTaskInstanceEndpoint):
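
One subtlety in this conversion: a plain assert takes an optional failure
message as its second operand (assert expr, msg), and that message should
be truthy and informative -- the stray third argument to assertEqual removed
above would have become a useless falsy message, so it is dropped. Worse,
parenthesizing the pair (assert (expr, msg)) asserts a two-element tuple,
which is always true. A hypothetical sketch, not from this patch:

    def test_assert_message_forms():
        count = 5
        # Correct: the message is only evaluated and shown on failure.
        assert count == 5, f"unexpected count: {count}"
        # Broken: this asserts a non-empty tuple, so it can never fail;
        # most linters flag this form.
        # assert (count == 5, "unexpected count")
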
diff --git a/tests/api_connexion/endpoints/test_variable_endpoint.py b/tests/api_connexion/endpoints/test_variable_endpoint.py
index 5d8685f..0be9fd2 100644
--- a/tests/api_connexion/endpoints/test_variable_endpoint.py
+++ b/tests/api_connexion/endpoints/test_variable_endpoint.py
@@ -182,7 +182,7 @@ class TestGetVariables(TestVariableEndpoint):
             Variable.set(f"var{i}", i)
         response = self.client.get("/api/v1/variables?limit=180", environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(len(response.json['variables']), 150)
+        assert len(response.json['variables']) == 150
 
     def test_should_raises_401_unauthenticated(self):
         Variable.set("var1", 1)
diff --git a/tests/api_connexion/endpoints/test_version_endpoint.py b/tests/api_connexion/endpoints/test_version_endpoint.py
index 072e1f6..f046669 100644
--- a/tests/api_connexion/endpoints/test_version_endpoint.py
+++ b/tests/api_connexion/endpoints/test_version_endpoint.py
@@ -36,6 +36,6 @@ class TestGetHealthTest(unittest.TestCase):
     def test_should_respond_200(self, mock_get_airflow_get_commit):
         response = self.client.get("/api/v1/version")
 
-        self.assertEqual(200, response.status_code)
-        self.assertEqual({'git_version': 'GIT_COMMIT', 'version': 'MOCK_VERSION'}, response.json)
+        assert 200 == response.status_code
+        assert {'git_version': 'GIT_COMMIT', 'version': 'MOCK_VERSION'} == response.json
         mock_get_airflow_get_commit.assert_called_once_with()
diff --git a/tests/api_connexion/endpoints/test_xcom_endpoint.py b/tests/api_connexion/endpoints/test_xcom_endpoint.py
index 95ce05e..e38bf5f 100644
--- a/tests/api_connexion/endpoints/test_xcom_endpoint.py
+++ b/tests/api_connexion/endpoints/test_xcom_endpoint.py
@@ -103,21 +103,18 @@ class TestGetXComEntry(TestXComEndpoint):
             f"/api/v1/dags/{dag_id}/dagRuns/{dag_run_id}/taskInstances/{task_id}/xcomEntries/{xcom_key}",
             environ_overrides={'REMOTE_USER': "test"},
         )
-        self.assertEqual(200, response.status_code)
+        assert 200 == response.status_code
 
         current_data = response.json
         current_data['timestamp'] = 'TIMESTAMP'
-        self.assertEqual(
-            current_data,
-            {
-                'dag_id': dag_id,
-                'execution_date': execution_date,
-                'key': xcom_key,
-                'task_id': task_id,
-                'timestamp': 'TIMESTAMP',
-                'value': 'TEST_VALUE',
-            },
-        )
+        assert current_data == {
+            'dag_id': dag_id,
+            'execution_date': execution_date,
+            'key': xcom_key,
+            'task_id': task_id,
+            'timestamp': 'TIMESTAMP',
+            'value': 'TEST_VALUE',
+        }
 
     def test_should_raises_401_unauthenticated(self):
         dag_id = 'test-dag-id'
@@ -181,32 +178,29 @@ class TestGetXComEntries(TestXComEndpoint):
             environ_overrides={'REMOTE_USER': "test"},
         )
 
-        self.assertEqual(200, response.status_code)
+        assert 200 == response.status_code
         response_data = response.json
         for xcom_entry in response_data['xcom_entries']:
             xcom_entry['timestamp'] = "TIMESTAMP"
-        self.assertEqual(
-            response.json,
-            {
-                'xcom_entries': [
-                    {
-                        'dag_id': dag_id,
-                        'execution_date': execution_date,
-                        'key': 'test-xcom-key-1',
-                        'task_id': task_id,
-                        'timestamp': "TIMESTAMP",
-                    },
-                    {
-                        'dag_id': dag_id,
-                        'execution_date': execution_date,
-                        'key': 'test-xcom-key-2',
-                        'task_id': task_id,
-                        'timestamp': "TIMESTAMP",
-                    },
-                ],
-                'total_entries': 2,
-            },
-        )
+        assert response.json == {
+            'xcom_entries': [
+                {
+                    'dag_id': dag_id,
+                    'execution_date': execution_date,
+                    'key': 'test-xcom-key-1',
+                    'task_id': task_id,
+                    'timestamp': "TIMESTAMP",
+                },
+                {
+                    'dag_id': dag_id,
+                    'execution_date': execution_date,
+                    'key': 'test-xcom-key-2',
+                    'task_id': task_id,
+                    'timestamp': "TIMESTAMP",
+                },
+            ],
+            'total_entries': 2,
+        }
 
     def test_should_respond_200_with_tilde_and_access_to_all_dags(self):
         dag_id_1 = 'test-dag-id-1'
@@ -227,46 +221,43 @@ class TestGetXComEntries(TestXComEndpoint):
             environ_overrides={'REMOTE_USER': "test"},
         )
 
-        self.assertEqual(200, response.status_code)
+        assert 200 == response.status_code
         response_data = response.json
         for xcom_entry in response_data['xcom_entries']:
             xcom_entry['timestamp'] = "TIMESTAMP"
-        self.assertEqual(
-            response.json,
-            {
-                'xcom_entries': [
-                    {
-                        'dag_id': dag_id_1,
-                        'execution_date': execution_date,
-                        'key': 'test-xcom-key-1',
-                        'task_id': task_id_1,
-                        'timestamp': "TIMESTAMP",
-                    },
-                    {
-                        'dag_id': dag_id_1,
-                        'execution_date': execution_date,
-                        'key': 'test-xcom-key-2',
-                        'task_id': task_id_1,
-                        'timestamp': "TIMESTAMP",
-                    },
-                    {
-                        'dag_id': dag_id_2,
-                        'execution_date': execution_date,
-                        'key': 'test-xcom-key-1',
-                        'task_id': task_id_2,
-                        'timestamp': "TIMESTAMP",
-                    },
-                    {
-                        'dag_id': dag_id_2,
-                        'execution_date': execution_date,
-                        'key': 'test-xcom-key-2',
-                        'task_id': task_id_2,
-                        'timestamp': "TIMESTAMP",
-                    },
-                ],
-                'total_entries': 4,
-            },
-        )
+        assert response.json == {
+            'xcom_entries': [
+                {
+                    'dag_id': dag_id_1,
+                    'execution_date': execution_date,
+                    'key': 'test-xcom-key-1',
+                    'task_id': task_id_1,
+                    'timestamp': "TIMESTAMP",
+                },
+                {
+                    'dag_id': dag_id_1,
+                    'execution_date': execution_date,
+                    'key': 'test-xcom-key-2',
+                    'task_id': task_id_1,
+                    'timestamp': "TIMESTAMP",
+                },
+                {
+                    'dag_id': dag_id_2,
+                    'execution_date': execution_date,
+                    'key': 'test-xcom-key-1',
+                    'task_id': task_id_2,
+                    'timestamp': "TIMESTAMP",
+                },
+                {
+                    'dag_id': dag_id_2,
+                    'execution_date': execution_date,
+                    'key': 'test-xcom-key-2',
+                    'task_id': task_id_2,
+                    'timestamp': "TIMESTAMP",
+                },
+            ],
+            'total_entries': 4,
+        }
 
     def test_should_respond_200_with_tilde_and_granular_dag_access(self):
         dag_id_1 = 'test-dag-id-1'
@@ -286,32 +277,29 @@ class TestGetXComEntries(TestXComEndpoint):
             environ_overrides={'REMOTE_USER': "test_granular_permissions"},
         )
 
-        self.assertEqual(200, response.status_code)
+        assert 200 == response.status_code
         response_data = response.json
         for xcom_entry in response_data['xcom_entries']:
             xcom_entry['timestamp'] = "TIMESTAMP"
-        self.assertEqual(
-            response.json,
-            {
-                'xcom_entries': [
-                    {
-                        'dag_id': dag_id_1,
-                        'execution_date': execution_date,
-                        'key': 'test-xcom-key-1',
-                        'task_id': task_id_1,
-                        'timestamp': "TIMESTAMP",
-                    },
-                    {
-                        'dag_id': dag_id_1,
-                        'execution_date': execution_date,
-                        'key': 'test-xcom-key-2',
-                        'task_id': task_id_1,
-                        'timestamp': "TIMESTAMP",
-                    },
-                ],
-                'total_entries': 2,
-            },
-        )
+        assert response.json == {
+            'xcom_entries': [
+                {
+                    'dag_id': dag_id_1,
+                    'execution_date': execution_date,
+                    'key': 'test-xcom-key-1',
+                    'task_id': task_id_1,
+                    'timestamp': "TIMESTAMP",
+                },
+                {
+                    'dag_id': dag_id_1,
+                    'execution_date': execution_date,
+                    'key': 'test-xcom-key-2',
+                    'task_id': task_id_1,
+                    'timestamp': "TIMESTAMP",
+                },
+            ],
+            'total_entries': 2,
+        }
 
     def test_should_raises_401_unauthenticated(self):
         dag_id = 'test-dag-id'
@@ -461,9 +449,9 @@ class TestPaginationGetXComEntries(TestXComEndpoint):
         session.commit()
         response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
         assert response.status_code == 200
-        self.assertEqual(response.json["total_entries"], 10)
+        assert response.json["total_entries"] == 10
         conn_ids = [conn["key"] for conn in response.json["xcom_entries"] if conn]
-        self.assertEqual(conn_ids, expected_xcom_ids)
+        assert conn_ids == expected_xcom_ids
 
     def _create_xcoms(self, count):
         return [
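
The XCom tests above pin the server-generated timestamp to a sentinel before
comparing the whole payload; normalising volatile fields this way keeps a
single equality assert deterministic. It works here because the Flask test
client caches the parsed JSON body, so mutating the returned object is
visible to the later response.json access. A hypothetical, self-contained
sketch of the same idea on a local dict:

    def test_normalised_payload():
        payload = {
            "key": "test-xcom-key-1",
            "timestamp": "2021-01-17T18:00:34+00:00",  # varies per run
        }
        payload["timestamp"] = "TIMESTAMP"  # pin the volatile field
        assert payload == {"key": "test-xcom-key-1", "timestamp": "TIMESTAMP"}
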
diff --git a/tests/api_connexion/schemas/test_common_schema.py b/tests/api_connexion/schemas/test_common_schema.py
index c6cecdb..c734483 100644
--- a/tests/api_connexion/schemas/test_common_schema.py
+++ b/tests/api_connexion/schemas/test_common_schema.py
@@ -18,6 +18,7 @@
 import datetime
 import unittest
 
+import pytest
 from dateutil import relativedelta
 
 from airflow.api_connexion.schemas.common_schema import (
@@ -34,14 +35,14 @@ class TestTimeDeltaSchema(unittest.TestCase):
         instance = datetime.timedelta(days=12)
         schema_instance = TimeDeltaSchema()
         result = schema_instance.dump(instance)
-        self.assertEqual({"__type": "TimeDelta", "days": 12, "seconds": 0, "microseconds": 0}, result)
+        assert {"__type": "TimeDelta", "days": 12, "seconds": 0, "microseconds": 0} == result
 
     def test_should_deserialize(self):
         instance = {"__type": "TimeDelta", "days": 12, "seconds": 0, "microseconds": 0}
         schema_instance = TimeDeltaSchema()
         result = schema_instance.load(instance)
         expected_instance = datetime.timedelta(days=12)
-        self.assertEqual(expected_instance, result)
+        assert expected_instance == result
 
 
 class TestRelativeDeltaSchema(unittest.TestCase):
@@ -49,34 +50,31 @@ class TestRelativeDeltaSchema(unittest.TestCase):
         instance = relativedelta.relativedelta(days=+12)
         schema_instance = RelativeDeltaSchema()
         result = schema_instance.dump(instance)
-        self.assertEqual(
-            {
-                '__type': 'RelativeDelta',
-                "day": None,
-                "days": 12,
-                "hour": None,
-                "hours": 0,
-                "leapdays": 0,
-                "microsecond": None,
-                "microseconds": 0,
-                "minute": None,
-                "minutes": 0,
-                "month": None,
-                "months": 0,
-                "second": None,
-                "seconds": 0,
-                "year": None,
-                "years": 0,
-            },
-            result,
-        )
+        assert {
+            '__type': 'RelativeDelta',
+            "day": None,
+            "days": 12,
+            "hour": None,
+            "hours": 0,
+            "leapdays": 0,
+            "microsecond": None,
+            "microseconds": 0,
+            "minute": None,
+            "minutes": 0,
+            "month": None,
+            "months": 0,
+            "second": None,
+            "seconds": 0,
+            "year": None,
+            "years": 0,
+        } == result
 
     def test_should_deserialize(self):
         instance = {"__type": "RelativeDelta", "days": 12, "seconds": 0}
         schema_instance = RelativeDeltaSchema()
         result = schema_instance.load(instance)
         expected_instance = relativedelta.relativedelta(days=+12)
-        self.assertEqual(expected_instance, result)
+        assert expected_instance == result
 
 
 class TestCronExpressionSchema(unittest.TestCase):
@@ -85,7 +83,7 @@ class TestCronExpressionSchema(unittest.TestCase):
         schema_instance = CronExpressionSchema()
         result = schema_instance.load(instance)
         expected_instance = CronExpression("5 4 * * *")
-        self.assertEqual(expected_instance, result)
+        assert expected_instance == result
 
 
 class TestScheduleIntervalSchema(unittest.TestCase):
@@ -93,57 +91,54 @@ class TestScheduleIntervalSchema(unittest.TestCase):
         instance = datetime.timedelta(days=12)
         schema_instance = ScheduleIntervalSchema()
         result = schema_instance.dump(instance)
-        self.assertEqual({"__type": "TimeDelta", "days": 12, "seconds": 0, "microseconds": 0}, result)
+        assert {"__type": "TimeDelta", "days": 12, "seconds": 0, "microseconds": 0} == result
 
     def test_should_deserialize_timedelta(self):
         instance = {"__type": "TimeDelta", "days": 12, "seconds": 0, "microseconds": 0}
         schema_instance = ScheduleIntervalSchema()
         result = schema_instance.load(instance)
         expected_instance = datetime.timedelta(days=12)
-        self.assertEqual(expected_instance, result)
+        assert expected_instance == result
 
     def test_should_serialize_relative_delta(self):
         instance = relativedelta.relativedelta(days=+12)
         schema_instance = ScheduleIntervalSchema()
         result = schema_instance.dump(instance)
-        self.assertEqual(
-            {
-                "__type": "RelativeDelta",
-                "day": None,
-                "days": 12,
-                "hour": None,
-                "hours": 0,
-                "leapdays": 0,
-                "microsecond": None,
-                "microseconds": 0,
-                "minute": None,
-                "minutes": 0,
-                "month": None,
-                "months": 0,
-                "second": None,
-                "seconds": 0,
-                "year": None,
-                "years": 0,
-            },
-            result,
-        )
+        assert {
+            "__type": "RelativeDelta",
+            "day": None,
+            "days": 12,
+            "hour": None,
+            "hours": 0,
+            "leapdays": 0,
+            "microsecond": None,
+            "microseconds": 0,
+            "minute": None,
+            "minutes": 0,
+            "month": None,
+            "months": 0,
+            "second": None,
+            "seconds": 0,
+            "year": None,
+            "years": 0,
+        } == result
 
     def test_should_deserialize_relative_delta(self):
         instance = {"__type": "RelativeDelta", "days": 12, "seconds": 0}
         schema_instance = ScheduleIntervalSchema()
         result = schema_instance.load(instance)
         expected_instance = relativedelta.relativedelta(days=+12)
-        self.assertEqual(expected_instance, result)
+        assert expected_instance == result
 
     def test_should_serialize_cron_expression(self):
         instance = "5 4 * * *"
         schema_instance = ScheduleIntervalSchema()
         result = schema_instance.dump(instance)
         expected_instance = {"__type": "CronExpression", "value": "5 4 * * *"}
-        self.assertEqual(expected_instance, result)
+        assert expected_instance == result
 
     def test_should_error_unknown_obj_type(self):
         instance = 342
         schema_instance = ScheduleIntervalSchema()
-        with self.assertRaisesRegex(Exception, "Unknown object type: int"):
+        with pytest.raises(Exception, match="Unknown object type: int"):
             schema_instance.dump(instance)
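
assertRaisesRegex maps onto pytest.raises(..., match=...); the match pattern
is applied with re.search against str(excinfo.value), so a plain substring
works, but messages containing regex metacharacters need re.escape (as the
marshmallow tests below do). A hypothetical sketch:

    import pytest

    def test_raises_with_match():
        with pytest.raises(ValueError, match="Unknown object type: int"):
            raise ValueError("Unknown object type: int")
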
diff --git a/tests/api_connexion/schemas/test_connection_schema.py b/tests/api_connexion/schemas/test_connection_schema.py
index 5a4c580..983a735 100644
--- a/tests/api_connexion/schemas/test_connection_schema.py
+++ b/tests/api_connexion/schemas/test_connection_schema.py
@@ -18,6 +18,7 @@ import re
 import unittest
 
 import marshmallow
+import pytest
 
 from airflow.api_connexion.schemas.connection_schema import (
     ConnectionCollection,
@@ -52,17 +53,14 @@ class TestConnectionCollectionItemSchema(unittest.TestCase):
         session.commit()
         connection_model = session.query(Connection).first()
         deserialized_connection = connection_collection_item_schema.dump(connection_model)
-        self.assertEqual(
-            deserialized_connection,
-            {
-                'connection_id': "mysql_default",
-                'conn_type': 'mysql',
-                'host': 'mysql',
-                'login': 'login',
-                'schema': 'testschema',
-                'port': 80,
-            },
-        )
+        assert deserialized_connection == {
+            'connection_id': "mysql_default",
+            'conn_type': 'mysql',
+            'host': 'mysql',
+            'login': 'login',
+            'schema': 'testschema',
+            'port': 80,
+        }
 
     def test_deserialize(self):
         connection_dump_1 = {
@@ -80,32 +78,26 @@ class TestConnectionCollectionItemSchema(unittest.TestCase):
         result_1 = connection_collection_item_schema.load(connection_dump_1)
         result_2 = connection_collection_item_schema.load(connection_dump_2)
 
-        self.assertEqual(
-            result_1,
-            {
-                'conn_id': "mysql_default_1",
-                'conn_type': 'mysql',
-                'host': 'mysql',
-                'login': 'login',
-                'schema': 'testschema',
-                'port': 80,
-            },
-        )
-        self.assertEqual(
-            result_2,
-            {
-                'conn_id': "mysql_default_2",
-                'conn_type': "postgres",
-            },
-        )
+        assert result_1 == {
+            'conn_id': "mysql_default_1",
+            'conn_type': 'mysql',
+            'host': 'mysql',
+            'login': 'login',
+            'schema': 'testschema',
+            'port': 80,
+        }
+        assert result_2 == {
+            'conn_id': "mysql_default_2",
+            'conn_type': "postgres",
+        }
 
     def test_deserialize_required_fields(self):
         connection_dump_1 = {
             'connection_id': "mysql_default_2",
         }
-        with self.assertRaisesRegex(
+        with pytest.raises(
             marshmallow.exceptions.ValidationError,
-            re.escape("{'conn_type': ['Missing data for required field.']}"),
+            match=re.escape("{'conn_type': ['Missing data for required field.']}"),
         ):
             connection_collection_item_schema.load(connection_dump_1)
 
@@ -127,30 +119,27 @@ class TestConnectionCollectionSchema(unittest.TestCase):
         session.commit()
         instance = ConnectionCollection(connections=connections, total_entries=2)
         deserialized_connections = connection_collection_schema.dump(instance)
-        self.assertEqual(
-            deserialized_connections,
-            {
-                'connections': [
-                    {
-                        "connection_id": "mysql_default_1",
-                        "conn_type": "test-type",
-                        "host": None,
-                        "login": None,
-                        'schema': None,
-                        'port': None,
-                    },
-                    {
-                        "connection_id": "mysql_default_2",
-                        "conn_type": "test-type2",
-                        "host": None,
-                        "login": None,
-                        'schema': None,
-                        'port': None,
-                    },
-                ],
-                'total_entries': 2,
-            },
-        )
+        assert deserialized_connections == {
+            'connections': [
+                {
+                    "connection_id": "mysql_default_1",
+                    "conn_type": "test-type",
+                    "host": None,
+                    "login": None,
+                    'schema': None,
+                    'port': None,
+                },
+                {
+                    "connection_id": "mysql_default_2",
+                    "conn_type": "test-type2",
+                    "host": None,
+                    "login": None,
+                    'schema': None,
+                    'port': None,
+                },
+            ],
+            'total_entries': 2,
+        }
 
 
 class TestConnectionSchema(unittest.TestCase):
@@ -177,18 +166,15 @@ class TestConnectionSchema(unittest.TestCase):
         session.commit()
         connection_model = session.query(Connection).first()
         deserialized_connection = connection_schema.dump(connection_model)
-        self.assertEqual(
-            deserialized_connection,
-            {
-                'connection_id': "mysql_default",
-                'conn_type': 'mysql',
-                'host': 'mysql',
-                'login': 'login',
-                'schema': 'testschema',
-                'port': 80,
-                'extra': "{'key':'string'}",
-            },
-        )
+        assert deserialized_connection == {
+            'connection_id': "mysql_default",
+            'conn_type': 'mysql',
+            'host': 'mysql',
+            'login': 'login',
+            'schema': 'testschema',
+            'port': 80,
+            'extra': "{'key':'string'}",
+        }
 
     def test_deserialize(self):
         den = {
@@ -201,15 +187,12 @@ class TestConnectionSchema(unittest.TestCase):
             'extra': "{'key':'string'}",
         }
         result = connection_schema.load(den)
-        self.assertEqual(
-            result,
-            {
-                'conn_id': "mysql_default",
-                'conn_type': 'mysql',
-                'host': 'mysql',
-                'login': 'login',
-                'schema': 'testschema',
-                'port': 80,
-                'extra': "{'key':'string'}",
-            },
-        )
+        assert result == {
+            'conn_id': "mysql_default",
+            'conn_type': 'mysql',
+            'host': 'mysql',
+            'login': 'login',
+            'schema': 'testschema',
+            'port': 80,
+            'extra': "{'key':'string'}",
+        }
diff --git a/tests/api_connexion/schemas/test_dag_run_schema.py b/tests/api_connexion/schemas/test_dag_run_schema.py
index 5569dbc..3e6bf2e 100644
--- a/tests/api_connexion/schemas/test_dag_run_schema.py
+++ b/tests/api_connexion/schemas/test_dag_run_schema.py
@@ -16,6 +16,7 @@
 # under the License.
 import unittest
 
+import pytest
 from dateutil.parser import parse
 from parameterized import parameterized
 
@@ -58,19 +59,16 @@ class TestDAGRunSchema(TestDAGRunBase):
         dagrun_model = session.query(DagRun).first()
         deserialized_dagrun = dagrun_schema.dump(dagrun_model)
 
-        self.assertEqual(
-            deserialized_dagrun,
-            {
-                "dag_id": None,
-                "dag_run_id": "my-dag-run",
-                "end_date": None,
-                "state": "running",
-                "execution_date": self.default_time,
-                "external_trigger": True,
-                "start_date": self.default_time,
-                "conf": {"start": "stop"},
-            },
-        )
+        assert deserialized_dagrun == {
+            "dag_id": None,
+            "dag_run_id": "my-dag-run",
+            "end_date": None,
+            "state": "running",
+            "execution_date": self.default_time,
+            "external_trigger": True,
+            "start_date": self.default_time,
+            "conf": {"start": "stop"},
+        }
 
     @parameterized.expand(
         [
@@ -106,22 +104,19 @@ class TestDAGRunSchema(TestDAGRunBase):
     )
     def test_deserialize(self, serialized_dagrun, expected_result):
         result = dagrun_schema.load(serialized_dagrun)
-        self.assertDictEqual(result, expected_result)
+        assert result == expected_result
 
     def test_autofill_fields(self):
         """Dag_run_id and execution_date fields are autogenerated if missing"""
         serialized_dagrun = {}
         result = dagrun_schema.load(serialized_dagrun)
-        self.assertDictEqual(
-            result,
-            {"execution_date": result["execution_date"], "run_id": result["run_id"]},
-        )
+        assert result == {"execution_date": result["execution_date"], "run_id": result["run_id"]}
 
     def test_invalid_execution_date_raises(self):
         serialized_dagrun = {"execution_date": "mydate"}
-        with self.assertRaises(BadRequest) as e:
+        with pytest.raises(BadRequest) as ctx:
             dagrun_schema.load(serialized_dagrun)
-        self.assertEqual(str(e.exception), "Incorrect datetime argument")
+        assert str(ctx.value) == "Incorrect datetime argument"
 
 
 class TestDagRunCollection(TestDAGRunBase):
@@ -145,31 +140,28 @@ class TestDagRunCollection(TestDAGRunBase):
         session.commit()
         instance = DAGRunCollection(dag_runs=dagruns, total_entries=2)
         deserialized_dagruns = dagrun_collection_schema.dump(instance)
-        self.assertEqual(
-            deserialized_dagruns,
-            {
-                "dag_runs": [
-                    {
-                        "dag_id": None,
-                        "dag_run_id": "my-dag-run",
-                        "end_date": None,
-                        "execution_date": self.default_time,
-                        "external_trigger": True,
-                        "state": "running",
-                        "start_date": self.default_time,
-                        "conf": {"start": "stop"},
-                    },
-                    {
-                        "dag_id": None,
-                        "dag_run_id": "my-dag-run-2",
-                        "end_date": None,
-                        "state": "running",
-                        "execution_date": self.default_time,
-                        "external_trigger": True,
-                        "start_date": self.default_time,
-                        "conf": {},
-                    },
-                ],
-                "total_entries": 2,
-            },
-        )
+        assert deserialized_dagruns == {
+            "dag_runs": [
+                {
+                    "dag_id": None,
+                    "dag_run_id": "my-dag-run",
+                    "end_date": None,
+                    "execution_date": self.default_time,
+                    "external_trigger": True,
+                    "state": "running",
+                    "start_date": self.default_time,
+                    "conf": {"start": "stop"},
+                },
+                {
+                    "dag_id": None,
+                    "dag_run_id": "my-dag-run-2",
+                    "end_date": None,
+                    "state": "running",
+                    "execution_date": self.default_time,
+                    "external_trigger": True,
+                    "start_date": self.default_time,
+                    "conf": {},
+                },
+            ],
+            "total_entries": 2,
+        }
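
When a test inspects the exception rather than matching its message,
pytest.raises is used as a context manager yielding an ExceptionInfo; the
raised exception itself lives on its .value attribute, the counterpart of
unittest's ctx.exception. A hypothetical sketch:

    import pytest

    def test_exception_value():
        with pytest.raises(ValueError) as ctx:
            raise ValueError("Incorrect datetime argument")
        # ctx is an ExceptionInfo wrapper; the exception is ctx.value.
        assert str(ctx.value) == "Incorrect datetime argument"
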
diff --git a/tests/api_connexion/schemas/test_dag_schema.py b/tests/api_connexion/schemas/test_dag_schema.py
index bc18b3c..4b63795 100644
--- a/tests/api_connexion/schemas/test_dag_schema.py
+++ b/tests/api_connexion/schemas/test_dag_schema.py
@@ -47,21 +47,18 @@ class TestDagSchema(unittest.TestCase):
             tags=[DagTag(name="tag-1"), DagTag(name="tag-2")],
         )
         serialized_dag = DAGSchema().dump(dag_model)
-        self.assertEqual(
-            {
-                "dag_id": "test_dag_id",
-                "description": "The description",
-                "fileloc": "/root/airflow/dags/my_dag.py",
-                "file_token": SERIALIZER.dumps("/root/airflow/dags/my_dag.py"),
-                "is_paused": True,
-                "is_subdag": False,
-                "owners": ["airflow1", "airflow2"],
-                "root_dag_id": "test_root_dag_id",
-                "schedule_interval": {"__type": "CronExpression", "value": "5 4 * * *"},
-                "tags": [{"name": "tag-1"}, {"name": "tag-2"}],
-            },
-            serialized_dag,
-        )
+        assert {
+            "dag_id": "test_dag_id",
+            "description": "The description",
+            "fileloc": "/root/airflow/dags/my_dag.py",
+            "file_token": SERIALIZER.dumps("/root/airflow/dags/my_dag.py"),
+            "is_paused": True,
+            "is_subdag": False,
+            "owners": ["airflow1", "airflow2"],
+            "root_dag_id": "test_root_dag_id",
+            "schedule_interval": {"__type": "CronExpression", "value": "5 4 * * *"},
+            "tags": [{"name": "tag-1"}, {"name": "tag-2"}],
+        } == serialized_dag
 
 
 class TestDAGCollectionSchema(unittest.TestCase):
@@ -70,38 +67,35 @@ class TestDAGCollectionSchema(unittest.TestCase):
         dag_model_b = DagModel(dag_id="test_dag_id_b", fileloc="/tmp/a.py")
         schema = DAGCollectionSchema()
         instance = DAGCollection(dags=[dag_model_a, dag_model_b], total_entries=2)
-        self.assertEqual(
-            {
-                "dags": [
-                    {
-                        "dag_id": "test_dag_id_a",
-                        "description": None,
-                        "fileloc": "/tmp/a.py",
-                        "file_token": SERIALIZER.dumps("/tmp/a.py"),
-                        "is_paused": None,
-                        "is_subdag": None,
-                        "owners": [],
-                        "root_dag_id": None,
-                        "schedule_interval": None,
-                        "tags": [],
-                    },
-                    {
-                        "dag_id": "test_dag_id_b",
-                        "description": None,
-                        "fileloc": "/tmp/a.py",
-                        "file_token": SERIALIZER.dumps("/tmp/a.py"),
-                        "is_paused": None,
-                        "is_subdag": None,
-                        "owners": [],
-                        "root_dag_id": None,
-                        "schedule_interval": None,
-                        "tags": [],
-                    },
-                ],
-                "total_entries": 2,
-            },
-            schema.dump(instance),
-        )
+        assert {
+            "dags": [
+                {
+                    "dag_id": "test_dag_id_a",
+                    "description": None,
+                    "fileloc": "/tmp/a.py",
+                    "file_token": SERIALIZER.dumps("/tmp/a.py"),
+                    "is_paused": None,
+                    "is_subdag": None,
+                    "owners": [],
+                    "root_dag_id": None,
+                    "schedule_interval": None,
+                    "tags": [],
+                },
+                {
+                    "dag_id": "test_dag_id_b",
+                    "description": None,
+                    "fileloc": "/tmp/a.py",
+                    "file_token": SERIALIZER.dumps("/tmp/a.py"),
+                    "is_paused": None,
+                    "is_subdag": None,
+                    "owners": [],
+                    "root_dag_id": None,
+                    "schedule_interval": None,
+                    "tags": [],
+                },
+            ],
+            "total_entries": 2,
+        } == schema.dump(instance)
 
 
 class TestDAGDetailSchema:
diff --git a/tests/api_connexion/schemas/test_error_schema.py b/tests/api_connexion/schemas/test_error_schema.py
index c2f6aef..02d574f 100644
--- a/tests/api_connexion/schemas/test_error_schema.py
+++ b/tests/api_connexion/schemas/test_error_schema.py
@@ -48,15 +48,12 @@ class TestErrorSchema(TestErrorSchemaBase):
         session.commit()
         serialized_data = import_error_schema.dump(import_error)
         serialized_data["import_error_id"] = 1
-        self.assertEqual(
-            {
-                "filename": "lorem.py",
-                "import_error_id": 1,
-                "stack_trace": "Lorem Ipsum",
-                "timestamp": "2020-06-10T12:02:44+00:00",
-            },
-            serialized_data,
-        )
+        assert {
+            "filename": "lorem.py",
+            "import_error_id": 1,
+            "stack_trace": "Lorem Ipsum",
+            "timestamp": "2020-06-10T12:02:44+00:00",
+        } == serialized_data
 
 
 class TestErrorCollectionSchema(TestErrorSchemaBase):
@@ -80,23 +77,20 @@ class TestErrorCollectionSchema(TestErrorSchemaBase):
         # To maintain consistency in the key sequence across the db in tests
         serialized_data["import_errors"][0]["import_error_id"] = 1
         serialized_data["import_errors"][1]["import_error_id"] = 2
-        self.assertEqual(
-            {
-                "import_errors": [
-                    {
-                        "filename": "Lorem_ipsum.py",
-                        "import_error_id": 1,
-                        "stack_trace": "Lorem ipsum",
-                        "timestamp": "2020-06-10T12:02:44+00:00",
-                    },
-                    {
-                        "filename": "Lorem_ipsum.py",
-                        "import_error_id": 2,
-                        "stack_trace": "Lorem ipsum",
-                        "timestamp": "2020-06-10T12:02:44+00:00",
-                    },
-                ],
-                "total_entries": 2,
-            },
-            serialized_data,
-        )
+        assert {
+            "import_errors": [
+                {
+                    "filename": "Lorem_ipsum.py",
+                    "import_error_id": 1,
+                    "stack_trace": "Lorem ipsum",
+                    "timestamp": "2020-06-10T12:02:44+00:00",
+                },
+                {
+                    "filename": "Lorem_ipsum.py",
+                    "import_error_id": 2,
+                    "stack_trace": "Lorem ipsum",
+                    "timestamp": "2020-06-10T12:02:44+00:00",
+                },
+            ],
+            "total_entries": 2,
+        } == serialized_data
diff --git a/tests/api_connexion/schemas/test_event_log_schema.py b/tests/api_connexion/schemas/test_event_log_schema.py
index b4c2003..597ecc7 100644
--- a/tests/api_connexion/schemas/test_event_log_schema.py
+++ b/tests/api_connexion/schemas/test_event_log_schema.py
@@ -59,19 +59,16 @@ class TestEventLogSchema(TestEventLogSchemaBase):
         event_log_model.dttm = timezone.parse(self.default_time)
         log_model = session.query(Log).first()
         deserialized_log = event_log_schema.dump(log_model)
-        self.assertEqual(
-            deserialized_log,
-            {
-                "event_log_id": event_log_model.id,
-                "event": "TEST_EVENT",
-                "dag_id": "TEST_DAG_ID",
-                "task_id": "TEST_TASK_ID",
-                "execution_date": self.default_time,
-                "owner": 'airflow',
-                "when": self.default_time,
-                "extra": None,
-            },
-        )
+        assert deserialized_log == {
+            "event_log_id": event_log_model.id,
+            "event": "TEST_EVENT",
+            "dag_id": "TEST_DAG_ID",
+            "task_id": "TEST_TASK_ID",
+            "execution_date": self.default_time,
+            "owner": 'airflow',
+            "when": self.default_time,
+            "extra": None,
+        }
 
 
 class TestEventLogCollection(TestEventLogSchemaBase):
@@ -86,31 +83,28 @@ class TestEventLogCollection(TestEventLogSchemaBase):
         event_log_model_2.dttm = timezone.parse(self.default_time2)
         instance = EventLogCollection(event_logs=event_logs, total_entries=2)
         deserialized_event_logs = event_log_collection_schema.dump(instance)
-        self.assertEqual(
-            deserialized_event_logs,
-            {
-                "event_logs": [
-                    {
-                        "event_log_id": event_log_model_1.id,
-                        "event": "TEST_EVENT_1",
-                        "dag_id": "TEST_DAG_ID",
-                        "task_id": "TEST_TASK_ID",
-                        "execution_date": self.default_time,
-                        "owner": 'airflow',
-                        "when": self.default_time,
-                        "extra": None,
-                    },
-                    {
-                        "event_log_id": event_log_model_2.id,
-                        "event": "TEST_EVENT_2",
-                        "dag_id": "TEST_DAG_ID",
-                        "task_id": "TEST_TASK_ID",
-                        "execution_date": self.default_time,
-                        "owner": 'airflow',
-                        "when": self.default_time2,
-                        "extra": None,
-                    },
-                ],
-                "total_entries": 2,
-            },
-        )
+        assert deserialized_event_logs == {
+            "event_logs": [
+                {
+                    "event_log_id": event_log_model_1.id,
+                    "event": "TEST_EVENT_1",
+                    "dag_id": "TEST_DAG_ID",
+                    "task_id": "TEST_TASK_ID",
+                    "execution_date": self.default_time,
+                    "owner": 'airflow',
+                    "when": self.default_time,
+                    "extra": None,
+                },
+                {
+                    "event_log_id": event_log_model_2.id,
+                    "event": "TEST_EVENT_2",
+                    "dag_id": "TEST_DAG_ID",
+                    "task_id": "TEST_TASK_ID",
+                    "execution_date": self.default_time,
+                    "owner": 'airflow',
+                    "when": self.default_time2,
+                    "extra": None,
+                },
+            ],
+            "total_entries": 2,
+        }
diff --git a/tests/api_connexion/schemas/test_health_schema.py b/tests/api_connexion/schemas/test_health_schema.py
index e7e1ff6..339da2c 100644
--- a/tests/api_connexion/schemas/test_health_schema.py
+++ b/tests/api_connexion/schemas/test_health_schema.py
@@ -32,4 +32,4 @@ class TestHeathSchema(unittest.TestCase):
             },
         }
         serialized_data = health_schema.dump(payload)
-        self.assertDictEqual(serialized_data, payload)
+        assert serialized_data == payload
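
The conversions above follow one mechanical rule: unittest's camelCase helpers become bare comparisons, which pytest's assertion rewriting can introspect and report just as richly. A minimal sketch of the recurring equality, membership, and identity forms, using hypothetical values rather than anything from the patch:

    serialized = {"status": "healthy"}

    # self.assertEqual(a, b) / self.assertDictEqual(a, b)  ->
    assert serialized == {"status": "healthy"}

    # self.assertIn(member, container)  ->
    assert "status" in serialized

    # self.assertIsNone(x) / self.assertIsNotNone(x)  ->
    assert serialized.get("missing") is None
    assert serialized.get("status") is not None

    # self.assertNotIsInstance(x, cls)  ->
    assert not isinstance(serialized, list)
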
diff --git a/tests/api_connexion/schemas/test_pool_schemas.py b/tests/api_connexion/schemas/test_pool_schemas.py
index 53b6963..d1cb2c4 100644
--- a/tests/api_connexion/schemas/test_pool_schemas.py
+++ b/tests/api_connexion/schemas/test_pool_schemas.py
@@ -37,23 +37,20 @@ class TestPoolSchema(unittest.TestCase):
         session.commit()
         pool_instance = session.query(Pool).filter(Pool.pool == pool_model.pool).first()
         serialized_pool = pool_schema.dump(pool_instance)
-        self.assertEqual(
-            serialized_pool,
-            {
-                "name": "test_pool",
-                "slots": 2,
-                "occupied_slots": 0,
-                "running_slots": 0,
-                "queued_slots": 0,
-                "open_slots": 2,
-            },
-        )
+        assert serialized_pool == {
+            "name": "test_pool",
+            "slots": 2,
+            "occupied_slots": 0,
+            "running_slots": 0,
+            "queued_slots": 0,
+            "open_slots": 2,
+        }
 
     @provide_session
     def test_deserialize(self, session):
         pool_dict = {"name": "test_pool", "slots": 3}
         deserialized_pool = pool_schema.load(pool_dict, session=session)
-        self.assertNotIsInstance(deserialized_pool, Pool)  # Checks if load_instance is set to True
+        assert not isinstance(deserialized_pool, Pool)  # Checks if load_instance is set to True
 
 
 class TestPoolCollectionSchema(unittest.TestCase):
@@ -67,27 +64,24 @@ class TestPoolCollectionSchema(unittest.TestCase):
         pool_model_a = Pool(pool="test_pool_a", slots=3)
         pool_model_b = Pool(pool="test_pool_b", slots=3)
         instance = PoolCollection(pools=[pool_model_a, pool_model_b], total_entries=2)
-        self.assertEqual(
-            {
-                "pools": [
-                    {
-                        "name": "test_pool_a",
-                        "slots": 3,
-                        "occupied_slots": 0,
-                        "running_slots": 0,
-                        "queued_slots": 0,
-                        "open_slots": 3,
-                    },
-                    {
-                        "name": "test_pool_b",
-                        "slots": 3,
-                        "occupied_slots": 0,
-                        "running_slots": 0,
-                        "queued_slots": 0,
-                        "open_slots": 3,
-                    },
-                ],
-                "total_entries": 2,
-            },
-            pool_collection_schema.dump(instance),
-        )
+        assert {
+            "pools": [
+                {
+                    "name": "test_pool_a",
+                    "slots": 3,
+                    "occupied_slots": 0,
+                    "running_slots": 0,
+                    "queued_slots": 0,
+                    "open_slots": 3,
+                },
+                {
+                    "name": "test_pool_b",
+                    "slots": 3,
+                    "occupied_slots": 0,
+                    "running_slots": 0,
+                    "queued_slots": 0,
+                    "open_slots": 3,
+                },
+            ],
+            "total_entries": 2,
+        } == pool_collection_schema.dump(instance)
diff --git a/tests/api_connexion/schemas/test_task_instance_schema.py b/tests/api_connexion/schemas/test_task_instance_schema.py
index 88ee16c..9720ac0 100644
--- a/tests/api_connexion/schemas/test_task_instance_schema.py
+++ b/tests/api_connexion/schemas/test_task_instance_schema.py
@@ -19,6 +19,7 @@ import datetime as dt
 import getpass
 import unittest
 
+import pytest
 from marshmallow import ValidationError
 from parameterized import parameterized
 
@@ -88,7 +89,7 @@ class TestTaskInstanceSchema(unittest.TestCase):
             "try_number": 0,
             "unixname": getpass.getuser(),
         }
-        self.assertDictEqual(serialized_ti, expected_json)
+        assert serialized_ti == expected_json
 
     @provide_session
     def test_task_instance_schema_with_sla(self, session):
@@ -134,7 +135,7 @@ class TestTaskInstanceSchema(unittest.TestCase):
             "try_number": 0,
             "unixname": getpass.getuser(),
         }
-        self.assertDictEqual(serialized_ti, expected_json)
+        assert serialized_ti == expected_json
 
 
 class TestClearTaskInstanceFormSchema(unittest.TestCase):
@@ -163,7 +164,7 @@ class TestClearTaskInstanceFormSchema(unittest.TestCase):
         ]
     )
     def test_validation_error(self, payload):
-        with self.assertRaises(ValidationError):
+        with pytest.raises(ValidationError):
             clear_task_instance_form.load(payload)
 
 
@@ -193,7 +194,7 @@ class TestSetTaskInstanceStateFormSchema(unittest.TestCase):
             'new_state': 'failed',
             'task_id': 'print_the_context',
         }
-        self.assertEqual(expected_result, result)
+        assert expected_result == result
 
     @parameterized.expand(
         [
@@ -206,5 +207,5 @@ class TestSetTaskInstanceStateFormSchema(unittest.TestCase):
     def test_validation_error(self, override_data):
         self.current_input.update(override_data)
 
-        with self.assertRaises(ValidationError):
+        with pytest.raises(ValidationError):
             clear_task_instance_form.load(self.current_input)
diff --git a/tests/api_connexion/schemas/test_version_schema.py b/tests/api_connexion/schemas/test_version_schema.py
index 2705910..8cb654b 100644
--- a/tests/api_connexion/schemas/test_version_schema.py
+++ b/tests/api_connexion/schemas/test_version_schema.py
@@ -35,4 +35,4 @@ class TestVersionInfoSchema(unittest.TestCase):
         current_data = version_info_schema.dump(version_info)
 
         expected_result = {'version': 'VERSION', 'git_version': git_commit}
-        self.assertEqual(expected_result, current_data)
+        assert expected_result == current_data
diff --git a/tests/api_connexion/schemas/test_xcom_schema.py b/tests/api_connexion/schemas/test_xcom_schema.py
index 846f727..b541ebd 100644
--- a/tests/api_connexion/schemas/test_xcom_schema.py
+++ b/tests/api_connexion/schemas/test_xcom_schema.py
@@ -64,16 +64,13 @@ class TestXComCollectionItemSchema(TestXComSchemaBase):
         session.commit()
         xcom_model = session.query(XCom).first()
         deserialized_xcom = xcom_collection_item_schema.dump(xcom_model)
-        self.assertEqual(
-            deserialized_xcom,
-            {
-                'key': 'test_key',
-                'timestamp': self.default_time,
-                'execution_date': self.default_time,
-                'task_id': 'test_task_id',
-                'dag_id': 'test_dag',
-            },
-        )
+        assert deserialized_xcom == {
+            'key': 'test_key',
+            'timestamp': self.default_time,
+            'execution_date': self.default_time,
+            'task_id': 'test_task_id',
+            'dag_id': 'test_dag',
+        }
 
     def test_deserialize(self):
         xcom_dump = {
@@ -84,16 +81,13 @@ class TestXComCollectionItemSchema(TestXComSchemaBase):
             'dag_id': 'test_dag',
         }
         result = xcom_collection_item_schema.load(xcom_dump)
-        self.assertEqual(
-            result,
-            {
-                'key': 'test_key',
-                'timestamp': self.default_time_parsed,
-                'execution_date': self.default_time_parsed,
-                'task_id': 'test_task_id',
-                'dag_id': 'test_dag',
-            },
-        )
+        assert result == {
+            'key': 'test_key',
+            'timestamp': self.default_time_parsed,
+            'execution_date': self.default_time_parsed,
+            'task_id': 'test_task_id',
+            'dag_id': 'test_dag',
+        }
 
 
 class TestXComCollectionSchema(TestXComSchemaBase):
@@ -133,28 +127,25 @@ class TestXComCollectionSchema(TestXComSchemaBase):
                 total_entries=xcom_models_query.count(),
             )
         )
-        self.assertEqual(
-            deserialized_xcoms,
-            {
-                'xcom_entries': [
-                    {
-                        'key': 'test_key_1',
-                        'timestamp': self.default_time_1,
-                        'execution_date': self.default_time_1,
-                        'task_id': 'test_task_id_1',
-                        'dag_id': 'test_dag_1',
-                    },
-                    {
-                        'key': 'test_key_2',
-                        'timestamp': self.default_time_2,
-                        'execution_date': self.default_time_2,
-                        'task_id': 'test_task_id_2',
-                        'dag_id': 'test_dag_2',
-                    },
-                ],
-                'total_entries': len(xcom_models),
-            },
-        )
+        assert deserialized_xcoms == {
+            'xcom_entries': [
+                {
+                    'key': 'test_key_1',
+                    'timestamp': self.default_time_1,
+                    'execution_date': self.default_time_1,
+                    'task_id': 'test_task_id_1',
+                    'dag_id': 'test_dag_1',
+                },
+                {
+                    'key': 'test_key_2',
+                    'timestamp': self.default_time_2,
+                    'execution_date': self.default_time_2,
+                    'task_id': 'test_task_id_2',
+                    'dag_id': 'test_dag_2',
+                },
+            ],
+            'total_entries': len(xcom_models),
+        }
 
 
 class TestXComSchema(TestXComSchemaBase):
@@ -177,17 +168,14 @@ class TestXComSchema(TestXComSchemaBase):
         session.commit()
         xcom_model = session.query(XCom).first()
         deserialized_xcom = xcom_schema.dump(xcom_model)
-        self.assertEqual(
-            deserialized_xcom,
-            {
-                'key': 'test_key',
-                'timestamp': self.default_time,
-                'execution_date': self.default_time,
-                'task_id': 'test_task_id',
-                'dag_id': 'test_dag',
-                'value': 'test_binary',
-            },
-        )
+        assert deserialized_xcom == {
+            'key': 'test_key',
+            'timestamp': self.default_time,
+            'execution_date': self.default_time,
+            'task_id': 'test_task_id',
+            'dag_id': 'test_dag',
+            'value': 'test_binary',
+        }
 
     def test_deserialize(self):
         xcom_dump = {
@@ -199,14 +187,11 @@ class TestXComSchema(TestXComSchemaBase):
             'value': b'test_binary',
         }
         result = xcom_schema.load(xcom_dump)
-        self.assertEqual(
-            result,
-            {
-                'key': 'test_key',
-                'timestamp': self.default_time_parsed,
-                'execution_date': self.default_time_parsed,
-                'task_id': 'test_task_id',
-                'dag_id': 'test_dag',
-                'value': 'test_binary',
-            },
-        )
+        assert result == {
+            'key': 'test_key',
+            'timestamp': self.default_time_parsed,
+            'execution_date': self.default_time_parsed,
+            'task_id': 'test_task_id',
+            'dag_id': 'test_dag',
+            'value': 'test_binary',
+        }
diff --git a/tests/api_connexion/test_error_handling.py b/tests/api_connexion/test_error_handling.py
index e921aea..cfd33da 100644
--- a/tests/api_connexion/test_error_handling.py
+++ b/tests/api_connexion/test_error_handling.py
@@ -37,7 +37,7 @@ class TestErrorHandling(unittest.TestCase):
 
         # Then we have parsable JSON as output
 
-        self.assertEqual(404, resp_json["status"])
+        assert 404 == resp_json["status"]
 
         # When we are hitting an incorrect non-API endpoint
 
@@ -45,8 +45,8 @@ class TestErrorHandling(unittest.TestCase):
 
         # Then we do not get JSON as the response, but rather standard HTML
 
-        self.assertIsNone(resp_json)
+        assert resp_json is None
 
         resp_json = self.client.put("/api/v1/variables").json
 
-        self.assertEqual('Method Not Allowed', resp_json["title"])
+        assert 'Method Not Allowed' == resp_json["title"]
diff --git a/tests/api_connexion/test_parameters.py b/tests/api_connexion/test_parameters.py
index 1f625dc..6b9e59c 100644
--- a/tests/api_connexion/test_parameters.py
+++ b/tests/api_connexion/test_parameters.py
@@ -18,6 +18,7 @@
 import unittest
 from unittest import mock
 
+import pytest
 from pendulum import DateTime
 from pendulum.tz.timezone import Timezone
 
@@ -40,7 +41,7 @@ class TestValidateIsTimezone(unittest.TestCase):
         self.timezoned = datetime.now(tz=timezone.utc)
 
     def test_gives_400_for_naive(self):
-        with self.assertRaises(BadRequest):
+        with pytest.raises(BadRequest):
             validate_istimezone(self.naive)
 
     def test_timezone_passes(self):
@@ -66,7 +67,7 @@ class TestDateTimeParser(unittest.TestCase):
 
     def test_raises_400_for_invalid_arg(self):
         invalid_datetime = '2020-06-13T22:44:00P'
-        with self.assertRaises(BadRequest):
+        with pytest.raises(BadRequest):
             format_datetime(invalid_datetime)
 
 
@@ -74,26 +75,26 @@ class TestMaximumPagelimit(unittest.TestCase):
     @conf_vars({("api", "maximum_page_limit"): "320"})
     def test_maximum_limit_return_val(self):
         limit = check_limit(300)
-        self.assertEqual(limit, 300)
+        assert limit == 300
 
     @conf_vars({("api", "maximum_page_limit"): "320"})
     def test_maximum_limit_returns_configured_if_limit_above_conf(self):
         limit = check_limit(350)
-        self.assertEqual(limit, 320)
+        assert limit == 320
 
     @conf_vars({("api", "maximum_page_limit"): "1000"})
     def test_limit_returns_set_max_if_give_limit_is_exceeded(self):
         limit = check_limit(1500)
-        self.assertEqual(limit, 1000)
+        assert limit == 1000
 
     @conf_vars({("api", "fallback_page_limit"): "100"})
     def test_limit_of_zero_returns_default(self):
         limit = check_limit(0)
-        self.assertEqual(limit, 100)
+        assert limit == 100
 
     @conf_vars({("api", "maximum_page_limit"): "1500"})
     def test_negative_limit_raises(self):
-        with self.assertRaises(BadRequest):
+        with pytest.raises(BadRequest):
             check_limit(-1)
 
 
@@ -111,7 +112,7 @@ class TestFormatParameters(unittest.TestCase):
         decorator = format_parameters({"param_a": format_datetime})
         endpoint = mock.MagicMock()
         decorated_endpoint = decorator(endpoint)
-        with self.assertRaises(BadRequest):
+        with pytest.raises(BadRequest):
             decorated_endpoint(param_a='XXXXX')
 
     @conf_vars({("api", "maximum_page_limit"): "100"})
diff --git a/tests/cli/commands/test_celery_command.py b/tests/cli/commands/test_celery_command.py
index 2a37e27..41893b7 100644
--- a/tests/cli/commands/test_celery_command.py
+++ b/tests/cli/commands/test_celery_command.py
@@ -38,9 +38,9 @@ class TestWorkerPrecheck(unittest.TestCase):
         by mocking validate_session method
         """
         mock_validate_session.return_value = False
-        with self.assertRaises(SystemExit) as cm:
+        with pytest.raises(SystemExit) as ctx:
             celery_command.worker(Namespace(queues=1, concurrency=1))
-        self.assertEqual(str(cm.exception), "Worker exiting, database connection precheck failed.")
+        assert str(ctx.value) == "Worker exiting, database connection precheck failed."
 
     @conf_vars({('celery', 'worker_precheck'): 'False'})
     def test_worker_precheck_exception(self):
@@ -48,7 +48,7 @@ class TestWorkerPrecheck(unittest.TestCase):
         Test to check the behaviour of validate_session method
         when worker_precheck is absent in airflow configuration
         """
-        self.assertTrue(airflow.settings.validate_session())
+        assert airflow.settings.validate_session()
 
     @mock.patch('sqlalchemy.orm.session.Session.execute')
     @conf_vars({('celery', 'worker_precheck'): 'True'})
@@ -57,7 +57,7 @@ class TestWorkerPrecheck(unittest.TestCase):
         Test to validate connection failure scenario on SELECT 1 query
         """
         mock_session.side_effect = sqlalchemy.exc.OperationalError("m1", "m2", "m3", "m4")
-        self.assertEqual(airflow.settings.validate_session(), False)
+        assert airflow.settings.validate_session() is False
 
 
 @pytest.mark.integration("redis")
@@ -105,7 +105,7 @@ class TestCeleryStopCommand(unittest.TestCase):
         pid = "123"
 
         # Calling stop_worker should delete the temporary pid file
-        with self.assertRaises(FileNotFoundError):
+        with pytest.raises(FileNotFoundError):
             with NamedTemporaryFile("w+") as f:
                 # Create pid file
                 f.write(pid)
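
Where the old tests captured the exception object with `with self.assertRaises(...) as cm`, the converted code uses `pytest.raises(...) as ctx` and reads `ctx.value` in place of `cm.exception`, as in the SystemExit check above. A small self-contained sketch of that pattern, with a hypothetical function standing in for the real CLI entry point:

    import pytest

    def precheck():  # hypothetical stand-in for the worker precheck
        raise SystemExit("Worker exiting, database connection precheck failed.")

    def test_precheck_exits_with_message():
        with pytest.raises(SystemExit) as ctx:
            precheck()
        # pytest yields an ExceptionInfo; .value is the raised exception itself
        assert str(ctx.value) == "Worker exiting, database connection precheck failed."
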
diff --git a/tests/cli/commands/test_cheat_sheet_command.py b/tests/cli/commands/test_cheat_sheet_command.py
index 3928106..5edc4b7 100644
--- a/tests/cli/commands/test_cheat_sheet_command.py
+++ b/tests/cli/commands/test_cheat_sheet_command.py
@@ -100,6 +100,6 @@ class TestCheatSheetCommand(unittest.TestCase):
             args = self.parser.parse_args(['cheat-sheet'])
             args.func(args)
         output = temp_stdout.getvalue()
-        self.assertIn(ALL_COMMANDS, output)
-        self.assertIn(SECTION_A, output)
-        self.assertIn(SECTION_E, output)
+        assert ALL_COMMANDS in output
+        assert SECTION_A in output
+        assert SECTION_E in output
diff --git a/tests/cli/commands/test_config_command.py b/tests/cli/commands/test_config_command.py
index 532a95f..b665db7 100644
--- a/tests/cli/commands/test_config_command.py
+++ b/tests/cli/commands/test_config_command.py
@@ -19,6 +19,8 @@ import io
 import unittest
 from unittest import mock
 
+import pytest
+
 from airflow.cli import cli_parser
 from airflow.cli.commands import config_command
 from tests.test_utils.config import conf_vars
@@ -39,8 +41,8 @@ class TestCliConfigList(unittest.TestCase):
     def test_cli_show_config_should_display_key(self):
         with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
             config_command.show_config(self.parser.parse_args(['config', 'list', '--color', 'off']))
-        self.assertIn('[core]', temp_stdout.getvalue())
-        self.assertIn('testkey = test_value', temp_stdout.getvalue())
+        assert '[core]' in temp_stdout.getvalue()
+        assert 'testkey = test_value' in temp_stdout.getvalue()
 
 
 class TestCliConfigGetValue(unittest.TestCase):
@@ -53,28 +55,26 @@ class TestCliConfigGetValue(unittest.TestCase):
         with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
             config_command.get_value(self.parser.parse_args(['config', 'get-value', 'core', 'test_key']))
 
-        self.assertEqual("test_value", temp_stdout.getvalue().strip())
+        assert "test_value" == temp_stdout.getvalue().strip()
 
     @mock.patch("airflow.cli.commands.config_command.conf")
     def test_should_raise_exception_when_section_is_missing(self, mock_conf):
         mock_conf.has_section.return_value = False
         mock_conf.has_option.return_value = True
 
-        with self.assertRaises(SystemExit) as err:
+        with pytest.raises(SystemExit) as ctx:
             config_command.get_value(
                 self.parser.parse_args(['config', 'get-value', 'missing-section', 'dags_folder'])
             )
-        self.assertEqual("The section [missing-section] is not found in config.", str(err.exception))
+        assert "The section [missing-section] is not found in config." == str(ctx.value)
 
     @mock.patch("airflow.cli.commands.config_command.conf")
     def test_should_raise_exception_when_option_is_missing(self, mock_conf):
         mock_conf.has_section.return_value = True
         mock_conf.has_option.return_value = False
 
-        with self.assertRaises(SystemExit) as err:
+        with pytest.raises(SystemExit) as ctx:
             config_command.get_value(
                 self.parser.parse_args(['config', 'get-value', 'missing-section', 'dags_folder'])
             )
-        self.assertEqual(
-            "The option [missing-section/dags_folder] is not found in config.", str(err.exception)
-        )
+        assert "The option [missing-section/dags_folder] is not found in config." == str(ctx.value)
diff --git a/tests/cli/commands/test_connection_command.py b/tests/cli/commands/test_connection_command.py
index 1da1174..ae78892 100644
--- a/tests/cli/commands/test_connection_command.py
+++ b/tests/cli/commands/test_connection_command.py
@@ -22,6 +22,7 @@ import unittest
 from contextlib import redirect_stdout
 from unittest import mock
 
+import pytest
 from parameterized import parameterized
 
 from airflow.cli import cli_parser
@@ -46,10 +47,10 @@ class TestCliGetConnection(unittest.TestCase):
                 self.parser.parse_args(["connections", "get", "google_cloud_default", "--output", "json"])
             )
             stdout = stdout.getvalue()
-        self.assertIn("google-cloud-platform:///default", stdout)
+        assert "google-cloud-platform:///default" in stdout
 
     def test_cli_connection_get_invalid(self):
-        with self.assertRaisesRegex(SystemExit, re.escape("Connection not found.")):
+        with pytest.raises(SystemExit, match=re.escape("Connection not found.")):
             connection_command.connections_get(self.parser.parse_args(["connections", "get", "INVALID"]))
 
 
@@ -120,8 +121,8 @@ class TestCliListConnections(unittest.TestCase):
             stdout = stdout.getvalue()
 
         for conn_id, conn_type in self.EXPECTED_CONS:
-            self.assertIn(conn_type, stdout)
-            self.assertIn(conn_id, stdout)
+            assert conn_type in stdout
+            assert conn_id in stdout
 
     def test_cli_connections_filter_conn_id(self):
         args = self.parser.parse_args(
@@ -132,7 +133,7 @@ class TestCliListConnections(unittest.TestCase):
             connection_command.connections_list(args)
             stdout = stdout.getvalue()
 
-        self.assertIn("http_default", stdout)
+        assert "http_default" in stdout
 
 
 class TestCliExportConnections(unittest.TestCase):
@@ -169,7 +170,7 @@ class TestCliExportConnections(unittest.TestCase):
         clear_db_connections()
 
     def test_cli_connections_export_should_return_error_for_invalid_command(self):
-        with self.assertRaises(SystemExit):
+        with pytest.raises(SystemExit):
             self.parser.parse_args(
                 [
                     "connections",
@@ -178,7 +179,7 @@ class TestCliExportConnections(unittest.TestCase):
             )
 
     def test_cli_connections_export_should_return_error_for_invalid_format(self):
-        with self.assertRaises(SystemExit):
+        with pytest.raises(SystemExit):
             self.parser.parse_args(["connections", "export", "--format", "invalid", "/path/to/file"])
 
     @mock.patch('os.path.splitext')
@@ -196,8 +197,8 @@ class TestCliExportConnections(unittest.TestCase):
                 output_filepath,
             ]
         )
-        with self.assertRaisesRegex(
-            SystemExit, r"Unsupported file format. The file must have the extension .yaml, .json, .env"
+        with pytest.raises(
+            SystemExit, match=r"Unsupported file format. The file must have the extension .yaml, .json, .env"
         ):
             connection_command.connections_export(args)
 
@@ -226,7 +227,7 @@ class TestCliExportConnections(unittest.TestCase):
                 output_filepath,
             ]
         )
-        with self.assertRaisesRegex(Exception, r"dummy exception"):
+        with pytest.raises(Exception, match=r"dummy exception"):
             connection_command.connections_export(args)
 
         mock_splittext.assert_not_called()
@@ -256,7 +257,7 @@ class TestCliExportConnections(unittest.TestCase):
                 output_filepath,
             ]
         )
-        with self.assertRaisesRegex(Exception, r"dummy exception"):
+        with pytest.raises(Exception, match=r"dummy exception"):
             connection_command.connections_export(args)
 
         mock_splittext.assert_called_once()
@@ -396,7 +397,7 @@ class TestCliExportConnections(unittest.TestCase):
         mock_splittext.assert_called_once()
         mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
         mock_file_open.return_value.write.assert_called_once_with(mock.ANY)
-        self.assertIn(mock_file_open.return_value.write.call_args_list[0][0][0], expected_connections)
+        assert mock_file_open.return_value.write.call_args_list[0][0][0] in expected_connections
 
     @mock.patch('os.path.splitext')
     @mock.patch('builtins.open', new_callable=mock.mock_open())
@@ -425,7 +426,7 @@ class TestCliExportConnections(unittest.TestCase):
         mock_splittext.assert_called_once()
         mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
         mock_file_open.return_value.write.assert_called_once_with(mock.ANY)
-        self.assertIn(mock_file_open.return_value.write.call_args_list[0][0][0], expected_connections)
+        assert mock_file_open.return_value.write.call_args_list[0][0][0] in expected_connections
 
     @mock.patch('os.path.splitext')
     @mock.patch('builtins.open', new_callable=mock.mock_open())
@@ -631,7 +632,7 @@ class TestCliAddConnections(unittest.TestCase):
 
         stdout = stdout.getvalue()
 
-        self.assertIn(expected_output, stdout)
+        assert expected_output in stdout
         conn_id = cmd[2]
         with create_session() as session:
             comparable_attrs = [
@@ -645,7 +646,7 @@ class TestCliAddConnections(unittest.TestCase):
                 "schema",
             ]
             current_conn = session.query(Connection).filter(Connection.conn_id == conn_id).first()
-            self.assertEqual(expected_conn, {attr: getattr(current_conn, attr) for attr in comparable_attrs})
+            assert expected_conn == {attr: getattr(current_conn, attr) for attr in comparable_attrs}
 
     def test_cli_connections_add_duplicate(self):
         conn_id = "to_be_duplicated"
@@ -653,21 +654,22 @@ class TestCliAddConnections(unittest.TestCase):
             self.parser.parse_args(["connections", "add", conn_id, "--conn-uri=%s" % TEST_URL])
         )
         # Check for addition attempt
-        with self.assertRaisesRegex(SystemExit, rf"A connection with `conn_id`={conn_id} already exists"):
+        with pytest.raises(SystemExit, match=rf"A connection with `conn_id`={conn_id} already exists"):
             connection_command.connections_add(
                 self.parser.parse_args(["connections", "add", conn_id, "--conn-uri=%s" % TEST_URL])
             )
 
     def test_cli_connections_add_delete_with_missing_parameters(self):
         # Attempt to add without providing conn_uri
-        with self.assertRaisesRegex(
-            SystemExit, r"The following args are required to add a connection: \['conn-uri or conn-type'\]"
+        with pytest.raises(
+            SystemExit,
+            match=r"The following args are required to add a connection: \['conn-uri or conn-type'\]",
         ):
             connection_command.connections_add(self.parser.parse_args(["connections", "add", "new1"]))
 
     def test_cli_connections_add_invalid_uri(self):
         # Attempt to add with invalid uri
-        with self.assertRaisesRegex(SystemExit, r"The URI provided to --conn-uri is invalid: nonsense_uri"):
+        with pytest.raises(SystemExit, match=r"The URI provided to --conn-uri is invalid: nonsense_uri"):
             connection_command.connections_add(
                 self.parser.parse_args(["connections", "add", "new1", "--conn-uri=%s" % "nonsense_uri"])
             )
@@ -703,14 +705,14 @@ class TestCliDeleteConnections(unittest.TestCase):
             stdout = stdout.getvalue()
 
         # Check deletion stdout
-        self.assertIn("Successfully deleted connection with `conn_id`=new1", stdout)
+        assert "Successfully deleted connection with `conn_id`=new1" in stdout
 
         # Check deletions
         result = session.query(Connection).filter(Connection.conn_id == "new1").first()
 
-        self.assertTrue(result is None)
+        assert result is None
 
     def test_cli_delete_invalid_connection(self):
         # Attempt to delete a non-existing connection
-        with self.assertRaisesRegex(SystemExit, r"Did not find a connection with `conn_id`=fake"):
+        with pytest.raises(SystemExit, match=r"Did not find a connection with `conn_id`=fake"):
             connection_command.connections_delete(self.parser.parse_args(["connections", "delete", "fake"]))
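
A behavioural note on the `assertRaisesRegex` conversions above: `pytest.raises(..., match=...)` runs `re.search` against the string form of the exception, the same semantics `assertRaisesRegex` had, so regex metacharacters in expected messages still need raw strings or `re.escape`, which is why the patch keeps both. A brief sketch with a hypothetical error message:

    import re

    import pytest

    def delete_connection(conn_id):  # hypothetical stand-in for the command
        raise SystemExit(f"Did not find a connection with `conn_id`={conn_id}")

    def test_match_is_a_regex_search():
        # match= searches rather than fully matches, so a fragment suffices;
        # re.escape keeps arbitrary fragments safe to use as patterns.
        with pytest.raises(SystemExit, match=re.escape("`conn_id`=fake")):
            delete_connection("fake")
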
diff --git a/tests/cli/commands/test_dag_command.py b/tests/cli/commands/test_dag_command.py
index 173a7bf..ed696b0 100644
--- a/tests/cli/commands/test_dag_command.py
+++ b/tests/cli/commands/test_dag_command.py
@@ -23,6 +23,8 @@ import unittest
 from datetime import datetime, timedelta
 from unittest import mock
 
+import pytest
+
 from airflow import settings
 from airflow.cli import cli_parser
 from airflow.cli.commands import dag_command
@@ -104,8 +106,8 @@ class TestCliDags(unittest.TestCase):
             )
 
         output = stdout.getvalue()
-        self.assertIn(f"Dry run of DAG example_bash_operator on {DEFAULT_DATE.isoformat()}\n", output)
-        self.assertIn("Task runme_0\n", output)
+        assert f"Dry run of DAG example_bash_operator on {DEFAULT_DATE.isoformat()}\n" in output
+        assert "Task runme_0\n" in output
 
         mock_run.assert_not_called()  # Dry run shouldn't run the backfill
 
@@ -160,9 +162,9 @@ class TestCliDags(unittest.TestCase):
         with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
             dag_command.dag_show(self.parser.parse_args(['dags', 'show', 'example_bash_operator']))
         out = temp_stdout.getvalue()
-        self.assertIn("label=example_bash_operator", out)
-        self.assertIn("graph [label=example_bash_operator labelloc=t rankdir=LR]", out)
-        self.assertIn("runme_2 -> run_after_loop", out)
+        assert "label=example_bash_operator" in out
+        assert "graph [label=example_bash_operator labelloc=t rankdir=LR]" in out
+        assert "runme_2 -> run_after_loop" in out
 
     @mock.patch("airflow.cli.commands.dag_command.render_dag")
     def test_show_dag_dave(self, mock_render_dag):
@@ -174,7 +176,7 @@ class TestCliDags(unittest.TestCase):
         mock_render_dag.return_value.render.assert_called_once_with(
             cleanup=True, filename='awesome', format='png'
         )
-        self.assertIn("File awesome.png saved", out)
+        assert "File awesome.png saved" in out
 
     @mock.patch("airflow.cli.commands.dag_command.subprocess.Popen")
     @mock.patch("airflow.cli.commands.dag_command.render_dag")
@@ -188,8 +190,8 @@ class TestCliDags(unittest.TestCase):
         out = temp_stdout.getvalue()
         mock_render_dag.return_value.pipe.assert_called_once_with(format='png')
         mock_popen.return_value.communicate.assert_called_once_with(b'DOT_DATA')
-        self.assertIn("OUT", out)
-        self.assertIn("ERR", out)
+        assert "OUT" in out
+        assert "ERR" in out
 
     @mock.patch("airflow.cli.commands.dag_command.DAG.run")
     def test_cli_backfill_depends_on_past(self, mock_run):
@@ -289,7 +291,7 @@ class TestCliDags(unittest.TestCase):
             out = temp_stdout.getvalue()
             # The `next_execution` function is inapplicable if no execution record is found
             # It prints `None` in such cases
-            self.assertIn("None", out)
+            assert "None" in out
 
         # The details below is determined by the schedule_interval of example DAGs
         now = DEFAULT_DATE
@@ -313,14 +315,14 @@ class TestCliDags(unittest.TestCase):
             with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
                 dag_command.dag_next_execution(args)
                 out = temp_stdout.getvalue()
-            self.assertIn(expected_output[i], out)
+            assert expected_output[i] in out
 
             # Test num-executions = 2
             args = self.parser.parse_args(['dags', 'next-execution', dag_id, '--num-executions', '2'])
             with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
                 dag_command.dag_next_execution(args)
                 out = temp_stdout.getvalue()
-            self.assertIn(expected_output_2[i], out)
+            assert expected_output_2[i] in out
 
         # Clean up before leaving
         with create_session() as session:
@@ -334,8 +336,8 @@ class TestCliDags(unittest.TestCase):
             dag_command.dag_report(args)
             out = temp_stdout.getvalue()
 
-        self.assertIn("airflow/example_dags/example_complex.py", out)
-        self.assertIn("example_complex", out)
+        assert "airflow/example_dags/example_complex.py" in out
+        assert "example_complex" in out
 
     @conf_vars({('core', 'load_examples'): 'true'})
     def test_cli_list_dags(self):
@@ -343,11 +345,11 @@ class TestCliDags(unittest.TestCase):
         with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
             dag_command.dag_list_dags(args)
             out = temp_stdout.getvalue()
-        self.assertIn("owner", out)
-        self.assertIn("airflow", out)
-        self.assertIn("paused", out)
-        self.assertIn("airflow/example_dags/example_complex.py", out)
-        self.assertIn("False", out)
+        assert "owner" in out
+        assert "airflow" in out
+        assert "paused" in out
+        assert "airflow/example_dags/example_complex.py" in out
+        assert "False" in out
 
     def test_cli_list_dag_runs(self):
         dag_command.dag_trigger(
@@ -394,31 +396,30 @@ class TestCliDags(unittest.TestCase):
     def test_pause(self):
         args = self.parser.parse_args(['dags', 'pause', 'example_bash_operator'])
         dag_command.dag_pause(args)
-        self.assertIn(self.dagbag.dags['example_bash_operator'].get_is_paused(), [True, 1])
+        assert self.dagbag.dags['example_bash_operator'].get_is_paused() in [True, 1]
 
         args = self.parser.parse_args(['dags', 'unpause', 'example_bash_operator'])
         dag_command.dag_unpause(args)
-        self.assertIn(self.dagbag.dags['example_bash_operator'].get_is_paused(), [False, 0])
+        assert self.dagbag.dags['example_bash_operator'].get_is_paused() in [False, 0]
 
     def test_trigger_dag(self):
         dag_command.dag_trigger(
             self.parser.parse_args(['dags', 'trigger', 'example_bash_operator', '--conf', '{"foo": "bar"}'])
         )
-        self.assertRaises(
-            ValueError,
-            dag_command.dag_trigger,
-            self.parser.parse_args(
-                [
-                    'dags',
-                    'trigger',
-                    'example_bash_operator',
-                    '--run-id',
-                    'trigger_dag_xxx',
-                    '--conf',
-                    'NOT JSON',
-                ]
-            ),
-        )
+        with pytest.raises(ValueError):
+            dag_command.dag_trigger(
+                self.parser.parse_args(
+                    [
+                        'dags',
+                        'trigger',
+                        'example_bash_operator',
+                        '--run-id',
+                        'trigger_dag_xxx',
+                        '--conf',
+                        'NOT JSON',
+                    ]
+                ),
+            )
 
     def test_delete_dag(self):
         DM = DagModel
@@ -427,12 +428,11 @@ class TestCliDags(unittest.TestCase):
         session.add(DM(dag_id=key))
         session.commit()
         dag_command.dag_delete(self.parser.parse_args(['dags', 'delete', key, '--yes']))
-        self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
-        self.assertRaises(
-            AirflowException,
-            dag_command.dag_delete,
-            self.parser.parse_args(['dags', 'delete', 'does_not_exist_dag', '--yes']),
-        )
+        assert session.query(DM).filter_by(dag_id=key).count() == 0
+        with pytest.raises(AirflowException):
+            dag_command.dag_delete(
+                self.parser.parse_args(['dags', 'delete', 'does_not_exist_dag', '--yes']),
+            )
 
     def test_delete_dag_existing_file(self):
         # Test to check that the DAG should be deleted even if
@@ -444,18 +444,18 @@ class TestCliDags(unittest.TestCase):
             session.add(DM(dag_id=key, fileloc=f.name))
             session.commit()
             dag_command.dag_delete(self.parser.parse_args(['dags', 'delete', key, '--yes']))
-            self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
+            assert session.query(DM).filter_by(dag_id=key).count() == 0
 
     def test_cli_list_jobs(self):
         args = self.parser.parse_args(['dags', 'list-jobs'])
         dag_command.dag_list_jobs(args)
 
     def test_dag_state(self):
-        self.assertEqual(
-            None,
+        assert (
             dag_command.dag_state(
                 self.parser.parse_args(['dags', 'state', 'example_bash_operator', DEFAULT_DATE.isoformat()])
-            ),
+            )
+            is None
         )
 
     @mock.patch("airflow.cli.commands.dag_command.DebugExecutor")
@@ -510,4 +510,4 @@ class TestCliDags(unittest.TestCase):
             ]
         )
         mock_render_dag.assert_has_calls([mock.call(mock_get_dag.return_value, tis=[])])
-        self.assertIn("SOURCE", output)
+        assert "SOURCE" in output
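
The `dag_trigger` rewrite above also moves the `parse_args(...)` call inside the `pytest.raises` block; in the old callable form the arguments were built before the assertion began. That is usually harmless, but an exception raised while constructing the arguments now also satisfies the assertion, so the block is best kept as small as possible. A sketch of the difference, using hypothetical helpers:

    import pytest

    def build_args():  # hypothetical argument factory
        return ["NOT JSON"]

    def trigger(args):  # hypothetical command that rejects non-JSON conf
        raise ValueError(f"Invalid conf: {args[0]}")

    def test_callable_form_versus_context_manager():
        # Old style: build_args() is evaluated before the assertion starts.
        pytest.raises(ValueError, trigger, build_args())

        # New style: everything in the block is covered by the assertion,
        # including build_args() itself.
        with pytest.raises(ValueError):
            trigger(build_args())
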
diff --git a/tests/cli/commands/test_db_command.py b/tests/cli/commands/test_db_command.py
index 9c6ad55..4a53b98 100644
--- a/tests/cli/commands/test_db_command.py
+++ b/tests/cli/commands/test_db_command.py
@@ -18,6 +18,7 @@
 import unittest
 from unittest import mock
 
+import pytest
 from sqlalchemy.engine.url import make_url
 
 from airflow.cli import cli_parser
@@ -99,16 +100,13 @@ class TestCliDb(unittest.TestCase):
         _, kwargs = mock_execute_interactive.call_args
         env = kwargs['env']
         postgres_env = {k: v for k, v in env.items() if k.startswith('PG')}
-        self.assertEqual(
-            {
-                'PGDATABASE': 'airflow',
-                'PGHOST': 'postgres',
-                'PGPASSWORD': 'airflow',
-                'PGPORT': '5432',
-                'PGUSER': 'postgres',
-            },
-            postgres_env,
-        )
+        assert {
+            'PGDATABASE': 'airflow',
+            'PGHOST': 'postgres',
+            'PGPASSWORD': 'airflow',
+            'PGPORT': '5432',
+            'PGUSER': 'postgres',
+        } == postgres_env
 
     @mock.patch("airflow.cli.commands.db_command.execute_interactive")
     @mock.patch(
@@ -121,21 +119,18 @@ class TestCliDb(unittest.TestCase):
         _, kwargs = mock_execute_interactive.call_args
         env = kwargs['env']
         postgres_env = {k: v for k, v in env.items() if k.startswith('PG')}
-        self.assertEqual(
-            {
-                'PGDATABASE': 'airflow',
-                'PGHOST': 'postgres',
-                'PGPASSWORD': 'airflow',
-                'PGPORT': '5432',
-                'PGUSER': 'postgres',
-            },
-            postgres_env,
-        )
+        assert {
+            'PGDATABASE': 'airflow',
+            'PGHOST': 'postgres',
+            'PGPASSWORD': 'airflow',
+            'PGPORT': '5432',
+            'PGUSER': 'postgres',
+        } == postgres_env
 
     @mock.patch(
         "airflow.cli.commands.db_command.settings.engine.url",
         make_url("invalid+psycopg2://postgres:airflow@postgres/airflow"),
     )
     def test_cli_shell_invalid(self):
-        with self.assertRaisesRegex(AirflowException, r"Unknown driver: invalid\+psycopg2"):
+        with pytest.raises(AirflowException, match=r"Unknown driver: invalid\+psycopg2"):
             db_command.shell(self.parser.parse_args(['db', 'shell']))
diff --git a/tests/cli/commands/test_info_command.py b/tests/cli/commands/test_info_command.py
index e6c8de4..7fad6e8 100644
--- a/tests/cli/commands/test_info_command.py
+++ b/tests/cli/commands/test_info_command.py
@@ -46,7 +46,7 @@ class TestPiiAnonymizer(unittest.TestCase):
 
     def test_should_remove_pii_from_path(self):
         home_path = os.path.expanduser("~/airflow/config")
-        self.assertEqual("${HOME}/airflow/config", self.instance.process_path(home_path))
+        assert "${HOME}/airflow/config" == self.instance.process_path(home_path)
 
     @parameterized.expand(
         [
@@ -69,29 +69,29 @@ class TestPiiAnonymizer(unittest.TestCase):
         ]
     )
     def test_should_remove_pii_from_url(self, before, after):
-        self.assertEqual(after, self.instance.process_url(before))
+        assert after == self.instance.process_url(before)
 
 
 class TestAirflowInfo(unittest.TestCase):
     def test_info(self):
         instance = info_command.AirflowInfo(info_command.NullAnonymizer())
         text = capture_show_output(instance)
-        self.assertIn("Apache Airflow", text)
-        self.assertIn(airflow_version, text)
+        assert "Apache Airflow" in text
+        assert airflow_version in text
 
 
 class TestSystemInfo(unittest.TestCase):
     def test_info(self):
         instance = info_command.SystemInfo(info_command.NullAnonymizer())
         text = capture_show_output(instance)
-        self.assertIn("System info", text)
+        assert "System info" in text
 
 
 class TestPathsInfo(unittest.TestCase):
     def test_info(self):
         instance = info_command.PathsInfo(info_command.NullAnonymizer())
         text = capture_show_output(instance)
-        self.assertIn("Paths info", text)
+        assert "Paths info" in text
 
 
 class TestConfigInfo(unittest.TestCase):
@@ -107,11 +107,11 @@ class TestConfigInfo(unittest.TestCase):
     def test_should_read_config(self):
         instance = info_command.ConfigInfo(info_command.NullAnonymizer())
         text = capture_show_output(instance)
-        self.assertIn("TEST_EXECUTOR", text)
-        self.assertIn("TEST_DAGS_FOLDER", text)
-        self.assertIn("TEST_PLUGINS_FOLDER", text)
-        self.assertIn("TEST_LOG_FOLDER", text)
-        self.assertIn("postgresql+psycopg2://postgres:airflow@postgres/airflow", text)
+        assert "TEST_EXECUTOR" in text
+        assert "TEST_DAGS_FOLDER" in text
+        assert "TEST_PLUGINS_FOLDER" in text
+        assert "TEST_LOG_FOLDER" in text
+        assert "postgresql+psycopg2://postgres:airflow@postgres/airflow" in text
 
 
 class TestConfigInfoLogging(unittest.TestCase):
@@ -126,7 +126,7 @@ class TestConfigInfoLogging(unittest.TestCase):
             configure_logging()
             instance = info_command.ConfigInfo(info_command.NullAnonymizer())
             text = capture_show_output(instance)
-            self.assertIn("stackdriver", text)
+            assert "stackdriver" in text
 
     def tearDown(self) -> None:
         importlib.reload(airflow_local_settings)
@@ -148,8 +148,8 @@ class TestShowInfo(unittest.TestCase):
             info_command.show_info(self.parser.parse_args(["info"]))
 
         output = stdout.getvalue()
-        self.assertIn(f"Apache Airflow: {airflow_version}", output)
-        self.assertIn("postgresql+psycopg2://postgres:airflow@postgres/airflow", output)
+        assert f"Apache Airflow: {airflow_version}" in output
+        assert "postgresql+psycopg2://postgres:airflow@postgres/airflow" in output
 
     @conf_vars(
         {
@@ -161,8 +161,8 @@ class TestShowInfo(unittest.TestCase):
             info_command.show_info(self.parser.parse_args(["info", "--anonymize"]))
 
         output = stdout.getvalue()
-        self.assertIn(f"Apache Airflow: {airflow_version}", output)
-        self.assertIn("postgresql+psycopg2://p...s:PASSWORD@postgres/airflow", output)
+        assert f"Apache Airflow: {airflow_version}" in output
+        assert "postgresql+psycopg2://p...s:PASSWORD@postgres/airflow" in output
 
     @conf_vars(
         {
@@ -185,6 +185,6 @@ class TestShowInfo(unittest.TestCase):
         with contextlib.redirect_stdout(io.StringIO()) as stdout:
             info_command.show_info(self.parser.parse_args(["info", "--file-io"]))
 
-        self.assertIn("https://file.io/TEST", stdout.getvalue())
+        assert "https://file.io/TEST" in stdout.getvalue()
         content = mock_requests.post.call_args[1]["data"]["text"]
-        self.assertIn("postgresql+psycopg2://p...s:PASSWORD@postgres/airflow", content)
+        assert "postgresql+psycopg2://p...s:PASSWORD@postgres/airflow" in content
diff --git a/tests/cli/commands/test_kubernetes_command.py b/tests/cli/commands/test_kubernetes_command.py
index 1a6773e..8ae2eef 100644
--- a/tests/cli/commands/test_kubernetes_command.py
+++ b/tests/cli/commands/test_kubernetes_command.py
@@ -47,11 +47,11 @@ class TestGenerateDagYamlCommand(unittest.TestCase):
                     ]
                 )
             )
-            self.assertEqual(len(os.listdir(directory)), 1)
+            assert len(os.listdir(directory)) == 1
             out_dir = directory + "/airflow_yaml_output/"
-            self.assertEqual(len(os.listdir(out_dir)), 6)
-            self.assertTrue(os.path.isfile(out_dir + file_name))
-            self.assertGreater(os.stat(out_dir + file_name).st_size, 0)
+            assert len(os.listdir(out_dir)) == 6
+            assert os.path.isfile(out_dir + file_name)
+            assert os.stat(out_dir + file_name).st_size > 0
 
 
 class TestCleanUpPodsCommand(unittest.TestCase):
diff --git a/tests/cli/commands/test_legacy_commands.py b/tests/cli/commands/test_legacy_commands.py
index 444cda0..c8d0545 100644
--- a/tests/cli/commands/test_legacy_commands.py
+++ b/tests/cli/commands/test_legacy_commands.py
@@ -20,6 +20,8 @@ import unittest
 from argparse import ArgumentError
 from unittest.mock import MagicMock
 
+import pytest
+
 from airflow.cli import cli_parser
 from airflow.cli.commands import config_command
 from airflow.cli.commands.legacy_commands import COMMAND_MAP, check_legacy_command
@@ -61,27 +63,24 @@ class TestCliDeprecatedCommandsValue(unittest.TestCase):
         cls.parser = cli_parser.get_parser()
 
     def test_should_display_value(self):
-        with self.assertRaises(SystemExit) as cm_exception, contextlib.redirect_stderr(
-            io.StringIO()
-        ) as temp_stderr:
+        with pytest.raises(SystemExit) as ctx, contextlib.redirect_stderr(io.StringIO()) as temp_stderr:
             config_command.get_value(self.parser.parse_args(['worker']))
 
-        self.assertEqual(2, cm_exception.exception.code)
-        self.assertIn(
+        assert 2 == ctx.value.code
+        assert (
             "`airflow worker` command, has been removed, "
-            "please use `airflow celery worker`, see help above.",
-            temp_stderr.getvalue().strip(),
+            "please use `airflow celery worker`, see help above." in temp_stderr.getvalue().strip()
         )
 
     def test_command_map(self):
         for item in LEGACY_COMMANDS:
-            self.assertIsNotNone(COMMAND_MAP[item])
+            assert COMMAND_MAP[item] is not None
 
     def test_check_legacy_command(self):
         action = MagicMock()
-        with self.assertRaises(ArgumentError) as e:
+        with pytest.raises(ArgumentError) as ctx:
             check_legacy_command(action, 'list_users')
-        self.assertEqual(
-            str(e.exception),
-            "argument : `airflow list_users` command, has been removed, please use `airflow users list`",
+        assert (
+            str(ctx.value)
+            == "argument : `airflow list_users` command, has been removed, please use `airflow users list`"
         )
diff --git a/tests/cli/commands/test_plugins_command.py b/tests/cli/commands/test_plugins_command.py
index bbaaad7..262b59b 100644
--- a/tests/cli/commands/test_plugins_command.py
+++ b/tests/cli/commands/test_plugins_command.py
@@ -46,7 +46,7 @@ class TestPluginsCommand(unittest.TestCase):
         with redirect_stdout(io.StringIO()) as temp_stdout:
             plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=json']))
             stdout = temp_stdout.getvalue()
-        self.assertIn('No plugins loaded', stdout)
+        assert 'No plugins loaded' in stdout
 
     @mock_plugin_manager(plugins=[TestPlugin])
     def test_should_display_one_plugins(self):
diff --git a/tests/cli/commands/test_pool_command.py b/tests/cli/commands/test_pool_command.py
index d40e187..92fb46d 100644
--- a/tests/cli/commands/test_pool_command.py
+++ b/tests/cli/commands/test_pool_command.py
@@ -22,6 +22,8 @@ import os
 import unittest
 from contextlib import redirect_stdout
 
+import pytest
+
 from airflow import models, settings
 from airflow.cli import cli_parser
 from airflow.cli.commands import pool_command
@@ -59,14 +61,14 @@ class TestCliPools(unittest.TestCase):
         with redirect_stdout(io.StringIO()) as stdout:
             pool_command.pool_list(self.parser.parse_args(['pools', 'list']))
 
-        self.assertIn('foo', stdout.getvalue())
+        assert 'foo' in stdout.getvalue()
 
     def test_pool_list_with_args(self):
         pool_command.pool_list(self.parser.parse_args(['pools', 'list', '--output', 'json']))
 
     def test_pool_create(self):
         pool_command.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
-        self.assertEqual(self.session.query(Pool).count(), 2)
+        assert self.session.query(Pool).count() == 2
 
     def test_pool_get(self):
         pool_command.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
@@ -75,17 +77,17 @@ class TestCliPools(unittest.TestCase):
     def test_pool_delete(self):
         pool_command.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
         pool_command.pool_delete(self.parser.parse_args(['pools', 'delete', 'foo']))
-        self.assertEqual(self.session.query(Pool).count(), 1)
+        assert self.session.query(Pool).count() == 1
 
     def test_pool_import_nonexistent(self):
-        with self.assertRaises(SystemExit):
+        with pytest.raises(SystemExit):
             pool_command.pool_import(self.parser.parse_args(['pools', 'import', 'nonexistent.json']))
 
     def test_pool_import_invalid_json(self):
         with open('pools_import_invalid.json', mode='w') as file:
             file.write("not valid json")
 
-        with self.assertRaises(SystemExit):
+        with pytest.raises(SystemExit):
             pool_command.pool_import(self.parser.parse_args(['pools', 'import', 'pools_import_invalid.json']))
 
     def test_pool_import_invalid_pools(self):
@@ -93,7 +95,7 @@ class TestCliPools(unittest.TestCase):
         with open('pools_import_invalid.json', mode='w') as file:
             json.dump(pool_config_input, file)
 
-        with self.assertRaises(SystemExit):
+        with pytest.raises(SystemExit):
             pool_command.pool_import(self.parser.parse_args(['pools', 'import', 'pools_import_invalid.json']))
 
     def test_pool_import_export(self):
@@ -114,8 +116,6 @@ class TestCliPools(unittest.TestCase):
 
         with open('pools_export.json', mode='r') as file:
             pool_config_output = json.load(file)
-            self.assertEqual(
-                pool_config_input, pool_config_output, "Input and output pool files are not same"
-            )
+            assert pool_config_input == pool_config_output, "Input and output pool files are not the same"
         os.remove('pools_import.json')
         os.remove('pools_export.json')
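
The import/export check above keeps its failure message via the `assert expression, "message"` form. One classic pitfall when converting by hand: wrapping the pair in parentheses creates a two-element tuple, which is always truthy, so the assertion can never fail; CPython even emits a SyntaxWarning for it. A short illustration with hypothetical values:

    pool_config_input = {"pool_1": 1}
    pool_config_output = {"pool_1": 1}

    # Correct: the message is the second operand of the assert statement.
    assert pool_config_input == pool_config_output, "Input and output pool files are not the same"

    # Broken (never fails): the parentheses make this a 2-tuple, always truthy.
    # assert (pool_config_input == pool_config_output, "Input and output pool files are not the same")
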
diff --git a/tests/cli/commands/test_role_command.py b/tests/cli/commands/test_role_command.py
index 167e3a0..3ade36b 100644
--- a/tests/cli/commands/test_role_command.py
+++ b/tests/cli/commands/test_role_command.py
@@ -54,25 +54,25 @@ class TestCliRoles(unittest.TestCase):
                 self.appbuilder.sm.delete_role(role_name)
 
     def test_cli_create_roles(self):
-        self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
-        self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
+        assert self.appbuilder.sm.find_role('FakeTeamA') is None
+        assert self.appbuilder.sm.find_role('FakeTeamB') is None
 
         args = self.parser.parse_args(['roles', 'create', 'FakeTeamA', 'FakeTeamB'])
         role_command.roles_create(args)
 
-        self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
-        self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
+        assert self.appbuilder.sm.find_role('FakeTeamA') is not None
+        assert self.appbuilder.sm.find_role('FakeTeamB') is not None
 
     def test_cli_create_roles_is_reentrant(self):
-        self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
-        self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
+        assert self.appbuilder.sm.find_role('FakeTeamA') is None
+        assert self.appbuilder.sm.find_role('FakeTeamB') is None
 
         args = self.parser.parse_args(['roles', 'create', 'FakeTeamA', 'FakeTeamB'])
 
         role_command.roles_create(args)
 
-        self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
-        self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
+        assert self.appbuilder.sm.find_role('FakeTeamA') is not None
+        assert self.appbuilder.sm.find_role('FakeTeamB') is not None
 
     def test_cli_list_roles(self):
         self.appbuilder.sm.add_role('FakeTeamA')
@@ -82,8 +82,8 @@ class TestCliRoles(unittest.TestCase):
             role_command.roles_list(self.parser.parse_args(['roles', 'list']))
             stdout = stdout.getvalue()
 
-        self.assertIn('FakeTeamA', stdout)
-        self.assertIn('FakeTeamB', stdout)
+        assert 'FakeTeamA' in stdout
+        assert 'FakeTeamB' in stdout
 
     def test_cli_list_roles_with_args(self):
         role_command.roles_list(self.parser.parse_args(['roles', 'list', '--output', 'yaml']))
diff --git a/tests/cli/commands/test_sync_perm_command.py b/tests/cli/commands/test_sync_perm_command.py
index 8dd3275..5f7f86e 100644
--- a/tests/cli/commands/test_sync_perm_command.py
+++ b/tests/cli/commands/test_sync_perm_command.py
@@ -57,7 +57,7 @@ class TestCliSyncPerm(unittest.TestCase):
 
         dagbag_mock.assert_called_once_with(read_dags_from_db=True)
         collect_dags_from_db_mock.assert_called_once_with()
-        self.assertEqual(2, len(appbuilder.sm.sync_perm_for_dag.mock_calls))
+        assert 2 == len(appbuilder.sm.sync_perm_for_dag.mock_calls)
         appbuilder.sm.sync_perm_for_dag.assert_any_call(
             'has_access_control', {'Public': {permissions.ACTION_CAN_READ}}
         )
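
Note: assertions about mock call counts become plain comparisons over the
recorded calls. A small self-contained sketch (the mock name is illustrative):

    from unittest import mock

    def test_mock_call_count_sketch():
        sync_perm_for_dag = mock.Mock()
        sync_perm_for_dag('dag_a')
        sync_perm_for_dag('dag_b')
        # unittest:  self.assertEqual(2, len(m.mock_calls))
        assert 2 == len(sync_perm_for_dag.mock_calls)
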
diff --git a/tests/cli/commands/test_task_command.py b/tests/cli/commands/test_task_command.py
index afa16d1..a011ee6 100644
--- a/tests/cli/commands/test_task_command.py
+++ b/tests/cli/commands/test_task_command.py
@@ -82,7 +82,7 @@ class TestCliTasks(unittest.TestCase):
 
         mock_run_mini_scheduler.assert_not_called()
         # Check that print output and log messages are shown
-        self.assertIn("'example_python_operator__print_the_context__20180101'", stdout.getvalue())
+        assert "'example_python_operator__print_the_context__20180101'" in stdout.getvalue()
 
     @mock.patch("airflow.cli.commands.task_command.LocalTaskJob")
     def test_run_naive_taskinstance(self, mock_local_job):
@@ -173,8 +173,8 @@ class TestCliTasks(unittest.TestCase):
                 )
             )
         output = stdout.getvalue()
-        self.assertIn('foo=bar', output)
-        self.assertIn('AIRFLOW_TEST_MODE=True', output)
+        assert 'foo=bar' in output
+        assert 'AIRFLOW_TEST_MODE=True' in output
 
     def test_cli_run(self):
         task_command.task_run(
@@ -192,8 +192,9 @@ class TestCliTasks(unittest.TestCase):
         ],
     )
     def test_cli_run_invalid_raw_option(self, option: str):
-        with self.assertRaisesRegex(
-            AirflowException, "Option --raw does not work with some of the other options on this command."
+        with pytest.raises(
+            AirflowException,
+            match="Option --raw does not work with some of the other options on this command.",
         ):
             task_command.task_run(
                 self.parser.parse_args(
@@ -210,7 +211,7 @@ class TestCliTasks(unittest.TestCase):
             )
 
     def test_cli_run_mutually_exclusive(self):
-        with self.assertRaisesRegex(AirflowException, "Option --raw and --local are mutually exclusive."):
+        with pytest.raises(AirflowException, match="Option --raw and --local are mutually exclusive."):
             task_command.task_run(
                 self.parser.parse_args(
                     [
@@ -260,18 +261,15 @@ class TestCliTasks(unittest.TestCase):
             )
         actual_out = json.loads(stdout.getvalue())
 
-        self.assertEqual(len(actual_out), 1)
-        self.assertDictEqual(
-            actual_out[0],
-            {
-                'dag_id': 'example_python_operator',
-                'execution_date': '2016-01-09T00:00:00+00:00',
-                'task_id': 'print_the_context',
-                'state': 'success',
-                'start_date': ti_start.isoformat(),
-                'end_date': ti_end.isoformat(),
-            },
-        )
+        assert len(actual_out) == 1
+        assert actual_out[0] == {
+            'dag_id': 'example_python_operator',
+            'execution_date': '2016-01-09T00:00:00+00:00',
+            'task_id': 'print_the_context',
+            'state': 'success',
+            'start_date': ti_start.isoformat(),
+            'end_date': ti_end.isoformat(),
+        }
 
     def test_subdag_clear(self):
         args = self.parser.parse_args(['tasks', 'clear', 'example_subdag_operator', '--yes'])
@@ -312,7 +310,7 @@ class TestCliTasks(unittest.TestCase):
         ti = TaskInstance(task, args.execution_date)
         ti.refresh_from_db()
         state = ti.current_state()
-        self.assertEqual(state, State.SUCCESS)
+        assert state == State.SUCCESS
 
 
 class TestLogsfromTaskRunCommand(unittest.TestCase):
@@ -359,12 +357,12 @@ class TestLogsfromTaskRunCommand(unittest.TestCase):
         [2020-06-24 16:47:23,537] {logging_mixin.py:91} INFO - [2020-06-24 16:47:23,536] {python.py:135}
         """
         log_lines = [log for log in logs_list if text in log]
-        self.assertEqual(len(log_lines), 1)
+        assert len(log_lines) == 1
         log_line = log_lines[0]
         if not expect_from_logging_mixin:
             # Logs from print statements still show logging_mixin as the filename
             # Example: [2020-06-24 17:07:00,482] {logging_mixin.py:91} INFO - Log from Print statement
-            self.assertNotIn("logging_mixin.py", log_line)
+            assert "logging_mixin.py" not in log_line
         return log_line
 
     @unittest.skipIf(not hasattr(os, 'fork'), "Forking not available")
@@ -381,23 +379,21 @@ class TestLogsfromTaskRunCommand(unittest.TestCase):
         print(logs)  # In case of a test failure this line shows the detailed log
         logs_list = logs.splitlines()
 
-        self.assertIn("INFO - Started process", logs)
-        self.assertIn(f"Subtask {self.task_id}", logs)
-        self.assertIn("standard_task_runner.py", logs)
-        self.assertIn(
+        assert "INFO - Started process" in logs
+        assert f"Subtask {self.task_id}" in logs
+        assert "standard_task_runner.py" in logs
+        assert (
             f"INFO - Running: ['airflow', 'tasks', 'run', '{self.dag_id}', "
-            f"'{self.task_id}', '{self.execution_date_str}',",
-            logs,
+            f"'{self.task_id}', '{self.execution_date_str}'," in logs
         )
 
         self.assert_log_line("Log from DAG Logger", logs_list)
         self.assert_log_line("Log from TI Logger", logs_list)
         self.assert_log_line("Log from Print statement", logs_list, expect_from_logging_mixin=True)
 
-        self.assertIn(
+        assert (
             f"INFO - Marking task as SUCCESS. dag_id={self.dag_id}, "
-            f"task_id={self.task_id}, execution_date=20170101T000000",
-            logs,
+            f"task_id={self.task_id}, execution_date=20170101T000000" in logs
         )
 
     @mock.patch("airflow.task.task_runner.standard_task_runner.CAN_FORK", False)
@@ -413,21 +409,19 @@ class TestLogsfromTaskRunCommand(unittest.TestCase):
         print(logs)  # In case of a test failure this line shows the detailed log
         logs_list = logs.splitlines()
 
-        self.assertIn(f"Subtask {self.task_id}", logs)
-        self.assertIn("base_task_runner.py", logs)
+        assert f"Subtask {self.task_id}" in logs
+        assert "base_task_runner.py" in logs
         self.assert_log_line("Log from DAG Logger", logs_list)
         self.assert_log_line("Log from TI Logger", logs_list)
         self.assert_log_line("Log from Print statement", logs_list, expect_from_logging_mixin=True)
 
-        self.assertIn(
+        assert (
             f"INFO - Running: ['airflow', 'tasks', 'run', '{self.dag_id}', "
-            f"'{self.task_id}', '{self.execution_date_str}',",
-            logs,
+            f"'{self.task_id}', '{self.execution_date_str}'," in logs
         )
-        self.assertIn(
+        assert (
             f"INFO - Marking task as SUCCESS. dag_id={self.dag_id}, "
-            f"task_id={self.task_id}, execution_date=20170101T000000",
-            logs,
+            f"task_id={self.task_id}, execution_date=20170101T000000" in logs
         )
 
     def test_log_file_template_with_run_task(self):
@@ -543,7 +537,7 @@ class TestCliTaskBackfill(unittest.TestCase):
         ti_dependent0 = TaskInstance(task=dag.get_task(task0_id), execution_date=DEFAULT_DATE)
 
         ti_dependent0.refresh_from_db()
-        self.assertEqual(ti_dependent0.state, State.FAILED)
+        assert ti_dependent0.state == State.FAILED
 
         task1_id = 'test_run_dependency_task'
         args1 = [
@@ -560,7 +554,7 @@ class TestCliTaskBackfill(unittest.TestCase):
             task=dag.get_task(task1_id), execution_date=DEFAULT_DATE + timedelta(days=1)
         )
         ti_dependency.refresh_from_db()
-        self.assertEqual(ti_dependency.state, State.FAILED)
+        assert ti_dependency.state == State.FAILED
 
         task2_id = 'test_run_dependent_task'
         args2 = [
@@ -577,4 +571,4 @@ class TestCliTaskBackfill(unittest.TestCase):
             task=dag.get_task(task2_id), execution_date=DEFAULT_DATE + timedelta(days=1)
         )
         ti_dependent.refresh_from_db()
-        self.assertEqual(ti_dependent.state, State.SUCCESS)
+        assert ti_dependent.state == State.SUCCESS
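
Note: assertRaisesRegex(Exc, pattern) maps to pytest.raises(Exc, match=pattern).
Both apply the pattern with re.search, so plain substrings work, but regex
metacharacters must be escaped. A minimal sketch (the exception type and
message are illustrative):

    import re

    import pytest

    def test_raises_with_match_sketch():
        message = "Option --raw and --local are mutually exclusive."
        with pytest.raises(ValueError, match=re.escape(message)):
            raise ValueError(message)
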
diff --git a/tests/cli/commands/test_user_command.py b/tests/cli/commands/test_user_command.py
index f5bf167..5ca3bb0 100644
--- a/tests/cli/commands/test_user_command.py
+++ b/tests/cli/commands/test_user_command.py
@@ -158,7 +158,7 @@ class TestCliUsers(unittest.TestCase):
             user_command.users_list(self.parser.parse_args(['users', 'list']))
             stdout = stdout.getvalue()
         for i in range(0, 3):
-            self.assertIn(f'user{i}', stdout)
+            assert f'user{i}' in stdout
 
     def test_cli_list_users_with_args(self):
         user_command.users_list(self.parser.parse_args(['users', 'list', '--output', 'json']))
@@ -166,11 +166,11 @@ class TestCliUsers(unittest.TestCase):
     def test_cli_import_users(self):
         def assert_user_in_roles(email, roles):
             for role in roles:
-                self.assertTrue(_does_user_belong_to_role(self.appbuilder, email, role))
+                assert _does_user_belong_to_role(self.appbuilder, email, role)
 
         def assert_user_not_in_roles(email, roles):
             for role in roles:
-                self.assertFalse(_does_user_belong_to_role(self.appbuilder, email, role))
+                assert not _does_user_belong_to_role(self.appbuilder, email, role)
 
         assert_user_not_in_roles(TEST_USER1_EMAIL, ['Admin', 'Op'])
         assert_user_not_in_roles(TEST_USER2_EMAIL, ['Public'])
@@ -250,8 +250,8 @@ class TestCliUsers(unittest.TestCase):
             matches[0].pop('id')  # this key is not required for import
             return matches[0]
 
-        self.assertEqual(find_by_username('imported_user1'), user1)
-        self.assertEqual(find_by_username('imported_user2'), user2)
+        assert find_by_username('imported_user1') == user1
+        assert find_by_username('imported_user2') == user2
 
     def _import_users_from_file(self, user_list):
         json_file_content = json.dumps(user_list)
@@ -291,18 +291,16 @@ class TestCliUsers(unittest.TestCase):
         )
         user_command.users_create(args)
 
-        self.assertFalse(
-            _does_user_belong_to_role(appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Op'),
-            "User should not yet be a member of role 'Op'",
-        )
+        assert not _does_user_belong_to_role(
+            appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Op'
+        ), "User should not yet be a member of role 'Op'"
 
         args = self.parser.parse_args(['users', 'add-role', '--username', 'test4', '--role', 'Op'])
         user_command.users_manage_role(args, remove=False)
 
-        self.assertTrue(
-            _does_user_belong_to_role(appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Op'),
-            "User should have been added to role 'Op'",
-        )
+        assert _does_user_belong_to_role(
+            appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Op'
+        ), "User should have been added to role 'Op'"
 
     def test_cli_remove_user_role(self):
         args = self.parser.parse_args(
@@ -324,15 +322,13 @@ class TestCliUsers(unittest.TestCase):
         )
         user_command.users_create(args)
 
-        self.assertTrue(
-            _does_user_belong_to_role(appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Viewer'),
-            "User should have been created with role 'Viewer'",
-        )
+        assert _does_user_belong_to_role(
+            appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Viewer'
+        ), "User should have been created with role 'Viewer'"
 
         args = self.parser.parse_args(['users', 'remove-role', '--username', 'test4', '--role', 'Viewer'])
         user_command.users_manage_role(args, remove=True)
 
-        self.assertFalse(
-            _does_user_belong_to_role(appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Viewer'),
-            "User should have been removed from role 'Viewer'",
-        )
+        assert not _does_user_belong_to_role(
+            appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Viewer'
+        ), "User should have been removed from role 'Viewer'"
diff --git a/tests/cli/commands/test_variable_command.py b/tests/cli/commands/test_variable_command.py
index c9e9318..8b64ff8 100644
--- a/tests/cli/commands/test_variable_command.py
+++ b/tests/cli/commands/test_variable_command.py
@@ -22,6 +22,8 @@ import tempfile
 import unittest.mock
 from contextlib import redirect_stdout
 
+import pytest
+
 from airflow import models
 from airflow.cli import cli_parser
 from airflow.cli.commands import variable_command
@@ -44,25 +46,26 @@ class TestCliVariables(unittest.TestCase):
     def test_variables_set(self):
         """Test variable_set command"""
         variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'foo', 'bar']))
-        self.assertIsNotNone(Variable.get("foo"))
-        self.assertRaises(KeyError, Variable.get, "foo1")
+        assert Variable.get("foo") is not None
+        with pytest.raises(KeyError):
+            Variable.get("foo1")
 
     def test_variables_get(self):
         Variable.set('foo', {'foo': 'bar'}, serialize_json=True)
 
         with redirect_stdout(io.StringIO()) as stdout:
             variable_command.variables_get(self.parser.parse_args(['variables', 'get', 'foo']))
-            self.assertEqual('{\n  "foo": "bar"\n}\n', stdout.getvalue())
+            assert '{\n  "foo": "bar"\n}\n' == stdout.getvalue()
 
     def test_get_variable_default_value(self):
         with redirect_stdout(io.StringIO()) as stdout:
             variable_command.variables_get(
                 self.parser.parse_args(['variables', 'get', 'baz', '--default', 'bar'])
             )
-            self.assertEqual("bar\n", stdout.getvalue())
+            assert "bar\n" == stdout.getvalue()
 
     def test_get_variable_missing_variable(self):
-        with self.assertRaises(SystemExit):
+        with pytest.raises(SystemExit):
             variable_command.variables_get(self.parser.parse_args(['variables', 'get', 'no-existing-VAR']))
 
     def test_variables_set_different_types(self):
@@ -95,14 +98,14 @@ class TestCliVariables(unittest.TestCase):
         )
 
         # Assert value
-        self.assertEqual({'foo': 'oops'}, Variable.get('dict', deserialize_json=True))
-        self.assertEqual(['oops'], Variable.get('list', deserialize_json=True))
-        self.assertEqual('hello string', Variable.get('str'))  # cannot json.loads(str)
-        self.assertEqual(42, Variable.get('int', deserialize_json=True))
-        self.assertEqual(42.0, Variable.get('float', deserialize_json=True))
-        self.assertEqual(True, Variable.get('true', deserialize_json=True))
-        self.assertEqual(False, Variable.get('false', deserialize_json=True))
-        self.assertEqual(None, Variable.get('null', deserialize_json=True))
+        assert {'foo': 'oops'} == Variable.get('dict', deserialize_json=True)
+        assert ['oops'] == Variable.get('list', deserialize_json=True)
+        assert 'hello string' == Variable.get('str')  # cannot json.loads(str)
+        assert 42 == Variable.get('int', deserialize_json=True)
+        assert 42.0 == Variable.get('float', deserialize_json=True)
+        assert Variable.get('true', deserialize_json=True) is True
+        assert Variable.get('false', deserialize_json=True) is False
+        assert Variable.get('null', deserialize_json=True) is None
 
         os.remove('variables_types.json')
 
@@ -115,11 +118,12 @@ class TestCliVariables(unittest.TestCase):
         """Test variable_delete command"""
         variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'foo', 'bar']))
         variable_command.variables_delete(self.parser.parse_args(['variables', 'delete', 'foo']))
-        self.assertRaises(KeyError, Variable.get, "foo")
+        with pytest.raises(KeyError):
+            Variable.get("foo")
 
     def test_variables_import(self):
         """Test variables_import command"""
-        with self.assertRaisesRegex(SystemExit, r"Invalid variables file"):
+        with pytest.raises(SystemExit, match=r"Invalid variables file"):
             variable_command.variables_import(self.parser.parse_args(['variables', 'import', os.devnull]))
 
     def test_variables_export(self):
@@ -143,14 +147,14 @@ class TestCliVariables(unittest.TestCase):
         variable_command.variables_delete(self.parser.parse_args(['variables', 'delete', 'foo']))
         variable_command.variables_import(self.parser.parse_args(['variables', 'import', tmp1.name]))
 
-        self.assertEqual('original', Variable.get('bar'))
-        self.assertEqual('{\n  "foo": "bar"\n}', Variable.get('foo'))
+        assert 'original' == Variable.get('bar')
+        assert '{\n  "foo": "bar"\n}' == Variable.get('foo')
 
         # Second export
         variable_command.variables_export(self.parser.parse_args(['variables', 'export', tmp2.name]))
 
         second_exp = open(tmp2.name)
-        self.assertEqual(first_exp.read(), second_exp.read())
+        assert first_exp.read() == second_exp.read()
 
         # Clean up files
         second_exp.close()
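
Note: the call form self.assertRaises(KeyError, Variable.get, "foo") becomes a
context manager under pytest, which keeps the failing call visible as an
ordinary statement. A minimal sketch with a stubbed lookup:

    import pytest

    def _variable_get_stub(key):
        # Hypothetical stand-in for Variable.get on a missing key.
        raise KeyError(key)

    def test_missing_variable_sketch():
        with pytest.raises(KeyError):
            _variable_get_stub('foo')
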
diff --git a/tests/cli/commands/test_version_command.py b/tests/cli/commands/test_version_command.py
index ad4f5a8..e4454aa 100644
--- a/tests/cli/commands/test_version_command.py
+++ b/tests/cli/commands/test_version_command.py
@@ -32,4 +32,4 @@ class TestCliVersion(unittest.TestCase):
     def test_cli_version(self):
         with redirect_stdout(io.StringIO()) as stdout:
             airflow.cli.commands.version_command.version(self.parser.parse_args(['version']))
-        self.assertIn(version, stdout.getvalue())
+        assert version in stdout.getvalue()
diff --git a/tests/cli/commands/test_webserver_command.py b/tests/cli/commands/test_webserver_command.py
index 1d7a670..a73bead 100644
--- a/tests/cli/commands/test_webserver_command.py
+++ b/tests/cli/commands/test_webserver_command.py
@@ -85,7 +85,7 @@ class TestGunicornMonitor(unittest.TestCase):
         self.monitor._spawn_new_workers.assert_called_once_with(2)  # pylint: disable=no-member
         self.monitor._kill_old_workers.assert_not_called()  # pylint: disable=no-member
         self.monitor._reload_gunicorn.assert_not_called()  # pylint: disable=no-member
-        self.assertAlmostEqual(self.monitor._last_refresh_time, time.monotonic(), delta=5)
+        assert abs(self.monitor._last_refresh_time - time.monotonic()) < 5
 
     @mock.patch('airflow.cli.commands.webserver_command.sleep')
     def test_should_reload_when_plugin_has_been_changed(self, mock_sleep):
@@ -112,7 +112,7 @@ class TestGunicornMonitor(unittest.TestCase):
         self.monitor._spawn_new_workers.assert_not_called()  # pylint: disable=no-member
         self.monitor._kill_old_workers.assert_not_called()  # pylint: disable=no-member
         self.monitor._reload_gunicorn.assert_called_once_with()  # pylint: disable=no-member
-        self.assertAlmostEqual(self.monitor._last_refresh_time, time.monotonic(), delta=5)
+        assert abs(self.monitor._last_refresh_time - time.monotonic()) < 5
 
 
 class TestGunicornMonitorGeneratePluginState(unittest.TestCase):
@@ -144,32 +144,32 @@ class TestGunicornMonitorGeneratePluginState(unittest.TestCase):
             state_a = monitor._generate_plugin_state()
             state_b = monitor._generate_plugin_state()
 
-            self.assertEqual(state_a, state_b)
-            self.assertEqual(3, len(state_a))
+            assert state_a == state_b
+            assert 3 == len(state_a)
 
             # Should detect new file
             self._prepare_test_file(f"{tempdir}/file4.txt", 400)
 
             state_c = monitor._generate_plugin_state()
 
-            self.assertNotEqual(state_b, state_c)
-            self.assertEqual(4, len(state_c))
+            assert state_b != state_c
+            assert 4 == len(state_c)
 
             # Should detect changes in files
             self._prepare_test_file(f"{tempdir}/file4.txt", 450)
 
             state_d = monitor._generate_plugin_state()
 
-            self.assertNotEqual(state_c, state_d)
-            self.assertEqual(4, len(state_d))
+            assert state_c != state_d
+            assert 4 == len(state_d)
 
             # Should support large files
             self._prepare_test_file(f"{tempdir}/file4.txt", 4000000)
 
             state_d = monitor._generate_plugin_state()
 
-            self.assertNotEqual(state_c, state_d)
-            self.assertEqual(4, len(state_d))
+            assert state_c != state_d
+            assert 4 == len(state_d)
 
 
 class TestCLIGetNumReadyWorkersRunning(unittest.TestCase):
@@ -195,27 +195,27 @@ class TestCLIGetNumReadyWorkersRunning(unittest.TestCase):
         self.process.children.return_value = [self.child]
 
         with mock.patch('psutil.Process', return_value=self.process):
-            self.assertEqual(self.monitor._get_num_ready_workers_running(), 1)
+            assert self.monitor._get_num_ready_workers_running() == 1
 
     def test_ready_prefix_on_cmdline_no_children(self):
         self.process.children.return_value = []
 
         with mock.patch('psutil.Process', return_value=self.process):
-            self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
+            assert self.monitor._get_num_ready_workers_running() == 0
 
     def test_ready_prefix_on_cmdline_zombie(self):
         self.child.cmdline.return_value = []
         self.process.children.return_value = [self.child]
 
         with mock.patch('psutil.Process', return_value=self.process):
-            self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
+            assert self.monitor._get_num_ready_workers_running() == 0
 
     def test_ready_prefix_on_cmdline_dead_process(self):
         self.child.cmdline.side_effect = psutil.NoSuchProcess(11347)
         self.process.children.return_value = [self.child]
 
         with mock.patch('psutil.Process', return_value=self.process):
-            self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
+            assert self.monitor._get_num_ready_workers_running() == 0
 
 
 class TestCliWebServer(unittest.TestCase):
@@ -275,7 +275,7 @@ class TestCliWebServer(unittest.TestCase):
         ):
             # Run webserver in foreground and terminate it.
             proc = subprocess.Popen(["airflow", "webserver"])
-            self.assertEqual(None, proc.poll())
+            assert proc.poll() is None
 
         # Wait for process
         time.sleep(10)
@@ -284,7 +284,7 @@ class TestCliWebServer(unittest.TestCase):
         proc.terminate()
         # -15 - the server was stopped before it started
         #   0 - the server terminated correctly
-        self.assertIn(proc.wait(60), (-15, 0))
+        assert proc.wait(60) in (-15, 0)
 
     def test_cli_webserver_foreground_with_pid(self):
         with tempfile.TemporaryDirectory(prefix='tmp-pid') as tmpdir:
@@ -296,14 +296,14 @@ class TestCliWebServer(unittest.TestCase):
                 AIRFLOW__WEBSERVER__WORKERS="1",
             ):
                 proc = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
-                self.assertEqual(None, proc.poll())
+                assert proc.poll() is None
 
             # Check the file specified by --pid option exists
             self._wait_pidfile(pidfile)
 
             # Terminate webserver
             proc.terminate()
-            self.assertEqual(0, proc.wait(60))
+            assert 0 == proc.wait(60)
 
     @pytest.mark.quarantined
     def test_cli_webserver_background(self):
@@ -335,21 +335,19 @@ class TestCliWebServer(unittest.TestCase):
                         logfile,
                     ]
                 )
-                self.assertEqual(None, proc.poll())
+                assert proc.poll() is None
 
                 pid_monitor = self._wait_pidfile(pidfile_monitor)
                 self._wait_pidfile(pidfile_webserver)
 
                 # Assert that gunicorn and its monitor are launched.
-                self.assertEqual(
-                    0, subprocess.Popen(["pgrep", "-f", "-c", "airflow webserver --daemon"]).wait()
-                )
-                self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "-f", "gunicorn: master"]).wait())
+                assert 0 == subprocess.Popen(["pgrep", "-f", "-c", "airflow webserver --daemon"]).wait()
+                assert 0 == subprocess.Popen(["pgrep", "-c", "-f", "gunicorn: master"]).wait()
 
                 # Terminate monitor process.
                 proc = psutil.Process(pid_monitor)
                 proc.terminate()
-                self.assertIn(proc.wait(120), (0, None))
+                assert proc.wait(120) in (0, None)
 
                 self._check_processes()
             except Exception:
@@ -367,20 +365,18 @@ class TestCliWebServer(unittest.TestCase):
         # Shorten the timeout so this test doesn't take too long
         args = self.parser.parse_args(['webserver'])
         with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
-            with self.assertRaises(SystemExit) as e:
+            with pytest.raises(SystemExit) as ctx:
                 webserver_command.webserver(args)
-        self.assertEqual(e.exception.code, 1)
+        assert ctx.value.code == 1
 
     def test_cli_webserver_debug(self):
         env = os.environ.copy()
         proc = psutil.Popen(["airflow", "webserver", "--debug"], env=env)
         time.sleep(3)  # wait for webserver to start
         return_code = proc.poll()
-        self.assertEqual(
-            None, return_code, f"webserver terminated with return code {return_code} in debug mode"
-        )
+        assert return_code is None, f"webserver terminated with return code {return_code} in debug mode"
         proc.terminate()
-        self.assertEqual(-15, proc.wait(60))
+        assert -15 == proc.wait(60)
 
     def test_cli_webserver_access_log_format(self):
 
@@ -409,7 +405,7 @@ class TestCliWebServer(unittest.TestCase):
                     access_logformat,
                 ]
             )
-            self.assertEqual(None, proc.poll())
+            assert proc.poll() is None
 
             # Wait for webserver process
             time.sleep(10)
@@ -419,9 +415,9 @@ class TestCliWebServer(unittest.TestCase):
             try:
                 file = open(access_logfile)
                 log = json.loads(file.read())
-                self.assertEqual('127.0.0.1', log.get('remote_ip'))
-                self.assertEqual(len(log), 9)
-                self.assertEqual('GET', log.get('request_method'))
+                assert '127.0.0.1' == log.get('remote_ip')
+                assert len(log) == 9
+                assert 'GET' == log.get('request_method')
 
             except OSError:
                 print("access log file not found at " + access_logfile)
@@ -430,5 +426,5 @@ class TestCliWebServer(unittest.TestCase):
             proc.terminate()
             # -15 - the server was stopped before it started
             #   0 - the server terminated correctly
-            self.assertIn(proc.wait(60), (-15, 0))
+            assert proc.wait(60) in (-15, 0)
             self._check_processes()
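
Note: assertAlmostEqual(a, b, delta=5) asserts abs(a - b) <= 5; the rewrite
above uses a strict < 5, marginally tighter but equivalent in practice for a
wall-clock check. Sketch:

    import time

    def test_refresh_time_sketch():
        last_refresh_time = time.monotonic()
        # unittest:  self.assertAlmostEqual(last_refresh_time, time.monotonic(), delta=5)
        assert abs(last_refresh_time - time.monotonic()) < 5
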
diff --git a/tests/cli/test_cli_parser.py b/tests/cli/test_cli_parser.py
index 4a2dd85..1c2e2aa 100644
--- a/tests/cli/test_cli_parser.py
+++ b/tests/cli/test_cli_parser.py
@@ -24,6 +24,8 @@ import re
 from collections import Counter
 from unittest import TestCase
 
+import pytest
+
 from airflow.cli import cli_parser
 
 # Cannot be `--snake_case` or contain uppercase letters
@@ -43,7 +45,7 @@ class TestCli(TestCase):
             arg for arg in cli_args.values() if len(arg.flags) == 1 and arg.flags[0].startswith("-")
         ]
         for arg in optional_long:
-            self.assertIsNone(ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[0]), f"{arg.flags[0]} is not match")
+            assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[0]) is None, f"{arg.flags[0]} should not match"
 
     def test_arg_option_mix_short_long(self):
         """
@@ -53,10 +55,8 @@ class TestCli(TestCase):
             arg for arg in cli_args.values() if len(arg.flags) == 2 and arg.flags[0].startswith("-")
         ]
         for arg in optional_mix:
-            self.assertIsNotNone(
-                LEGAL_SHORT_OPTION_PATTERN.match(arg.flags[0]), f"{arg.flags[0]} is not match"
-            )
-            self.assertIsNone(ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[1]), f"{arg.flags[1]} is not match")
+            assert LEGAL_SHORT_OPTION_PATTERN.match(arg.flags[0]) is not None, f"{arg.flags[0]} should match"
+            assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[1]) is None, f"{arg.flags[1]} should not match"
 
     def test_subcommand_conflict(self):
         """
@@ -69,9 +69,7 @@ class TestCli(TestCase):
         }
         for group_name, sub in subcommand.items():
             name = [command.name.lower() for command in sub]
-            self.assertEqual(
-                len(name), len(set(name)), f"Command group {group_name} have conflict subcommand"
-            )
+            assert len(name) == len(set(name)), f"Command group {group_name} has conflicting subcommands"
 
     def test_subcommand_arg_name_conflict(self):
         """
@@ -85,10 +83,8 @@ class TestCli(TestCase):
         for group, command in subcommand.items():
             for com in command:
                 conflict_arg = [arg for arg, count in Counter(com.args).items() if count > 1]
-                self.assertListEqual(
-                    [],
-                    conflict_arg,
-                    f"Command group {group} function {com.name} have " f"conflict args name {conflict_arg}",
+                assert [] == conflict_arg, (
+                    f"Command group {group} function {com.name} have " f"conflict args name {conflict_arg}"
                 )
 
     def test_subcommand_arg_flag_conflict(self):
@@ -106,31 +102,25 @@ class TestCli(TestCase):
                     a.flags[0] for a in com.args if (len(a.flags) == 1 and not a.flags[0].startswith("-"))
                 ]
                 conflict_position = [arg for arg, count in Counter(position).items() if count > 1]
-                self.assertListEqual(
-                    [],
-                    conflict_position,
+                assert [] == conflict_position, (
                     f"Command group {group} function {com.name} have conflict "
-                    f"position flags {conflict_position}",
+                    f"position flags {conflict_position}"
                 )
 
                 long_option = [
                     a.flags[0] for a in com.args if (len(a.flags) == 1 and a.flags[0].startswith("-"))
                 ] + [a.flags[1] for a in com.args if len(a.flags) == 2]
                 conflict_long_option = [arg for arg, count in Counter(long_option).items() if count > 1]
-                self.assertListEqual(
-                    [],
-                    conflict_long_option,
+                assert [] == conflict_long_option, (
                     f"Command group {group} function {com.name} have conflict "
-                    f"long option flags {conflict_long_option}",
+                    f"long option flags {conflict_long_option}"
                 )
 
                 short_option = [a.flags[0] for a in com.args if len(a.flags) == 2]
                 conflict_short_option = [arg for arg, count in Counter(short_option).items() if count > 1]
-                self.assertEqual(
-                    [],
-                    conflict_short_option,
+                assert [] == conflict_short_option, (
                     f"Command group {group} function {com.name} have conflict "
-                    f"short option flags {conflict_short_option}",
+                    f"short option flags {conflict_short_option}"
                 )
 
     def test_falsy_default_value(self):
@@ -139,20 +129,20 @@ class TestCli(TestCase):
         arg.add_to_parser(parser)
 
         args = parser.parse_args(['--test', '10'])
-        self.assertEqual(args.test, 10)
+        assert args.test == 10
 
         args = parser.parse_args([])
-        self.assertEqual(args.test, 0)
+        assert args.test == 0
 
     def test_commands_and_command_group_sections(self):
         parser = cli_parser.get_parser()
 
         with contextlib.redirect_stdout(io.StringIO()) as stdout:
-            with self.assertRaises(SystemExit):
+            with pytest.raises(SystemExit):
                 parser.parse_args(['--help'])
             stdout = stdout.getvalue()
-        self.assertIn("Commands", stdout)
-        self.assertIn("Groups", stdout)
+        assert "Commands" in stdout
+        assert "Groups" in stdout
 
     def test_should_display_help(self):
         parser = cli_parser.get_parser()
@@ -167,12 +157,12 @@ class TestCli(TestCase):
             )
         ]
         for cmd_args in all_command_as_args:
-            with self.assertRaises(SystemExit):
+            with pytest.raises(SystemExit):
                 parser.parse_args([*cmd_args, '--help'])
 
     def test_positive_int(self):
-        self.assertEqual(1, cli_parser.positive_int('1'))
+        assert 1 == cli_parser.positive_int('1')
 
-        with self.assertRaises(argparse.ArgumentTypeError):
+        with pytest.raises(argparse.ArgumentTypeError):
             cli_parser.positive_int('0')
+        with pytest.raises(argparse.ArgumentTypeError):
             cli_parser.positive_int('-1')
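
Note: a pytest.raises block ends at the first exception, so any statement after
the raising call is dead code; each input that must raise needs its own block,
as in the split above. A sketch with a hypothetical stand-in (the real
cli_parser.positive_int raises argparse.ArgumentTypeError; ValueError is used
here to keep the sketch self-contained):

    import pytest

    def _positive_int_stub(value):
        # Hypothetical stand-in for cli_parser.positive_int.
        number = int(value)
        if number <= 0:
            raise ValueError(f"invalid positive int value: {value!r}")
        return number

    def test_positive_int_sketch():
        assert 1 == _positive_int_stub('1')
        with pytest.raises(ValueError):
            _positive_int_stub('0')
        with pytest.raises(ValueError):
            _positive_int_stub('-1')
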
diff --git a/tests/core/test_config_templates.py b/tests/core/test_config_templates.py
index 42ba991..2efa838 100644
--- a/tests/core/test_config_templates.py
+++ b/tests/core/test_config_templates.py
@@ -83,7 +83,7 @@ class TestAirflowCfg(unittest.TestCase):
     def test_should_be_ascii_file(self, filename: str):
         with open(os.path.join(CONFIG_TEMPLATES_FOLDER, filename), "rb") as f:
             content = f.read().decode("ascii")
-        self.assertTrue(content)
+        assert content
 
     @parameterized.expand(
         [
@@ -102,4 +102,4 @@ class TestAirflowCfg(unittest.TestCase):
         config = configparser.ConfigParser()
         config.read(filepath)
 
-        self.assertEqual(expected_sections, config.sections())
+        assert expected_sections == config.sections()
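
Note: assertTrue(x) relaxes to a bare "assert x" (truthiness), while the
boolean getters elsewhere in this change use "is True" / "is False", which
check identity and are stricter than assertEqual(True, ...) was, since
1 == True. Sketch:

    def test_truthiness_vs_identity_sketch():
        content = "[core]\n"  # hypothetical non-empty file content
        assert content  # truthiness: any non-empty string passes
        flag = True
        assert flag is True  # identity: rejects 1, even though 1 == True
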
diff --git a/tests/core/test_configuration.py b/tests/core/test_configuration.py
index 338c3e8..a279837 100644
--- a/tests/core/test_configuration.py
+++ b/tests/core/test_configuration.py
@@ -24,6 +24,8 @@ import warnings
 from collections import OrderedDict
 from unittest import mock
 
+import pytest
+
 from airflow import configuration
 from airflow.configuration import (
     DEFAULT_CONFIG,
@@ -54,44 +56,44 @@ class TestConf(unittest.TestCase):
         with unittest.mock.patch.dict('os.environ'):
             if 'AIRFLOW_HOME' in os.environ:
                 del os.environ['AIRFLOW_HOME']
-            self.assertEqual(get_airflow_home(), expand_env_var('~/airflow'))
+            assert get_airflow_home() == expand_env_var('~/airflow')
 
     def test_airflow_home_override(self):
         with unittest.mock.patch.dict('os.environ', AIRFLOW_HOME='/path/to/airflow'):
-            self.assertEqual(get_airflow_home(), '/path/to/airflow')
+            assert get_airflow_home() == '/path/to/airflow'
 
     def test_airflow_config_default(self):
         with unittest.mock.patch.dict('os.environ'):
             if 'AIRFLOW_CONFIG' in os.environ:
                 del os.environ['AIRFLOW_CONFIG']
-            self.assertEqual(get_airflow_config('/home/airflow'), expand_env_var('/home/airflow/airflow.cfg'))
+            assert get_airflow_config('/home/airflow') == expand_env_var('/home/airflow/airflow.cfg')
 
     def test_airflow_config_override(self):
         with unittest.mock.patch.dict('os.environ', AIRFLOW_CONFIG='/path/to/airflow/airflow.cfg'):
-            self.assertEqual(get_airflow_config('/home//airflow'), '/path/to/airflow/airflow.cfg')
+            assert get_airflow_config('/home//airflow') == '/path/to/airflow/airflow.cfg'
 
     @conf_vars({("core", "percent"): "with%%inside"})
     def test_case_sensitivity(self):
         # section and key are case insensitive for get method
         # note: this is not the case for as_dict method
-        self.assertEqual(conf.get("core", "percent"), "with%inside")
-        self.assertEqual(conf.get("core", "PERCENT"), "with%inside")
-        self.assertEqual(conf.get("CORE", "PERCENT"), "with%inside")
+        assert conf.get("core", "percent") == "with%inside"
+        assert conf.get("core", "PERCENT") == "with%inside"
+        assert conf.get("CORE", "PERCENT") == "with%inside"
 
     def test_env_var_config(self):
         opt = conf.get('testsection', 'testkey')
-        self.assertEqual(opt, 'testvalue')
+        assert opt == 'testvalue'
 
         opt = conf.get('testsection', 'testpercent')
-        self.assertEqual(opt, 'with%percent')
+        assert opt == 'with%percent'
 
-        self.assertTrue(conf.has_option('testsection', 'testkey'))
+        assert conf.has_option('testsection', 'testkey')
 
         with unittest.mock.patch.dict(
             'os.environ', AIRFLOW__KUBERNETES_ENVIRONMENT_VARIABLES__AIRFLOW__TESTSECTION__TESTKEY='nested'
         ):
             opt = conf.get('kubernetes_environment_variables', 'AIRFLOW__TESTSECTION__TESTKEY')
-            self.assertEqual(opt, 'nested')
+            assert opt == 'nested'
 
     @mock.patch.dict(
         'os.environ', AIRFLOW__KUBERNETES_ENVIRONMENT_VARIABLES__AIRFLOW__TESTSECTION__TESTKEY='nested'
@@ -101,42 +103,40 @@ class TestConf(unittest.TestCase):
         cfg_dict = conf.as_dict()
 
         # test that configs are picked up
-        self.assertEqual(cfg_dict['core']['unit_test_mode'], 'True')
+        assert cfg_dict['core']['unit_test_mode'] == 'True'
 
-        self.assertEqual(cfg_dict['core']['percent'], 'with%inside')
+        assert cfg_dict['core']['percent'] == 'with%inside'
 
         # test env vars
-        self.assertEqual(cfg_dict['testsection']['testkey'], '< hidden >')
-        self.assertEqual(
-            cfg_dict['kubernetes_environment_variables']['AIRFLOW__TESTSECTION__TESTKEY'], '< hidden >'
-        )
+        assert cfg_dict['testsection']['testkey'] == '< hidden >'
+        assert cfg_dict['kubernetes_environment_variables']['AIRFLOW__TESTSECTION__TESTKEY'] == '< hidden >'
 
     def test_conf_as_dict_source(self):
         # test display_source
         cfg_dict = conf.as_dict(display_source=True)
-        self.assertEqual(cfg_dict['core']['load_examples'][1], 'airflow.cfg')
-        self.assertEqual(cfg_dict['core']['load_default_connections'][1], 'airflow.cfg')
-        self.assertEqual(cfg_dict['testsection']['testkey'], ('< hidden >', 'env var'))
+        assert cfg_dict['core']['load_examples'][1] == 'airflow.cfg'
+        assert cfg_dict['core']['load_default_connections'][1] == 'airflow.cfg'
+        assert cfg_dict['testsection']['testkey'] == ('< hidden >', 'env var')
 
     def test_conf_as_dict_sensitive(self):
         # test display_sensitive
         cfg_dict = conf.as_dict(display_sensitive=True)
-        self.assertEqual(cfg_dict['testsection']['testkey'], 'testvalue')
-        self.assertEqual(cfg_dict['testsection']['testpercent'], 'with%percent')
+        assert cfg_dict['testsection']['testkey'] == 'testvalue'
+        assert cfg_dict['testsection']['testpercent'] == 'with%percent'
 
         # test display_source and display_sensitive
         cfg_dict = conf.as_dict(display_sensitive=True, display_source=True)
-        self.assertEqual(cfg_dict['testsection']['testkey'], ('testvalue', 'env var'))
+        assert cfg_dict['testsection']['testkey'] == ('testvalue', 'env var')
 
     @conf_vars({("core", "percent"): "with%%inside"})
     def test_conf_as_dict_raw(self):
         # test display_sensitive
         cfg_dict = conf.as_dict(raw=True, display_sensitive=True)
-        self.assertEqual(cfg_dict['testsection']['testkey'], 'testvalue')
+        assert cfg_dict['testsection']['testkey'] == 'testvalue'
 
         # Values with '%' in them should be escaped
-        self.assertEqual(cfg_dict['testsection']['testpercent'], 'with%%percent')
-        self.assertEqual(cfg_dict['core']['percent'], 'with%%inside')
+        assert cfg_dict['testsection']['testpercent'] == 'with%%percent'
+        assert cfg_dict['core']['percent'] == 'with%%inside'
 
     def test_conf_as_dict_exclude_env(self):
         # test display_sensitive
@@ -144,7 +144,7 @@ class TestConf(unittest.TestCase):
 
         # Since testsection is only created from env vars, it shouldn't be
         # present at all if we don't ask for env vars to be included.
-        self.assertNotIn('testsection', cfg_dict)
+        assert 'testsection' not in cfg_dict
 
     def test_command_precedence(self):
         test_config = '''[test]
@@ -167,35 +167,35 @@ key6 = value6
             ('test', 'key2'),
             ('test', 'key4'),
         }
-        self.assertEqual('hello', test_conf.get('test', 'key1'))
-        self.assertEqual('cmd_result', test_conf.get('test', 'key2'))
-        self.assertEqual('airflow', test_conf.get('test', 'key3'))
-        self.assertEqual('key4_result', test_conf.get('test', 'key4'))
-        self.assertEqual('value6', test_conf.get('another', 'key6'))
-
-        self.assertEqual('hello', test_conf.get('test', 'key1', fallback='fb'))
-        self.assertEqual('value6', test_conf.get('another', 'key6', fallback='fb'))
-        self.assertEqual('fb', test_conf.get('another', 'key7', fallback='fb'))
-        self.assertEqual(True, test_conf.getboolean('another', 'key8_boolean', fallback='True'))
-        self.assertEqual(10, test_conf.getint('another', 'key8_int', fallback='10'))
-        self.assertEqual(1.0, test_conf.getfloat('another', 'key8_float', fallback='1'))
-
-        self.assertTrue(test_conf.has_option('test', 'key1'))
-        self.assertTrue(test_conf.has_option('test', 'key2'))
-        self.assertTrue(test_conf.has_option('test', 'key3'))
-        self.assertTrue(test_conf.has_option('test', 'key4'))
-        self.assertFalse(test_conf.has_option('test', 'key5'))
-        self.assertTrue(test_conf.has_option('another', 'key6'))
+        assert 'hello' == test_conf.get('test', 'key1')
+        assert 'cmd_result' == test_conf.get('test', 'key2')
+        assert 'airflow' == test_conf.get('test', 'key3')
+        assert 'key4_result' == test_conf.get('test', 'key4')
+        assert 'value6' == test_conf.get('another', 'key6')
+
+        assert 'hello' == test_conf.get('test', 'key1', fallback='fb')
+        assert 'value6' == test_conf.get('another', 'key6', fallback='fb')
+        assert 'fb' == test_conf.get('another', 'key7', fallback='fb')
+        assert test_conf.getboolean('another', 'key8_boolean', fallback='True') is True
+        assert 10 == test_conf.getint('another', 'key8_int', fallback='10')
+        assert 1.0 == test_conf.getfloat('another', 'key8_float', fallback='1')
+
+        assert test_conf.has_option('test', 'key1')
+        assert test_conf.has_option('test', 'key2')
+        assert test_conf.has_option('test', 'key3')
+        assert test_conf.has_option('test', 'key4')
+        assert not test_conf.has_option('test', 'key5')
+        assert test_conf.has_option('another', 'key6')
 
         cfg_dict = test_conf.as_dict(display_sensitive=True)
-        self.assertEqual('cmd_result', cfg_dict['test']['key2'])
-        self.assertNotIn('key2_cmd', cfg_dict['test'])
+        assert 'cmd_result' == cfg_dict['test']['key2']
+        assert 'key2_cmd' not in cfg_dict['test']
 
         # If we exclude _cmds then we should still see the commands to run, not
         # their values
         cfg_dict = test_conf.as_dict(include_cmds=False, display_sensitive=True)
-        self.assertNotIn('key4', cfg_dict['test'])
-        self.assertEqual('printf key4_result', cfg_dict['test']['key4_cmd'])
+        assert 'key4' not in cfg_dict['test']
+        assert 'printf key4_result' == cfg_dict['test']['key4_cmd']
 
     @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
     @conf_vars(
@@ -240,9 +240,7 @@ sql_alchemy_conn = airflow
             ('test', 'sql_alchemy_conn'),
         }
 
-        self.assertEqual(
-            'sqlite:////Users/airflow/airflow/airflow.db', test_conf.get('test', 'sql_alchemy_conn')
-        )
+        assert 'sqlite:////Users/airflow/airflow/airflow.db' == test_conf.get('test', 'sql_alchemy_conn')
 
     def test_getboolean(self):
         """Test AirflowConfigParser.getboolean"""
@@ -264,22 +262,22 @@ key7 = 0
 key8 = true #123
 """
         test_conf = AirflowConfigParser(default_config=test_config)
-        with self.assertRaisesRegex(
+        with pytest.raises(
             AirflowConfigException,
-            re.escape(
+            match=re.escape(
                 'Failed to convert value to bool. Please check "key1" key in "type_validation" section. '
                 'Current value: "non_bool_value".'
             ),
         ):
             test_conf.getboolean('type_validation', 'key1')
-        self.assertTrue(isinstance(test_conf.getboolean('true', 'key3'), bool))
-        self.assertEqual(True, test_conf.getboolean('true', 'key2'))
-        self.assertEqual(True, test_conf.getboolean('true', 'key3'))
-        self.assertEqual(True, test_conf.getboolean('true', 'key4'))
-        self.assertEqual(False, test_conf.getboolean('false', 'key5'))
-        self.assertEqual(False, test_conf.getboolean('false', 'key6'))
-        self.assertEqual(False, test_conf.getboolean('false', 'key7'))
-        self.assertEqual(True, test_conf.getboolean('inline-comment', 'key8'))
+        assert isinstance(test_conf.getboolean('true', 'key3'), bool)
+        assert test_conf.getboolean('true', 'key2') is True
+        assert test_conf.getboolean('true', 'key3') is True
+        assert test_conf.getboolean('true', 'key4') is True
+        assert test_conf.getboolean('false', 'key5') is False
+        assert test_conf.getboolean('false', 'key6') is False
+        assert test_conf.getboolean('false', 'key7') is False
+        assert test_conf.getboolean('inline-comment', 'key8') is True
 
     def test_getint(self):
         """Test AirflowConfigParser.getint"""
@@ -291,16 +289,16 @@ key1 = str
 key2 = 1
 """
         test_conf = AirflowConfigParser(default_config=test_config)
-        with self.assertRaisesRegex(
+        with pytest.raises(
             AirflowConfigException,
-            re.escape(
+            match=re.escape(
                 'Failed to convert value to int. Please check "key1" key in "invalid" section. '
                 'Current value: "str".'
             ),
         ):
             test_conf.getint('invalid', 'key1')
-        self.assertTrue(isinstance(test_conf.getint('valid', 'key2'), int))
-        self.assertEqual(1, test_conf.getint('valid', 'key2'))
+        assert isinstance(test_conf.getint('valid', 'key2'), int)
+        assert 1 == test_conf.getint('valid', 'key2')
 
     def test_getfloat(self):
         """Test AirflowConfigParser.getfloat"""
@@ -312,16 +310,16 @@ key1 = str
 key2 = 1.23
 """
         test_conf = AirflowConfigParser(default_config=test_config)
-        with self.assertRaisesRegex(
+        with pytest.raises(
             AirflowConfigException,
-            re.escape(
+            match=re.escape(
                 'Failed to convert value to float. Please check "key1" key in "invalid" section. '
                 'Current value: "str".'
             ),
         ):
             test_conf.getfloat('invalid', 'key1')
-        self.assertTrue(isinstance(test_conf.getfloat('valid', 'key2'), float))
-        self.assertEqual(1.23, test_conf.getfloat('valid', 'key2'))
+        assert isinstance(test_conf.getfloat('valid', 'key2'), float)
+        assert 1.23 == test_conf.getfloat('valid', 'key2')
 
     def test_has_option(self):
         test_config = '''[test]
@@ -329,9 +327,9 @@ key1 = value1
 '''
         test_conf = AirflowConfigParser()
         test_conf.read_string(test_config)
-        self.assertTrue(test_conf.has_option('test', 'key1'))
-        self.assertFalse(test_conf.has_option('test', 'key_not_exists'))
-        self.assertFalse(test_conf.has_option('section_not_exists', 'key1'))
+        assert test_conf.has_option('test', 'key1')
+        assert not test_conf.has_option('test', 'key_not_exists')
+        assert not test_conf.has_option('section_not_exists', 'key1')
 
     def test_remove_option(self):
         test_config = '''[test]
@@ -346,12 +344,12 @@ key2 = airflow
         test_conf = AirflowConfigParser(default_config=parameterized_config(test_config_default))
         test_conf.read_string(test_config)
 
-        self.assertEqual('hello', test_conf.get('test', 'key1'))
+        assert 'hello' == test_conf.get('test', 'key1')
         test_conf.remove_option('test', 'key1', remove_default=False)
-        self.assertEqual('awesome', test_conf.get('test', 'key1'))
+        assert 'awesome' == test_conf.get('test', 'key1')
 
         test_conf.remove_option('test', 'key2')
-        self.assertFalse(test_conf.has_option('test', 'key2'))
+        assert not test_conf.has_option('test', 'key2')
 
     def test_getsection(self):
         test_config = '''
@@ -371,21 +369,14 @@ key3 = value3
         test_conf = AirflowConfigParser(default_config=parameterized_config(test_config_default))
         test_conf.read_string(test_config)
 
-        self.assertEqual(OrderedDict([('key1', 'hello'), ('key2', 'airflow')]), test_conf.getsection('test'))
-        self.assertEqual(
-            OrderedDict([('key3', 'value3'), ('testkey', 'testvalue'), ('testpercent', 'with%percent')]),
-            test_conf.getsection('testsection'),
-        )
+        assert OrderedDict([('key1', 'hello'), ('key2', 'airflow')]) == test_conf.getsection('test')
+        assert OrderedDict(
+            [('key3', 'value3'), ('testkey', 'testvalue'), ('testpercent', 'with%percent')]
+        ) == test_conf.getsection('testsection')
 
-        self.assertEqual(
-            OrderedDict([('key', 'value')]),
-            test_conf.getsection('new_section'),
-        )
+        assert OrderedDict([('key', 'value')]) == test_conf.getsection('new_section')
 
-        self.assertEqual(
-            None,
-            test_conf.getsection('non_existent_section'),
-        )
+        assert test_conf.getsection('non_existent_section') is None
 
     def test_get_section_should_respect_cmd_env_variable(self):
         with tempfile.NamedTemporaryFile(delete=False) as cmd_file:
@@ -398,7 +389,7 @@ key3 = value3
             with mock.patch.dict("os.environ", {"AIRFLOW__WEBSERVER__SECRET_KEY_CMD": cmd_file.name}):
                 content = conf.getsection("webserver")
             os.unlink(cmd_file.name)
-        self.assertEqual(content["secret_key"], "difficult_unpredictable_cat_password")
+        assert content["secret_key"] == "difficult_unpredictable_cat_password"
 
     def test_kubernetes_environment_variables_section(self):
         test_config = '''
@@ -412,17 +403,16 @@ AIRFLOW_HOME = /root/airflow
         test_conf = AirflowConfigParser(default_config=parameterized_config(test_config_default))
         test_conf.read_string(test_config)
 
-        self.assertEqual(
-            OrderedDict([('key1', 'hello'), ('AIRFLOW_HOME', '/root/airflow')]),
-            test_conf.getsection('kubernetes_environment_variables'),
+        assert OrderedDict([('key1', 'hello'), ('AIRFLOW_HOME', '/root/airflow')]) == test_conf.getsection(
+            'kubernetes_environment_variables'
         )
 
     def test_broker_transport_options(self):
         section_dict = conf.getsection("celery_broker_transport_options")
-        self.assertTrue(isinstance(section_dict['visibility_timeout'], int))
-        self.assertTrue(isinstance(section_dict['_test_only_bool'], bool))
-        self.assertTrue(isinstance(section_dict['_test_only_float'], float))
-        self.assertTrue(isinstance(section_dict['_test_only_string'], str))
+        assert isinstance(section_dict['visibility_timeout'], int)
+        assert isinstance(section_dict['_test_only_bool'], bool)
+        assert isinstance(section_dict['_test_only_float'], float)
+        assert isinstance(section_dict['_test_only_string'], str)
 
     @conf_vars(
         {
@@ -440,12 +430,12 @@ AIRFLOW_HOME = /root/airflow
         # Remove it so we are sure we use the right setting
         conf.remove_option('celery', 'worker_concurrency')
 
-        with self.assertWarns(DeprecationWarning):
+        with pytest.warns(DeprecationWarning):
             with mock.patch.dict('os.environ', AIRFLOW__CELERY__CELERYD_CONCURRENCY="99"):
-                self.assertEqual(conf.getint('celery', 'worker_concurrency'), 99)
+                assert conf.getint('celery', 'worker_concurrency') == 99
 
-        with self.assertWarns(DeprecationWarning), conf_vars({('celery', 'celeryd_concurrency'): '99'}):
-            self.assertEqual(conf.getint('celery', 'worker_concurrency'), 99)
+        with pytest.warns(DeprecationWarning), conf_vars({('celery', 'celeryd_concurrency'): '99'}):
+            assert conf.getint('celery', 'worker_concurrency') == 99
 
     @conf_vars(
         {
@@ -464,12 +454,12 @@ AIRFLOW_HOME = /root/airflow
         conf.remove_option('core', 'logging_level')
         conf.remove_option('logging', 'logging_level')
 
-        with self.assertWarns(DeprecationWarning):
+        with pytest.warns(DeprecationWarning):
             with mock.patch.dict('os.environ', AIRFLOW__CORE__LOGGING_LEVEL="VALUE"):
-                self.assertEqual(conf.get('logging', 'logging_level'), "VALUE")
+                assert conf.get('logging', 'logging_level') == "VALUE"
 
-        with self.assertWarns(DeprecationWarning), conf_vars({('core', 'logging_level'): 'VALUE'}):
-            self.assertEqual(conf.get('logging', 'logging_level'), "VALUE")
+        with pytest.warns(DeprecationWarning), conf_vars({('core', 'logging_level'): 'VALUE'}):
+            assert conf.get('logging', 'logging_level') == "VALUE"
 
     @conf_vars(
         {
@@ -486,11 +476,11 @@ AIRFLOW_HOME = /root/airflow
 
         conf.remove_option('celery', 'result_backend')
         with conf_vars({('celery', 'celery_result_backend_cmd'): '/bin/echo 99'}):
-            with self.assertWarns(DeprecationWarning):
+            with pytest.warns(DeprecationWarning):
                 tmp = None
                 if 'AIRFLOW__CELERY__RESULT_BACKEND' in os.environ:
                     tmp = os.environ.pop('AIRFLOW__CELERY__RESULT_BACKEND')
-                self.assertEqual(conf.getint('celery', 'result_backend'), 99)
+                assert conf.getint('celery', 'result_backend') == 99
                 if tmp:
                     os.environ['AIRFLOW__CELERY__RESULT_BACKEND'] = tmp
 
@@ -516,14 +506,14 @@ AIRFLOW_HOME = /root/airflow
             test_conf.validate()
             return test_conf
 
-        with self.assertWarns(FutureWarning):
+        with pytest.warns(FutureWarning):
             test_conf = make_config()
-            self.assertEqual(test_conf.get('core', 'hostname_callable'), 'socket.getfqdn')
+            assert test_conf.get('core', 'hostname_callable') == 'socket.getfqdn'
 
-        with self.assertWarns(FutureWarning):
+        with pytest.warns(FutureWarning):
             with unittest.mock.patch.dict('os.environ', AIRFLOW__CORE__HOSTNAME_CALLABLE='socket:getfqdn'):
                 test_conf = make_config()
-                self.assertEqual(test_conf.get('core', 'hostname_callable'), 'socket.getfqdn')
+                assert test_conf.get('core', 'hostname_callable') == 'socket.getfqdn'
 
         with reset_warning_registry():
             with warnings.catch_warnings(record=True) as warning:
@@ -532,8 +522,8 @@ AIRFLOW_HOME = /root/airflow
                     AIRFLOW__CORE__HOSTNAME_CALLABLE='CarrierPigeon',
                 ):
                     test_conf = make_config()
-                    self.assertEqual(test_conf.get('core', 'hostname_callable'), 'CarrierPigeon')
-                    self.assertListEqual([], warning)
+                    assert test_conf.get('core', 'hostname_callable') == 'CarrierPigeon'
+                    assert [] == warning
 
     def test_deprecated_funcs(self):
         for func in [
@@ -548,7 +538,7 @@ AIRFLOW_HOME = /root/airflow
             'set',
         ]:
             with mock.patch(f'airflow.configuration.conf.{func}') as mock_method:
-                with self.assertWarns(DeprecationWarning):
+                with pytest.warns(DeprecationWarning):
                     getattr(configuration, func)()
                 mock_method.assert_called_once()
 
@@ -564,48 +554,48 @@ notacommand = OK
             # AIRFLOW__TESTCMDENV__ITSACOMMAND_CMD maps to ('testcmdenv', 'itsacommand') in
             # sensitive_config_values and therefore should return 'OK' from the environment variable's
             # echo command, and must not return 'NOT OK' from the configuration
-            self.assertEqual(test_cmdenv_conf.get('testcmdenv', 'itsacommand'), 'OK')
+            assert test_cmdenv_conf.get('testcmdenv', 'itsacommand') == 'OK'
             # AIRFLOW__TESTCMDENV__NOTACOMMAND_CMD maps to no entry in sensitive_config_values and therefore
             # the option should return 'OK' from the configuration, and must not return 'NOT OK' from
             # the environment variable's echo command
-            self.assertEqual(test_cmdenv_conf.get('testcmdenv', 'notacommand'), 'OK')
+            assert test_cmdenv_conf.get('testcmdenv', 'notacommand') == 'OK'
 
     def test_parameterized_config_gen(self):
 
         cfg = parameterized_config(DEFAULT_CONFIG)
 
         # making sure some basic building blocks are present:
-        self.assertIn("[core]", cfg)
-        self.assertIn("dags_folder", cfg)
-        self.assertIn("sql_alchemy_conn", cfg)
-        self.assertIn("fernet_key", cfg)
+        assert "[core]" in cfg
+        assert "dags_folder" in cfg
+        assert "sql_alchemy_conn" in cfg
+        assert "fernet_key" in cfg
 
         # making sure replacement actually happened
-        self.assertNotIn("{AIRFLOW_HOME}", cfg)
-        self.assertNotIn("{FERNET_KEY}", cfg)
+        assert "{AIRFLOW_HOME}" not in cfg
+        assert "{FERNET_KEY}" not in cfg
 
     def test_config_use_original_when_original_and_fallback_are_present(self):
-        self.assertTrue(conf.has_option("core", "FERNET_KEY"))
-        self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
+        assert conf.has_option("core", "FERNET_KEY")
+        assert not conf.has_option("core", "FERNET_KEY_CMD")
 
         fernet_key = conf.get('core', 'FERNET_KEY')
 
         with conf_vars({('core', 'FERNET_KEY_CMD'): 'printf HELLO'}):
             fallback_fernet_key = conf.get("core", "FERNET_KEY")
 
-        self.assertEqual(fernet_key, fallback_fernet_key)
+        assert fernet_key == fallback_fernet_key
 
     def test_config_throw_error_when_original_and_fallback_is_absent(self):
-        self.assertTrue(conf.has_option("core", "FERNET_KEY"))
-        self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
+        assert conf.has_option("core", "FERNET_KEY")
+        assert not conf.has_option("core", "FERNET_KEY_CMD")
 
         with conf_vars({('core', 'fernet_key'): None}):
-            with self.assertRaises(AirflowConfigException) as cm:
+            with pytest.raises(AirflowConfigException) as ctx:
                 conf.get("core", "FERNET_KEY")
 
-        exception = str(cm.exception)
+        exception = str(ctx.value)
         message = "section/key [core/fernet_key] not found in config"
-        self.assertEqual(message, exception)
+        assert message == exception
 
     def test_config_override_original_when_non_empty_envvar_is_provided(self):
         key = "AIRFLOW__CORE__FERNET_KEY"
@@ -614,7 +604,7 @@ notacommand = OK
         with mock.patch.dict('os.environ', {key: value}):
             fernet_key = conf.get('core', 'FERNET_KEY')
 
-        self.assertEqual(value, fernet_key)
+        assert value == fernet_key
 
     def test_config_override_original_when_empty_envvar_is_provided(self):
         key = "AIRFLOW__CORE__FERNET_KEY"
@@ -623,40 +613,41 @@ notacommand = OK
         with mock.patch.dict('os.environ', {key: value}):
             fernet_key = conf.get('core', 'FERNET_KEY')
 
-        self.assertEqual(value, fernet_key)
+        assert value == fernet_key
 
     @mock.patch.dict("os.environ", {"AIRFLOW__CORE__DAGS_FOLDER": "/tmp/test_folder"})
     def test_write_should_respect_env_variable(self):
         with io.StringIO() as string_file:
             conf.write(string_file)
             content = string_file.getvalue()
-        self.assertIn("dags_folder = /tmp/test_folder", content)
+        assert "dags_folder = /tmp/test_folder" in content
 
     def test_run_command(self):
         write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
 
         cmd = f'import sys; {write}; sys.stdout.flush()'
 
-        self.assertEqual(run_command(f"python -c '{cmd}'"), '\u1000foo')
+        assert run_command(f"python -c '{cmd}'") == '\u1000foo'
 
-        self.assertEqual(run_command('echo "foo bar"'), 'foo bar\n')
-        self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
+        assert run_command('echo "foo bar"') == 'foo bar\n'
+        with pytest.raises(AirflowConfigException):
+            run_command('bash -c "exit 1"')
 
     def test_confirm_unittest_mod(self):
-        self.assertTrue(conf.get('core', 'unit_test_mode'))
+        assert conf.get('core', 'unit_test_mode')
 
     @conf_vars({("core", "store_serialized_dags"): "True"})
     def test_store_dag_code_default_config(self):
         store_serialized_dags = conf.getboolean('core', 'store_serialized_dags', fallback=False)
         store_dag_code = conf.getboolean("core", "store_dag_code", fallback=store_serialized_dags)
-        self.assertFalse(conf.has_option("core", "store_dag_code"))
-        self.assertTrue(store_serialized_dags)
-        self.assertTrue(store_dag_code)
+        assert not conf.has_option("core", "store_dag_code")
+        assert store_serialized_dags
+        assert store_dag_code
 
     @conf_vars({("core", "store_serialized_dags"): "True", ("core", "store_dag_code"): "False"})
     def test_store_dag_code_config_when_set(self):
         store_serialized_dags = conf.getboolean('core', 'store_serialized_dags', fallback=False)
         store_dag_code = conf.getboolean("core", "store_dag_code", fallback=store_serialized_dags)
-        self.assertTrue(conf.has_option("core", "store_dag_code"))
-        self.assertTrue(store_serialized_dags)
-        self.assertFalse(store_dag_code)
+        assert conf.has_option("core", "store_dag_code")
+        assert store_serialized_dags
+        assert not store_dag_code
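
Every hunk in this file applies the same two rewrites: unittest assertion helpers collapse into bare asserts, and self.assertWarns becomes pytest.warns. A minimal, self-contained sketch of the resulting shape (legacy_getter is an illustrative stand-in, not an Airflow API):

    import warnings

    import pytest

    def legacy_getter():
        # Stand-in for a deprecated accessor; purely illustrative.
        warnings.warn("use the new option name instead", DeprecationWarning)
        return "value"

    def test_deprecated_getter_warns():
        # pytest.warns replaces self.assertWarns(DeprecationWarning), and
        # the equality check is a plain assert instead of assertEqual.
        with pytest.warns(DeprecationWarning):
            assert legacy_getter() == "value"
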
diff --git a/tests/core/test_core.py b/tests/core/test_core.py
index ba05f02..5073aa4 100644
--- a/tests/core/test_core.py
+++ b/tests/core/test_core.py
@@ -23,6 +23,8 @@ import unittest
 from datetime import timedelta
 from time import sleep
 
+import pytest
+
 from airflow import settings
 from airflow.exceptions import AirflowException, AirflowTaskTimeout
 from airflow.hooks.base import BaseHook
@@ -124,31 +126,28 @@ class TestCore(unittest.TestCase):
         """
         msg = 'Invalid arguments were passed to BashOperator (task_id: test_illegal_args).'
         with conf_vars({('operators', 'allow_illegal_arguments'): 'True'}):
-            with self.assertWarns(PendingDeprecationWarning) as warning:
+            with pytest.warns(PendingDeprecationWarning) as warnings:
                 BashOperator(
                     task_id='test_illegal_args',
                     bash_command='echo success',
                     dag=self.dag,
                     illegal_argument_1234='hello?',
                 )
-                assert any(msg in str(w) for w in warning.warnings)
+                assert any(msg in str(w) for w in warnings)
 
     def test_illegal_args_forbidden(self):
         """
         Tests that operators raise exceptions on illegal arguments when
         illegal arguments are not allowed.
         """
-        with self.assertRaises(AirflowException) as ctx:
+        with pytest.raises(AirflowException) as ctx:
             BashOperator(
                 task_id='test_illegal_args',
                 bash_command='echo success',
                 dag=self.dag,
                 illegal_argument_1234='hello?',
             )
-        self.assertIn(
-            'Invalid arguments were passed to BashOperator (task_id: test_illegal_args).',
-            str(ctx.exception),
-        )
+        assert 'Invalid arguments were passed to BashOperator (task_id: test_illegal_args).' in str(ctx.value)
 
     def test_bash_operator(self):
         op = BashOperator(task_id='test_bash_operator', bash_command="echo success", dag=self.dag)
@@ -176,7 +175,8 @@ class TestCore(unittest.TestCase):
             bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
             dag=self.dag,
         )
-        self.assertRaises(AirflowTaskTimeout, op.run, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
+        with pytest.raises(AirflowTaskTimeout):
+            op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
         sleep(2)
         pid = -1
         for proc in psutil.process_iter():
@@ -201,10 +201,9 @@ class TestCore(unittest.TestCase):
             dag=self.dag,
             on_failure_callback=check_failure,
         )
-        self.assertRaises(
-            AirflowException, op.run, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True
-        )
-        self.assertTrue(data['called'])
+        with pytest.raises(AirflowException):
+            op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
+        assert data['called']
 
     def test_dryrun(self):
         op = BashOperator(task_id='test_dryrun', bash_command="echo success", dag=self.dag)
@@ -226,9 +225,8 @@ class TestCore(unittest.TestCase):
             python_callable=lambda: sleep(5),
             dag=self.dag,
         )
-        self.assertRaises(
-            AirflowTaskTimeout, op.run, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True
-        )
+        with pytest.raises(AirflowTaskTimeout):
+            op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
 
     def test_python_op(self):
         def test_py_op(templates_dict, ds, **kwargs):
@@ -243,7 +241,7 @@ class TestCore(unittest.TestCase):
 
     def test_complex_template(self):
         def verify_templated_field(context):
-            self.assertEqual(context['ti'].task.some_templated_field['bar'][1], context['ds'])
+            assert context['ti'].task.some_templated_field['bar'][1] == context['ds']
 
         op = OperatorSubclass(
             task_id='test_complex_template',
@@ -282,26 +280,26 @@ class TestCore(unittest.TestCase):
         context = ti.get_template_context()
 
         # DEFAULT DATE is 2015-01-01
-        self.assertEqual(context['ds'], '2015-01-01')
-        self.assertEqual(context['ds_nodash'], '20150101')
+        assert context['ds'] == '2015-01-01'
+        assert context['ds_nodash'] == '20150101'
 
         # next_ds is 2015-01-02 as the dag interval is daily
-        self.assertEqual(context['next_ds'], '2015-01-02')
-        self.assertEqual(context['next_ds_nodash'], '20150102')
+        assert context['next_ds'] == '2015-01-02'
+        assert context['next_ds_nodash'] == '20150102'
 
         # prev_ds is 2014-12-31 as the dag interval is daily
-        self.assertEqual(context['prev_ds'], '2014-12-31')
-        self.assertEqual(context['prev_ds_nodash'], '20141231')
+        assert context['prev_ds'] == '2014-12-31'
+        assert context['prev_ds_nodash'] == '20141231'
 
-        self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
-        self.assertEqual(context['ts_nodash'], '20150101T000000')
-        self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
+        assert context['ts'] == '2015-01-01T00:00:00+00:00'
+        assert context['ts_nodash'] == '20150101T000000'
+        assert context['ts_nodash_with_tz'] == '20150101T000000+0000'
 
-        self.assertEqual(context['yesterday_ds'], '2014-12-31')
-        self.assertEqual(context['yesterday_ds_nodash'], '20141231')
+        assert context['yesterday_ds'] == '2014-12-31'
+        assert context['yesterday_ds_nodash'] == '20141231'
 
-        self.assertEqual(context['tomorrow_ds'], '2015-01-02')
-        self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
+        assert context['tomorrow_ds'] == '2015-01-02'
+        assert context['tomorrow_ds_nodash'] == '20150102'
 
     def test_local_task_job(self):
         TI = TaskInstance
@@ -319,7 +317,7 @@ class TestCore(unittest.TestCase):
         ti.run(ignore_ti_state=True)
 
     def test_bad_trigger_rule(self):
-        with self.assertRaises(AirflowException):
+        with pytest.raises(AirflowException):
             DummyOperator(task_id='test_bad_trigger', trigger_rule="non_existent", dag=self.dag)
 
     def test_terminate_task(self):
@@ -341,7 +339,7 @@ class TestCore(unittest.TestCase):
         session = settings.Session()
         ti.refresh_from_db(session=session)
         # making sure it's actually running
-        self.assertEqual(State.RUNNING, ti.state)
+        assert State.RUNNING == ti.state
         ti = (
             session.query(TI)
             .filter_by(dag_id=task.dag_id, task_id=task.task_id, execution_date=DEFAULT_DATE)
@@ -356,7 +354,7 @@ class TestCore(unittest.TestCase):
 
         # making sure that the task ended up as failed
         ti.refresh_from_db(session=session)
-        self.assertEqual(State.FAILED, ti.state)
+        assert State.FAILED == ti.state
         session.close()
 
     def test_task_fail_duration(self):
@@ -390,9 +388,9 @@ class TestCore(unittest.TestCase):
             .all()
         )
 
-        self.assertEqual(0, len(op1_fails))
-        self.assertEqual(1, len(op2_fails))
-        self.assertGreaterEqual(sum([f.duration for f in op2_fails]), 3)
+        assert 0 == len(op1_fails)
+        assert 1 == len(op2_fails)
+        assert sum([f.duration for f in op2_fails]) >= 3
 
     def test_externally_triggered_dagrun(self):
         TI = TaskInstance
@@ -418,8 +416,8 @@ class TestCore(unittest.TestCase):
         context = ti.get_template_context()
 
         # next_ds/prev_ds should be the execution date for manually triggered runs
-        self.assertEqual(context['next_ds'], execution_ds)
-        self.assertEqual(context['next_ds_nodash'], execution_ds_nodash)
+        assert context['next_ds'] == execution_ds
+        assert context['next_ds_nodash'] == execution_ds_nodash
 
-        self.assertEqual(context['prev_ds'], execution_ds)
-        self.assertEqual(context['prev_ds_nodash'], execution_ds_nodash)
+        assert context['prev_ds'] == execution_ds
+        assert context['prev_ds_nodash'] == execution_ds_nodash
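
Two conversions recur in the hunks above: the callable form self.assertRaises(Exc, fn, *args) becomes a with pytest.raises(Exc) block around an ordinary call, and the captured exception moves from cm.exception to ctx.value. A hedged sketch with throwaway names:

    import pytest

    def must_be_positive(n):
        # Illustrative helper, not an Airflow API.
        if n <= 0:
            raise ValueError(f"expected a positive number, got {n}")
        return n

    def test_rejects_zero():
        # unittest: self.assertRaises(ValueError, must_be_positive, 0)
        with pytest.raises(ValueError) as ctx:
            must_be_positive(0)
        # unittest exposed the exception as cm.exception; pytest's
        # ExceptionInfo exposes it as ctx.value.
        assert "expected a positive number" in str(ctx.value)
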
diff --git a/tests/core/test_core_to_contrib.py b/tests/core/test_core_to_contrib.py
index e91e69d..4e6d64b 100644
--- a/tests/core/test_core_to_contrib.py
+++ b/tests/core/test_core_to_contrib.py
@@ -19,9 +19,9 @@
 import importlib
 import sys
 from inspect import isabstract
-from typing import Any
 from unittest import TestCase, mock
 
+import pytest
 from parameterized import parameterized
 
 from airflow.models.baseoperator import BaseOperator
@@ -30,20 +30,20 @@ from tests.deprecated_classes import ALL, RENAMED_ALL
 
 class TestMovingCoreToContrib(TestCase):
     @staticmethod
-    def assert_warning(msg: str, warning: Any):
+    def assert_warning(msg: str, warnings):
         error = f"Text '{msg}' not in warnings"
-        assert any(msg in str(w) for w in warning.warnings), error
+        assert any(msg in str(w) for w in warnings), error
 
     def assert_is_subclass(self, clazz, other):
-        self.assertTrue(issubclass(clazz, other), f"{clazz} is not subclass of {other}")
+        assert issubclass(clazz, other), f"{clazz} is not subclass of {other}"
 
     def assert_proper_import(self, old_resource, new_resource):
         new_path, _, _ = new_resource.rpartition(".")
         old_path, _, _ = old_resource.rpartition(".")
-        with self.assertWarns(DeprecationWarning) as warning_msg:
+        with pytest.warns(DeprecationWarning) as warnings:
             # Reload to see deprecation warning each time
             importlib.reload(importlib.import_module(old_path))
-            self.assert_warning(new_path, warning_msg)
+            self.assert_warning(new_path, warnings)
 
     def skip_test_with_mssql_in_py38(self, path_a="", path_b=""):
         py_38 = sys.version_info >= (3, 8)
@@ -75,15 +75,15 @@ class TestMovingCoreToContrib(TestCase):
         self.skip_test_with_mssql_in_py38(new_module, old_module)
         deprecation_warning_msg = "This class is deprecated."
         old_module_class = self.get_class_from_path(old_module)
-        with self.assertWarnsRegex(DeprecationWarning, deprecation_warning_msg) as wrn:
+        with pytest.warns(DeprecationWarning, match=deprecation_warning_msg) as warnings:
             with mock.patch(f"{new_module}.__init__") as init_mock:
                 init_mock.return_value = None
                 klass = old_module_class()
                 if isinstance(klass, BaseOperator):
                     # In case of operators we are validating that proper stacklevel
                     # is used (=3 or =4 if @apply_defaults)
-                    assert len(wrn.warnings) == 1
-                    assert wrn.warnings[0].filename == __file__
+                    assert len(warnings) == 1
+                    assert warnings[0].filename == __file__
                 init_mock.assert_called_once_with()
 
     @parameterized.expand(ALL)
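
assert_warning above iterates the pytest.warns recorder directly, where the unittest context manager required warning.warnings. The recorder behaves like a sequence of warnings.WarningMessage objects, so len(), indexing, and attributes such as .message and .filename all work; a small sketch, assuming a throwaway helper:

    import warnings

    import pytest

    def noisy():
        warnings.warn("this helper is deprecated", DeprecationWarning)

    def test_recorder_is_a_sequence():
        with pytest.warns(DeprecationWarning) as record:
            noisy()
        assert len(record) == 1
        assert "deprecated" in str(record[0].message)
        # The warning was issued in this file, so .filename points here.
        assert record[0].filename == __file__
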
diff --git a/tests/core/test_impersonation_tests.py b/tests/core/test_impersonation_tests.py
index 686142e..bc1fa9b 100644
--- a/tests/core/test_impersonation_tests.py
+++ b/tests/core/test_impersonation_tests.py
@@ -140,7 +140,7 @@ class TestImpersonation(unittest.TestCase):
         ti = models.TaskInstance(task=dag.get_task(task_id), execution_date=DEFAULT_DATE)
         ti.refresh_from_db()
 
-        self.assertEqual(ti.state, State.SUCCESS)
+        assert ti.state == State.SUCCESS
 
     def test_impersonation(self):
         """
@@ -203,7 +203,7 @@ class TestImpersonationWithCustomPythonPath(unittest.TestCase):
         ti = models.TaskInstance(task=dag.get_task(task_id), execution_date=DEFAULT_DATE)
         ti.refresh_from_db()
 
-        self.assertEqual(ti.state, State.SUCCESS)
+        assert ti.state == State.SUCCESS
 
     @mock_custom_module_path(TEST_UTILS_FOLDER)
     def test_impersonation_custom(self):
diff --git a/tests/core/test_logging_config.py b/tests/core/test_logging_config.py
index 9c69833..635af8d 100644
--- a/tests/core/test_logging_config.py
+++ b/tests/core/test_logging_config.py
@@ -25,6 +25,8 @@ import tempfile
 import unittest
 from unittest.mock import patch
 
+import pytest
+
 from airflow.configuration import conf
 from tests.test_utils.config import conf_vars
 
@@ -192,7 +194,7 @@ class TestLoggingSettings(unittest.TestCase):
         with settings_context(SETTINGS_FILE_INVALID):
             with patch.object(log, 'error') as mock_info:
                 # Load config
-                with self.assertRaises(ValueError):
+                with pytest.raises(ValueError):
                     configure_logging()
 
                 mock_info.assert_called_once_with(
@@ -230,7 +232,7 @@ class TestLoggingSettings(unittest.TestCase):
         with settings_context(SETTINGS_FILE_EMPTY):
             from airflow.logging_config import configure_logging
 
-            with self.assertRaises(ImportError):
+            with pytest.raises(ImportError):
                 configure_logging()
 
     # When the key is not available in the configuration
@@ -254,9 +256,9 @@ class TestLoggingSettings(unittest.TestCase):
         from airflow.logging_config import configure_logging
 
         with conf_vars({('logging', 'task_log_reader'): 'file.task'}):
-            with self.assertWarnsRegex(DeprecationWarning, r'file.task'):
+            with pytest.warns(DeprecationWarning, match=r'file.task'):
                 configure_logging()
-            self.assertEqual(conf.get('logging', 'task_log_reader'), 'task')
+            assert conf.get('logging', 'task_log_reader') == 'task'
 
     def test_loading_remote_logging_with_wasb_handler(self):
         """Test if logging can be configured successfully for Azure Blob Storage"""
@@ -275,4 +277,4 @@ class TestLoggingSettings(unittest.TestCase):
             configure_logging()
 
         logger = logging.getLogger('airflow.task')
-        self.assertIsInstance(logger.handlers[0], WasbTaskHandler)
+        assert isinstance(logger.handlers[0], WasbTaskHandler)
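
The task_log_reader hunk swaps assertWarnsRegex for pytest.warns(..., match=...). The match= argument is a regular expression applied with re.search against the warning message, mirroring assertWarnsRegex, so literal dots need escaping; a sketch under that assumption:

    import warnings

    import pytest

    def read_task_log_reader():
        # Illustrative stand-in for the deprecated config lookup.
        warnings.warn("option file.task is deprecated; using task", DeprecationWarning)
        return "task"

    def test_match_is_searched_as_a_regex():
        # The dot is escaped because match= is a regex, not a substring.
        with pytest.warns(DeprecationWarning, match=r"file\.task"):
            assert read_task_log_reader() == "task"
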
diff --git a/tests/core/test_providers_manager.py b/tests/core/test_providers_manager.py
index ec05da2..7d80c58 100644
--- a/tests/core/test_providers_manager.py
+++ b/tests/core/test_providers_manager.py
@@ -15,6 +15,7 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+import re
 import unittest
 
 from airflow.providers_manager import ProvidersManager
@@ -201,26 +202,26 @@ class TestProviderManager(unittest.TestCase):
         for provider in provider_list:
             package_name = provider_manager.providers[provider][1]['package-name']
             version = provider_manager.providers[provider][0]
-            self.assertRegex(version, r'[0-9]*\.[0-9]*\.[0-9]*.*')
-            self.assertEqual(package_name, provider)
-        self.assertEqual(ALL_PROVIDERS, provider_list)
+            assert re.search(r'[0-9]*\.[0-9]*\.[0-9]*.*', version)
+            assert package_name == provider
+        assert ALL_PROVIDERS == provider_list
 
     def test_hooks(self):
         provider_manager = ProvidersManager()
         connections_list = list(provider_manager.hooks.keys())
-        self.assertEqual(CONNECTIONS_LIST, connections_list)
+        assert CONNECTIONS_LIST == connections_list
 
     def test_connection_form_widgets(self):
         provider_manager = ProvidersManager()
         connections_form_widgets = list(provider_manager.connection_form_widgets.keys())
-        self.assertEqual(CONNECTION_FORM_WIDGETS, connections_form_widgets)
+        assert CONNECTION_FORM_WIDGETS == connections_form_widgets
 
     def test_field_behaviours(self):
         provider_manager = ProvidersManager()
         connections_with_field_behaviours = list(provider_manager.field_behaviours.keys())
-        self.assertEqual(CONNECTIONS_WITH_FIELD_BEHAVIOURS, connections_with_field_behaviours)
+        assert CONNECTIONS_WITH_FIELD_BEHAVIOURS == connections_with_field_behaviours
 
     def test_extra_links(self):
         provider_manager = ProvidersManager()
         extra_link_class_names = list(provider_manager.extra_links_class_names)
-        self.assertEqual(EXTRA_LINKS, extra_link_class_names)
+        assert EXTRA_LINKS == extra_link_class_names
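
The version check above replaces self.assertRegex with re.search, which is the faithful substitute because assertRegex also uses search semantics (match anywhere in the string, not anchored). For example:

    import re

    def test_version_shape():
        version = "1.0.0rc1"  # illustrative value
        # re.search matches anywhere in the string, exactly as
        # assertRegex did; re.match or re.fullmatch would anchor it.
        assert re.search(r"[0-9]+\.[0-9]+\.[0-9]+.*", version)
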
diff --git a/tests/core/test_sentry.py b/tests/core/test_sentry.py
index bfff584..44a39d9 100644
--- a/tests/core/test_sentry.py
+++ b/tests/core/test_sentry.py
@@ -91,7 +91,7 @@ class TestSentryHook(unittest.TestCase):
         self.sentry.add_tagging(task_instance=self.ti)
         with configure_scope() as scope:
             for key, value in scope._tags.items():
-                self.assertEqual(TEST_SCOPE[key], value)
+                assert TEST_SCOPE[key] == value
 
     @freeze_time(CRUMB_DATE.isoformat())
     def test_add_breadcrumbs(self):
@@ -103,4 +103,4 @@ class TestSentryHook(unittest.TestCase):
 
         with configure_scope() as scope:
             test_crumb = scope._breadcrumbs.pop()
-            self.assertEqual(CRUMB, test_crumb)
+            assert CRUMB == test_crumb
diff --git a/tests/core/test_settings.py b/tests/core/test_settings.py
index 0915ec3..2c7bb9f 100644
--- a/tests/core/test_settings.py
+++ b/tests/core/test_settings.py
@@ -22,6 +22,8 @@ import tempfile
 import unittest
 from unittest.mock import MagicMock, call
 
+import pytest
+
 from airflow.exceptions import AirflowClusterPolicyViolation
 from tests.test_utils.config import conf_vars
 
@@ -112,7 +114,7 @@ class TestLocalSettings(unittest.TestCase):
 
             settings.import_local_settings()
 
-            with self.assertRaises(AttributeError):
+            with pytest.raises(AttributeError):
                 settings.not_policy()  # pylint: disable=no-member
 
     def test_import_with_dunder_all(self):
@@ -179,7 +181,7 @@ class TestLocalSettings(unittest.TestCase):
 
             task_instance = MagicMock()
             task_instance.owner = 'airflow'
-            with self.assertRaises(AirflowClusterPolicyViolation):
+            with pytest.raises(AirflowClusterPolicyViolation):
                 settings.task_must_have_owners(task_instance)  # pylint: disable=no-member
 
 
@@ -190,10 +192,10 @@ class TestUpdatedConfigNames(unittest.TestCase):
     def test_updates_deprecated_session_timeout_config_val_when_new_config_val_is_default(self):
         from airflow import settings
 
-        with self.assertWarns(DeprecationWarning):
+        with pytest.warns(DeprecationWarning):
             session_lifetime_config = settings.get_session_lifetime_config()
             minutes_in_five_days = 5 * 24 * 60
-            self.assertEqual(session_lifetime_config, minutes_in_five_days)
+            assert session_lifetime_config == minutes_in_five_days
 
     @conf_vars(
         {("webserver", "session_lifetime_days"): '5', ("webserver", "session_lifetime_minutes"): '43201'}
@@ -202,7 +204,7 @@ class TestUpdatedConfigNames(unittest.TestCase):
         from airflow import settings
 
         session_lifetime_config = settings.get_session_lifetime_config()
-        self.assertEqual(session_lifetime_config, 43201)
+        assert session_lifetime_config == 43201
 
     @conf_vars({("webserver", "session_lifetime_days"): ''})
     def test_uses_updated_session_timeout_config_by_default(self):
@@ -210,4 +212,4 @@ class TestUpdatedConfigNames(unittest.TestCase):
 
         session_lifetime_config = settings.get_session_lifetime_config()
         default_timeout_minutes = 30 * 24 * 60
-        self.assertEqual(session_lifetime_config, default_timeout_minutes)
+        assert session_lifetime_config == default_timeout_minutes
diff --git a/tests/core/test_sqlalchemy_config.py b/tests/core/test_sqlalchemy_config.py
index a7cd08b..c4c909b 100644
--- a/tests/core/test_sqlalchemy_config.py
+++ b/tests/core/test_sqlalchemy_config.py
@@ -19,6 +19,7 @@
 import unittest
 from unittest.mock import patch
 
+import pytest
 from sqlalchemy.pool import NullPool
 
 from airflow import settings
@@ -92,6 +93,6 @@ class TestSqlAlchemySettings(unittest.TestCase):
             ('core', 'sql_alchemy_connect_args'): 'does.not.exist',
             ('core', 'sql_alchemy_pool_enabled'): 'False',
         }
-        with self.assertRaises(AirflowConfigException):
+        with pytest.raises(AirflowConfigException):
             with conf_vars(config):
                 settings.configure_orm()
diff --git a/tests/core/test_stats.py b/tests/core/test_stats.py
index eebdde2..428192b 100644
--- a/tests/core/test_stats.py
+++ b/tests/core/test_stats.py
@@ -21,6 +21,7 @@ import unittest
 from unittest import mock
 from unittest.mock import Mock
 
+import pytest
 import statsd
 
 import airflow
@@ -107,7 +108,7 @@ class TestStats(unittest.TestCase):
     )
     def test_load_custom_statsd_client(self):
         importlib.reload(airflow.stats)
-        self.assertEqual('CustomStatsd', type(airflow.stats.Stats.statsd).__name__)
+        assert 'CustomStatsd' == type(airflow.stats.Stats.statsd).__name__  # noqa: E721
 
     @conf_vars(
         {
@@ -127,9 +128,9 @@ class TestStats(unittest.TestCase):
         }
     )
     def test_load_invalid_custom_stats_client(self):
-        with self.assertRaisesRegex(
+        with pytest.raises(
             AirflowConfigException,
-            re.escape(
+            match=re.escape(
                 'Your custom Statsd client must extend the statsd.'
                 'StatsClient in order to ensure backwards compatibility.'
             ),
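
test_load_invalid_custom_stats_client shows the assertRaisesRegex conversion: the pattern moves into pytest.raises(..., match=...), and because match= is interpreted as a regex, the literal message is wrapped in re.escape. A self-contained sketch with an invented error:

    import re

    import pytest

    def load_client(name):
        # Illustrative failure whose message contains regex metacharacters.
        raise ValueError(f"Client {name!r} must extend statsd.StatsClient (or a subclass).")

    def test_literal_message_is_escaped():
        expected = "must extend statsd.StatsClient (or a subclass)."
        with pytest.raises(ValueError, match=re.escape(expected)):
            load_client("Bogus")
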
diff --git a/tests/executors/test_base_executor.py b/tests/executors/test_base_executor.py
index 06fe030..2251878 100644
--- a/tests/executors/test_base_executor.py
+++ b/tests/executors/test_base_executor.py
@@ -41,9 +41,9 @@ class TestBaseExecutor(unittest.TestCase):
         executor.event_buffer[key2] = state, None
         executor.event_buffer[key3] = state, None
 
-        self.assertEqual(len(executor.get_event_buffer(("my_dag1",))), 1)
-        self.assertEqual(len(executor.get_event_buffer()), 2)
-        self.assertEqual(len(executor.event_buffer), 0)
+        assert len(executor.get_event_buffer(("my_dag1",))) == 1
+        assert len(executor.get_event_buffer()) == 2
+        assert len(executor.event_buffer) == 0
 
     @mock.patch('airflow.executors.base_executor.BaseExecutor.sync')
     @mock.patch('airflow.executors.base_executor.BaseExecutor.trigger_tasks')
@@ -71,4 +71,4 @@ class TestBaseExecutor(unittest.TestCase):
         key2 = TaskInstance(task=task_2, execution_date=date)
         key3 = TaskInstance(task=task_3, execution_date=date)
         tis = [key1, key2, key3]
-        self.assertEqual(BaseExecutor().try_adopt_task_instances(tis), tis)
+        assert BaseExecutor().try_adopt_task_instances(tis) == tis
diff --git a/tests/executors/test_celery_executor.py b/tests/executors/test_celery_executor.py
index 38aa583..944fa49 100644
--- a/tests/executors/test_celery_executor.py
+++ b/tests/executors/test_celery_executor.py
@@ -116,7 +116,7 @@ class TestCeleryExecutor(unittest.TestCase):
 
         with _prepare_app(broker_url, execute=fake_execute_command) as app:
             executor = celery_executor.CeleryExecutor()
-            self.assertEqual(executor.tasks, {})
+            assert executor.tasks == {}
             executor.start()
 
             with start_worker(app=app, logfile=sys.stdout, loglevel='info'):
@@ -146,32 +146,25 @@ class TestCeleryExecutor(unittest.TestCase):
 
                 executor._process_tasks(task_tuples_to_send)
 
-                self.assertEqual(
-                    list(executor.tasks.keys()),
-                    [
-                        ('success', 'fake_simple_ti', execute_date, 0),
-                        ('fail', 'fake_simple_ti', execute_date, 0),
-                    ],
-                )
-                self.assertEqual(
-                    executor.event_buffer[('success', 'fake_simple_ti', execute_date, 0)][0], State.QUEUED
-                )
-                self.assertEqual(
-                    executor.event_buffer[('fail', 'fake_simple_ti', execute_date, 0)][0], State.QUEUED
+                assert list(executor.tasks.keys()) == [
+                    ('success', 'fake_simple_ti', execute_date, 0),
+                    ('fail', 'fake_simple_ti', execute_date, 0),
+                ]
+                assert (
+                    executor.event_buffer[('success', 'fake_simple_ti', execute_date, 0)][0] == State.QUEUED
                 )
+                assert executor.event_buffer[('fail', 'fake_simple_ti', execute_date, 0)][0] == State.QUEUED
 
                 executor.end(synchronous=True)
 
-        self.assertEqual(
-            executor.event_buffer[('success', 'fake_simple_ti', execute_date, 0)][0], State.SUCCESS
-        )
-        self.assertEqual(executor.event_buffer[('fail', 'fake_simple_ti', execute_date, 0)][0], State.FAILED)
+        assert executor.event_buffer[('success', 'fake_simple_ti', execute_date, 0)][0] == State.SUCCESS
+        assert executor.event_buffer[('fail', 'fake_simple_ti', execute_date, 0)][0] == State.FAILED
 
-        self.assertNotIn('success', executor.tasks)
-        self.assertNotIn('fail', executor.tasks)
+        assert 'success' not in executor.tasks
+        assert 'fail' not in executor.tasks
 
-        self.assertEqual(executor.queued_tasks, {})
-        self.assertEqual(timedelta(0, 600), executor.task_adoption_timeout)
+        assert executor.queued_tasks == {}
+        assert timedelta(0, 600) == executor.task_adoption_timeout
 
     @pytest.mark.integration("redis")
     @pytest.mark.integration("rabbitmq")
@@ -198,8 +191,8 @@ class TestCeleryExecutor(unittest.TestCase):
             executor.queued_tasks[key] = value_tuple
             executor.task_publish_retries[key] = 1
             executor.heartbeat()
-        self.assertEqual(0, len(executor.queued_tasks), "Task should no longer be queued")
-        self.assertEqual(executor.event_buffer[('fail', 'fake_simple_ti', when, 0)][0], State.FAILED)
+        assert 0 == len(executor.queued_tasks), "Task should no longer be queued"
+        assert executor.event_buffer[('fail', 'fake_simple_ti', when, 0)][0] == State.FAILED
 
     @pytest.mark.integration("redis")
     @pytest.mark.integration("rabbitmq")
@@ -216,8 +209,8 @@ class TestCeleryExecutor(unittest.TestCase):
             # fake_execute_command takes no arguments while execute_command takes 1,
             # which will cause TypeError when calling task.apply_async()
             executor = celery_executor.CeleryExecutor()
-            self.assertEqual(executor.task_publish_retries, {})
-            self.assertEqual(executor.task_publish_max_retries, 3, msg="Assert Default Max Retries is 3")
+            assert executor.task_publish_retries == {}
+            assert executor.task_publish_max_retries == 3, "Assert Default Max Retries is 3"
 
             task = BashOperator(
                 task_id="test", bash_command="true", dag=DAG(dag_id='id'), start_date=datetime.now()
@@ -234,39 +227,36 @@ class TestCeleryExecutor(unittest.TestCase):
 
             # Test that when heartbeat is called again, task is published again to Celery Queue
             executor.heartbeat()
-            self.assertEqual(dict(executor.task_publish_retries), {key: 2})
-            self.assertEqual(1, len(executor.queued_tasks), "Task should remain in queue")
-            self.assertEqual(executor.event_buffer, {})
-            self.assertIn(
+            assert dict(executor.task_publish_retries) == {key: 2}
+            assert 1 == len(executor.queued_tasks), "Task should remain in queue"
+            assert executor.event_buffer == {}
+            assert (
                 "INFO:airflow.executors.celery_executor.CeleryExecutor:"
-                f"[Try 1 of 3] Task Timeout Error for Task: ({key}).",
-                cm.output,
+                f"[Try 1 of 3] Task Timeout Error for Task: ({key})." in cm.output
             )
 
             executor.heartbeat()
-            self.assertEqual(dict(executor.task_publish_retries), {key: 3})
-            self.assertEqual(1, len(executor.queued_tasks), "Task should remain in queue")
-            self.assertEqual(executor.event_buffer, {})
-            self.assertIn(
+            assert dict(executor.task_publish_retries) == {key: 3}
+            assert 1 == len(executor.queued_tasks), "Task should remain in queue"
+            assert executor.event_buffer == {}
+            assert (
                 "INFO:airflow.executors.celery_executor.CeleryExecutor:"
-                f"[Try 2 of 3] Task Timeout Error for Task: ({key}).",
-                cm.output,
+                f"[Try 2 of 3] Task Timeout Error for Task: ({key})." in cm.output
             )
 
             executor.heartbeat()
-            self.assertEqual(dict(executor.task_publish_retries), {key: 4})
-            self.assertEqual(1, len(executor.queued_tasks), "Task should remain in queue")
-            self.assertEqual(executor.event_buffer, {})
-            self.assertIn(
+            assert dict(executor.task_publish_retries) == {key: 4}
+            assert 1 == len(executor.queued_tasks), "Task should remain in queue"
+            assert executor.event_buffer == {}
+            assert (
                 "INFO:airflow.executors.celery_executor.CeleryExecutor:"
-                f"[Try 3 of 3] Task Timeout Error for Task: ({key}).",
-                cm.output,
+                f"[Try 3 of 3] Task Timeout Error for Task: ({key})." in cm.output
             )
 
             executor.heartbeat()
-            self.assertEqual(dict(executor.task_publish_retries), {})
-            self.assertEqual(0, len(executor.queued_tasks), "Task should no longer be in queue")
-            self.assertEqual(executor.event_buffer[('fail', 'fake_simple_ti', when, 0)][0], State.FAILED)
+            assert dict(executor.task_publish_retries) == {}
+            assert 0 == len(executor.queued_tasks), "Task should no longer be in queue"
+            assert executor.event_buffer[('fail', 'fake_simple_ti', when, 0)][0] == State.FAILED
 
     @pytest.mark.quarantined
     @pytest.mark.backend("mysql", "postgres")
@@ -277,8 +267,8 @@ class TestCeleryExecutor(unittest.TestCase):
             executor.tasks = {'key': FakeCeleryResult()}
             executor.bulk_state_fetcher._get_many_using_multiprocessing(executor.tasks.values())
 
-        self.assertTrue(any(celery_executor.CELERY_FETCH_ERR_MSG_HEADER in line for line in cm.output))
-        self.assertTrue(any("Exception" in line for line in cm.output))
+        assert any(celery_executor.CELERY_FETCH_ERR_MSG_HEADER in line for line in cm.output)
+        assert any("Exception" in line for line in cm.output)
 
     @mock.patch('airflow.executors.celery_executor.CeleryExecutor.sync')
     @mock.patch('airflow.executors.celery_executor.CeleryExecutor.trigger_tasks')
@@ -323,7 +313,7 @@ class TestCeleryExecutor(unittest.TestCase):
         tis = [key1]
         executor = celery_executor.CeleryExecutor()
 
-        self.assertEqual(executor.try_adopt_task_instances(tis), tis)
+        assert executor.try_adopt_task_instances(tis) == tis
 
     @pytest.mark.backend("mysql", "postgres")
     def test_try_adopt_task_instances(self):
@@ -346,24 +336,21 @@ class TestCeleryExecutor(unittest.TestCase):
 
         tis = [ti1, ti2]
         executor = celery_executor.CeleryExecutor()
-        self.assertEqual(executor.running, set())
-        self.assertEqual(executor.adopted_task_timeouts, {})
-        self.assertEqual(executor.tasks, {})
+        assert executor.running == set()
+        assert executor.adopted_task_timeouts == {}
+        assert executor.tasks == {}
 
         not_adopted_tis = executor.try_adopt_task_instances(tis)
 
         key_1 = TaskInstanceKey(dag.dag_id, task_1.task_id, exec_date, try_number)
         key_2 = TaskInstanceKey(dag.dag_id, task_2.task_id, exec_date, try_number)
-        self.assertEqual(executor.running, {key_1, key_2})
-        self.assertEqual(
-            dict(executor.adopted_task_timeouts),
-            {
-                key_1: queued_dttm + executor.task_adoption_timeout,
-                key_2: queued_dttm + executor.task_adoption_timeout,
-            },
-        )
-        self.assertEqual(executor.tasks, {key_1: AsyncResult("231"), key_2: AsyncResult("232")})
-        self.assertEqual(not_adopted_tis, [])
+        assert executor.running == {key_1, key_2}
+        assert dict(executor.adopted_task_timeouts) == {
+            key_1: queued_dttm + executor.task_adoption_timeout,
+            key_2: queued_dttm + executor.task_adoption_timeout,
+        }
+        assert executor.tasks == {key_1: AsyncResult("231"), key_2: AsyncResult("232")}
+        assert not_adopted_tis == []
 
     @pytest.mark.backend("mysql", "postgres")
     def test_check_for_stalled_adopted_tasks(self):
@@ -387,9 +374,9 @@ class TestCeleryExecutor(unittest.TestCase):
         }
         executor.tasks = {key_1: AsyncResult("231"), key_2: AsyncResult("232")}
         executor.sync()
-        self.assertEqual(executor.event_buffer, {key_1: (State.FAILED, None), key_2: (State.FAILED, None)})
-        self.assertEqual(executor.tasks, {})
-        self.assertEqual(executor.adopted_task_timeouts, {})
+        assert executor.event_buffer == {key_1: (State.FAILED, None), key_2: (State.FAILED, None)}
+        assert executor.tasks == {}
+        assert executor.adopted_task_timeouts == {}
 
 
 def test_operation_timeout_config():
@@ -438,10 +425,10 @@ class TestBulkStateFetcher(unittest.TestCase):
 
         # Assert called - ignore order
         mget_args, _ = mock_mget.call_args
-        self.assertEqual(set(mget_args[0]), {b'celery-task-meta-456', b'celery-task-meta-123'})
+        assert set(mget_args[0]) == {b'celery-task-meta-456', b'celery-task-meta-123'}
         mock_mget.assert_called_once_with(mock.ANY)
 
-        self.assertEqual(result, {'123': ('SUCCESS', None), '456': ("PENDING", None)})
+        assert result == {'123': ('SUCCESS', None), '456': ("PENDING", None)}
 
     @mock.patch("celery.backends.database.DatabaseBackend.ResultSession")
     @pytest.mark.integration("redis")
@@ -465,7 +452,7 @@ class TestBulkStateFetcher(unittest.TestCase):
             ]
         )
 
-        self.assertEqual(result, {'123': ('SUCCESS', None), '456': ("PENDING", None)})
+        assert result == {'123': ('SUCCESS', None), '456': ("PENDING", None)}
 
     @pytest.mark.integration("redis")
     @pytest.mark.integration("rabbitmq")
@@ -483,4 +470,4 @@ class TestBulkStateFetcher(unittest.TestCase):
                     ]
                 )
 
-        self.assertEqual(result, {'123': ('SUCCESS', None), '456': ("PENDING", None)})
+        assert result == {'123': ('SUCCESS', None), '456': ("PENDING", None)}
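
Two details in the celery hunks deserve a note: assertEqual's msg= argument survives as the expression after the comma in a plain assert, and the multi-line assertIn conversions drop the trailing comma so that adjacent string literals fuse into a single operand of in (keeping the comma would have built a two-element tuple that is always truthy). A sketch of both, with invented log text:

    def test_queue_drained_and_log_line_present():
        queued = {}
        output = ["INFO:executor:[Try 1 of 3] Task Timeout Error for Task: (key)."]

        # assertEqual(0, len(queued), "msg") becomes assert-with-message:
        assert 0 == len(queued), "Task should no longer be queued"

        # Adjacent string literals concatenate before 'in' applies, so this
        # is one membership test, not a tuple.
        assert (
            "INFO:executor:"
            "[Try 1 of 3] Task Timeout Error for Task: (key)." in output
        )
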
diff --git a/tests/executors/test_dask_executor.py b/tests/executors/test_dask_executor.py
index 09a22f7..d23f94b 100644
--- a/tests/executors/test_dask_executor.py
+++ b/tests/executors/test_dask_executor.py
@@ -62,12 +62,12 @@ class TestBaseDask(unittest.TestCase):
                 )
 
         # both tasks should have finished
-        self.assertTrue(success_future.done())
-        self.assertTrue(fail_future.done())
+        assert success_future.done()
+        assert fail_future.done()
 
         # check task exceptions
-        self.assertTrue(success_future.exception() is None)
-        self.assertTrue(fail_future.exception() is not None)
+        assert success_future.exception() is None
+        assert fail_future.exception() is not None
 
 
 class TestDaskExecutor(TestBaseDask):
diff --git a/tests/executors/test_executor_loader.py b/tests/executors/test_executor_loader.py
index 63ef8dd..43dbbbd 100644
--- a/tests/executors/test_executor_loader.py
+++ b/tests/executors/test_executor_loader.py
@@ -56,19 +56,19 @@ class TestExecutorLoader(unittest.TestCase):
     def test_should_support_executor_from_core(self, executor_name):
         with conf_vars({("core", "executor"): executor_name}):
             executor = ExecutorLoader.get_default_executor()
-            self.assertIsNotNone(executor)
-            self.assertEqual(executor_name, executor.__class__.__name__)
+            assert executor is not None
+            assert executor_name == executor.__class__.__name__
 
     @mock.patch("airflow.plugins_manager.plugins", [FakePlugin()])
     @mock.patch("airflow.plugins_manager.executors_modules", None)
     def test_should_support_plugins(self):
         with conf_vars({("core", "executor"): f"{TEST_PLUGIN_NAME}.FakeExecutor"}):
             executor = ExecutorLoader.get_default_executor()
-            self.assertIsNotNone(executor)
-            self.assertEqual("FakeExecutor", executor.__class__.__name__)
+            assert executor is not None
+            assert "FakeExecutor" == executor.__class__.__name__
 
     def test_should_support_custom_path(self):
         with conf_vars({("core", "executor"): "tests.executors.test_executor_loader.FakeExecutor"}):
             executor = ExecutorLoader.get_default_executor()
-            self.assertIsNotNone(executor)
-            self.assertEqual("FakeExecutor", executor.__class__.__name__)
+            assert executor is not None
+            assert "FakeExecutor" == executor.__class__.__name__
diff --git a/tests/executors/test_kubernetes_executor.py b/tests/executors/test_kubernetes_executor.py
index 9d8d72f..9abb328 100644
--- a/tests/executors/test_kubernetes_executor.py
+++ b/tests/executors/test_kubernetes_executor.py
@@ -83,7 +83,7 @@ class TestAirflowKubernetesScheduler(unittest.TestCase):
     def test_create_pod_id(self):
         for dag_id, task_id in self._cases():
             pod_name = PodGenerator.make_unique_pod_id(create_pod_id(dag_id, task_id))
-            self.assertTrue(self._is_valid_pod_id(pod_name))
+            assert self._is_valid_pod_id(pod_name)
 
     @unittest.skipIf(AirflowKubernetesScheduler is None, 'kubernetes python package is not installed')
     @mock.patch("airflow.kubernetes.pod_generator.PodGenerator")
@@ -91,32 +91,30 @@ class TestAirflowKubernetesScheduler(unittest.TestCase):
     def test_get_base_pod_from_template(self, mock_kubeconfig, mock_generator):
         pod_template_file_path = "/bar/biz"
         get_base_pod_from_template(pod_template_file_path, None)
-        self.assertEqual("deserialize_model_dict", mock_generator.mock_calls[0][0])
-        self.assertEqual(pod_template_file_path, mock_generator.mock_calls[0][1][0])
+        assert "deserialize_model_dict" == mock_generator.mock_calls[0][0]
+        assert pod_template_file_path == mock_generator.mock_calls[0][1][0]
         mock_kubeconfig.pod_template_file = "/foo/bar"
         get_base_pod_from_template(None, mock_kubeconfig)
-        self.assertEqual("deserialize_model_dict", mock_generator.mock_calls[1][0])
-        self.assertEqual("/foo/bar", mock_generator.mock_calls[1][1][0])
+        assert "deserialize_model_dict" == mock_generator.mock_calls[1][0]
+        assert "/foo/bar" == mock_generator.mock_calls[1][1][0]
 
     def test_make_safe_label_value(self):
         for dag_id, task_id in self._cases():
             safe_dag_id = pod_generator.make_safe_label_value(dag_id)
-            self.assertTrue(self._is_safe_label_value(safe_dag_id))
+            assert self._is_safe_label_value(safe_dag_id)
             safe_task_id = pod_generator.make_safe_label_value(task_id)
-            self.assertTrue(self._is_safe_label_value(safe_task_id))
+            assert self._is_safe_label_value(safe_task_id)
             dag_id = "my_dag_id"
-            self.assertEqual(dag_id, pod_generator.make_safe_label_value(dag_id))
+            assert dag_id == pod_generator.make_safe_label_value(dag_id)
             dag_id = "my_dag_id_" + "a" * 64
-            self.assertEqual(
-                "my_dag_id_" + "a" * 43 + "-0ce114c45", pod_generator.make_safe_label_value(dag_id)
-            )
+            assert "my_dag_id_" + "a" * 43 + "-0ce114c45" == pod_generator.make_safe_label_value(dag_id)
 
     def test_execution_date_serialize_deserialize(self):
         datetime_obj = datetime.now()
         serialized_datetime = pod_generator.datetime_to_label_safe_datestring(datetime_obj)
         new_datetime_obj = pod_generator.label_safe_datestring_to_datetime(serialized_datetime)
 
-        self.assertEqual(datetime_obj, new_datetime_obj)
+        assert datetime_obj == new_datetime_obj
 
 
 class TestKubernetesExecutor(unittest.TestCase):
@@ -172,7 +170,7 @@ class TestKubernetesExecutor(unittest.TestCase):
             kubernetes_executor.sync()
 
             assert mock_kube_client.create_namespaced_pod.called
-            self.assertFalse(kubernetes_executor.task_queue.empty())
+            assert not kubernetes_executor.task_queue.empty()
 
             # Disable the ApiException
             mock_kube_client.create_namespaced_pod.side_effect = None
@@ -180,7 +178,7 @@ class TestKubernetesExecutor(unittest.TestCase):
             # Execute the task without errors should empty the queue
             kubernetes_executor.sync()
             assert mock_kube_client.create_namespaced_pod.called
-            self.assertTrue(kubernetes_executor.task_queue.empty())
+            assert kubernetes_executor.task_queue.empty()
 
     @mock.patch('airflow.executors.kubernetes_executor.KubeConfig')
     @mock.patch('airflow.executors.kubernetes_executor.KubernetesExecutor.sync')
@@ -203,7 +201,7 @@ class TestKubernetesExecutor(unittest.TestCase):
         executor.start()
         key = ('dag_id', 'task_id', 'ex_time', 'try_number1')
         executor._change_state(key, State.RUNNING, 'pod_id', 'default')
-        self.assertTrue(executor.event_buffer[key][0] == State.RUNNING)
+        assert executor.event_buffer[key][0] == State.RUNNING
 
     @mock.patch('airflow.executors.kubernetes_executor.KubernetesJobWatcher')
     @mock.patch('airflow.executors.kubernetes_executor.get_kube_client')
@@ -214,7 +212,7 @@ class TestKubernetesExecutor(unittest.TestCase):
         test_time = timezone.utcnow()
         key = ('dag_id', 'task_id', test_time, 'try_number2')
         executor._change_state(key, State.SUCCESS, 'pod_id', 'default')
-        self.assertTrue(executor.event_buffer[key][0] == State.SUCCESS)
+        assert executor.event_buffer[key][0] == State.SUCCESS
         mock_delete_pod.assert_called_once_with('pod_id', 'default')
 
     @mock.patch('airflow.executors.kubernetes_executor.KubernetesJobWatcher')
@@ -230,7 +228,7 @@ class TestKubernetesExecutor(unittest.TestCase):
         test_time = timezone.utcnow()
         key = ('dag_id', 'task_id', test_time, 'try_number3')
         executor._change_state(key, State.FAILED, 'pod_id', 'default')
-        self.assertTrue(executor.event_buffer[key][0] == State.FAILED)
+        assert executor.event_buffer[key][0] == State.FAILED
         mock_delete_pod.assert_not_called()
 
     # pylint: enable=unused-argument
@@ -249,7 +247,7 @@ class TestKubernetesExecutor(unittest.TestCase):
         executor.start()
         key = ('dag_id', 'task_id', test_time, 'try_number2')
         executor._change_state(key, State.SUCCESS, 'pod_id', 'default')
-        self.assertTrue(executor.event_buffer[key][0] == State.SUCCESS)
+        assert executor.event_buffer[key][0] == State.SUCCESS
         mock_delete_pod.assert_not_called()
 
     @mock.patch('airflow.executors.kubernetes_executor.KubernetesJobWatcher')
@@ -264,7 +262,7 @@ class TestKubernetesExecutor(unittest.TestCase):
         executor.start()
         key = ('dag_id', 'task_id', 'ex_time', 'try_number2')
         executor._change_state(key, State.FAILED, 'pod_id', 'test-namespace')
-        self.assertTrue(executor.event_buffer[key][0] == State.FAILED)
+        assert executor.event_buffer[key][0] == State.FAILED
         mock_delete_pod.assert_called_once_with('pod_id', 'test-namespace')
 
     @mock.patch('airflow.executors.kubernetes_executor.get_kube_client')
@@ -278,20 +276,17 @@ class TestKubernetesExecutor(unittest.TestCase):
             )
         )
         executor.adopt_launched_task(mock_kube_client, pod=pod, pod_ids=pod_ids)
-        self.assertEqual(
-            mock_kube_client.patch_namespaced_pod.call_args[1],
-            {
-                'body': {
-                    'metadata': {
-                        'labels': {'airflow-worker': 'modified', 'dag_id': 'dag', 'task_id': 'task'},
-                        'name': 'foo',
-                    }
-                },
-                'name': 'foo',
-                'namespace': None,
+        assert mock_kube_client.patch_namespaced_pod.call_args[1] == {
+            'body': {
+                'metadata': {
+                    'labels': {'airflow-worker': 'modified', 'dag_id': 'dag', 'task_id': 'task'},
+                    'name': 'foo',
+                }
             },
-        )
-        self.assertDictEqual(pod_ids, {})
+            'name': 'foo',
+            'namespace': None,
+        }
+        assert pod_ids == {}
 
     @mock.patch('airflow.executors.kubernetes_executor.get_kube_client')
     def test_not_adopt_unassigned_task(self, mock_kube_client):
@@ -311,5 +306,5 @@ class TestKubernetesExecutor(unittest.TestCase):
             )
         )
         executor.adopt_launched_task(mock_kube_client, pod=pod, pod_ids=pod_ids)
-        self.assertFalse(mock_kube_client.patch_namespaced_pod.called)
-        self.assertDictEqual(pod_ids, {"foobar": {}})
+        assert not mock_kube_client.patch_namespaced_pod.called
+        assert pod_ids == {"foobar": {}}
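
assertDictEqual and its siblings add nothing under pytest: a plain == on dicts still produces a key-by-key diff on failure, courtesy of pytest's assertion rewriting. A minimal example:

    def test_dict_diff_comes_from_assertion_rewriting():
        pod_ids = {"foobar": {}}
        # On failure pytest prints which keys and values differ, so the
        # type-specific assertDictEqual helper is unnecessary.
        assert pod_ids == {"foobar": {}}
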
diff --git a/tests/executors/test_local_executor.py b/tests/executors/test_local_executor.py
index b12f783..92ddd02 100644
--- a/tests/executors/test_local_executor.py
+++ b/tests/executors/test_local_executor.py
@@ -64,7 +64,7 @@ class TestLocalExecutor(unittest.TestCase):
         executor.start()
 
         success_key = 'success {}'
-        self.assertTrue(executor.result_queue.empty())
+        assert executor.result_queue.empty()
 
         execution_date = datetime.datetime.now()
         for i in range(self.TEST_SUCCESS_COMMANDS):
@@ -79,16 +79,16 @@ class TestLocalExecutor(unittest.TestCase):
 
         executor.end()
         # By that time the queues are already shut down, so we cannot check if they are empty
-        self.assertEqual(len(executor.running), 0)
+        assert len(executor.running) == 0
 
         for i in range(self.TEST_SUCCESS_COMMANDS):
             key_id = success_key.format(i)
             key = key_id, 'fake_ti', execution_date, 0
-            self.assertEqual(executor.event_buffer[key][0], State.SUCCESS)
-        self.assertEqual(executor.event_buffer[fail_key][0], State.FAILED)
+            assert executor.event_buffer[key][0] == State.SUCCESS
+        assert executor.event_buffer[fail_key][0] == State.FAILED
 
         expected = self.TEST_SUCCESS_COMMANDS + 1 if parallelism == 0 else parallelism
-        self.assertEqual(executor.workers_used, expected)
+        assert executor.workers_used == expected
 
     def test_execution_subprocess_unlimited_parallelism(self):
         with mock.patch.object(
diff --git a/tests/hooks/test_dbapi.py b/tests/hooks/test_dbapi.py
index fb4ee1c..2cc916d 100644
--- a/tests/hooks/test_dbapi.py
+++ b/tests/hooks/test_dbapi.py
@@ -20,6 +20,8 @@
 import unittest
 from unittest import mock
 
+import pytest
+
 from airflow.hooks.dbapi import DbApiHook
 from airflow.models import Connection
 
@@ -48,7 +50,7 @@ class TestDbApiHook(unittest.TestCase):
 
         self.cur.fetchall.return_value = rows
 
-        self.assertEqual(rows, self.db_hook.get_records(statement))
+        assert rows == self.db_hook.get_records(statement)
 
         assert self.conn.close.call_count == 1
         assert self.cur.close.call_count == 1
@@ -61,7 +63,7 @@ class TestDbApiHook(unittest.TestCase):
 
         self.cur.fetchall.return_value = rows
 
-        self.assertEqual(rows, self.db_hook.get_records(statement, parameters))
+        assert rows == self.db_hook.get_records(statement, parameters)
 
         assert self.conn.close.call_count == 1
         assert self.cur.close.call_count == 1
@@ -71,7 +73,7 @@ class TestDbApiHook(unittest.TestCase):
         statement = "SQL"
         self.cur.fetchall.side_effect = RuntimeError('Great Problems')
 
-        with self.assertRaises(RuntimeError):
+        with pytest.raises(RuntimeError):
             self.db_hook.get_records(statement)
 
         assert self.conn.close.call_count == 1
@@ -88,7 +90,7 @@ class TestDbApiHook(unittest.TestCase):
         assert self.cur.close.call_count == 1
 
         commit_count = 2  # The first and last commit
-        self.assertEqual(commit_count, self.conn.commit.call_count)
+        assert commit_count == self.conn.commit.call_count
 
         sql = f"INSERT INTO {table}  VALUES (%s)"
         for row in rows:
@@ -104,7 +106,7 @@ class TestDbApiHook(unittest.TestCase):
         assert self.cur.close.call_count == 1
 
         commit_count = 2  # The first and last commit
-        self.assertEqual(commit_count, self.conn.commit.call_count)
+        assert commit_count == self.conn.commit.call_count
 
         sql = f"REPLACE INTO {table}  VALUES (%s)"
         for row in rows:
@@ -121,7 +123,7 @@ class TestDbApiHook(unittest.TestCase):
         assert self.cur.close.call_count == 1
 
         commit_count = 2  # The first and last commit
-        self.assertEqual(commit_count, self.conn.commit.call_count)
+        assert commit_count == self.conn.commit.call_count
 
         sql = "INSERT INTO {} ({}) VALUES (%s)".format(table, target_fields[0])
         for row in rows:
@@ -138,7 +140,7 @@ class TestDbApiHook(unittest.TestCase):
         assert self.cur.close.call_count == 1
 
         commit_count = 2 + divmod(len(rows), commit_every)[0]
-        self.assertEqual(commit_count, self.conn.commit.call_count)
+        assert commit_count == self.conn.commit.call_count
 
         sql = f"INSERT INTO {table}  VALUES (%s)"
         for row in rows:
@@ -155,7 +157,7 @@ class TestDbApiHook(unittest.TestCase):
                 port=1,
             )
         )
-        self.assertEqual("conn_type://login:password@host:1/schema", self.db_hook.get_uri())
+        assert "conn_type://login:password@host:1/schema" == self.db_hook.get_uri()
 
     def test_get_uri_schema_none(self):
         self.db_hook.get_connection = mock.MagicMock(
@@ -163,7 +165,7 @@ class TestDbApiHook(unittest.TestCase):
                 conn_type="conn_type", host="host", login="login", password="password", schema=None, port=1
             )
         )
-        self.assertEqual("conn_type://login:password@host:1/", self.db_hook.get_uri())
+        assert "conn_type://login:password@host:1/" == self.db_hook.get_uri()
 
     def test_get_uri_special_characters(self):
         self.db_hook.get_connection = mock.MagicMock(
@@ -176,7 +178,7 @@ class TestDbApiHook(unittest.TestCase):
                 port=1,
             )
         )
-        self.assertEqual("conn_type://logi%23%21+n:pass%2A%21+word@host:1/schema", self.db_hook.get_uri())
+        assert "conn_type://logi%23%21+n:pass%2A%21+word@host:1/schema" == self.db_hook.get_uri()
 
     def test_run_log(self):
         statement = 'SQL'
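
The test_dbapi.py changes above are mechanical: self.assertEqual(a, b) becomes a bare
assert a == b, and the self.assertRaises(...) context manager becomes pytest.raises(...),
which is why the first hunk adds the import pytest line. A minimal sketch of the same two
conversions on a hypothetical test (the hook and test names are illustrative, not taken
from this patch):

    import unittest
    from unittest import mock

    import pytest


    class TestExampleHook(unittest.TestCase):
        def test_get_records(self):
            hook = mock.MagicMock()
            hook.get_records.return_value = [(1,)]
            # was: self.assertEqual([(1,)], hook.get_records("SQL"))
            assert hook.get_records("SQL") == [(1,)]

        def test_get_records_exception(self):
            hook = mock.MagicMock()
            hook.get_records.side_effect = RuntimeError("Great Problems")
            # was: with self.assertRaises(RuntimeError): ...
            with pytest.raises(RuntimeError):
                hook.get_records("SQL")

Run under pytest, a failing plain assert is rewritten to show both operands, so nothing
is lost relative to the unittest helper methods.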
diff --git a/tests/jobs/test_backfill_job.py b/tests/jobs/test_backfill_job.py
index 43cc992..aa221f5 100644
--- a/tests/jobs/test_backfill_job.py
+++ b/tests/jobs/test_backfill_job.py
@@ -107,7 +107,7 @@ class TestBackfillJob(unittest.TestCase):
 
         dag_run.refresh_from_db()
 
-        self.assertEqual(State.FAILED, dag_run.state)
+        assert State.FAILED == dag_run.state
 
     def test_dag_run_with_finished_tasks_set_to_success(self):
         dag = self._get_dummy_dag('dummy_dag')
@@ -131,7 +131,7 @@ class TestBackfillJob(unittest.TestCase):
 
         dag_run.refresh_from_db()
 
-        self.assertEqual(State.SUCCESS, dag_run.state)
+        assert State.SUCCESS == dag_run.state
 
     @pytest.mark.xfail(condition=True, reason="This test is flaky")
     @pytest.mark.backend("postgres", "mysql")
@@ -146,7 +146,7 @@ class TestBackfillJob(unittest.TestCase):
         #    target_dag,
         #    dag_runs=DagRun.find(dag_id='example_trigger_target_dag')
         # )
-        self.assertFalse(task_instances_list)
+        assert not task_instances_list
 
         job = BackfillJob(
             dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_first_depends_on_past=True
@@ -159,7 +159,7 @@ class TestBackfillJob(unittest.TestCase):
         #    dag_runs=DagRun.find(dag_id='example_trigger_target_dag')
         # )
 
-        self.assertTrue(task_instances_list)
+        assert task_instances_list
 
     @pytest.mark.backend("postgres", "mysql")
     def test_backfill_multi_dates(self):
@@ -192,21 +192,18 @@ class TestBackfillJob(unittest.TestCase):
             ("run_this_last", DEFAULT_DATE),
             ("run_this_last", end_date),
         ]
-        self.assertListEqual(
-            [
-                ((dag.dag_id, task_id, when, 1), (State.SUCCESS, None))
-                for (task_id, when) in expected_execution_order
-            ],
-            executor.sorted_tasks,
-        )
+        assert [
+            ((dag.dag_id, task_id, when, 1), (State.SUCCESS, None))
+            for (task_id, when) in expected_execution_order
+        ] == executor.sorted_tasks
 
         session = settings.Session()
         drs = session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).order_by(DagRun.execution_date).all()
 
-        self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
-        self.assertTrue(drs[0].state == State.SUCCESS)
-        self.assertTrue(drs[1].execution_date == DEFAULT_DATE + datetime.timedelta(days=1))
-        self.assertTrue(drs[1].state == State.SUCCESS)
+        assert drs[0].execution_date == DEFAULT_DATE
+        assert drs[0].state == State.SUCCESS
+        assert drs[1].execution_date == DEFAULT_DATE + datetime.timedelta(days=1)
+        assert drs[1].state == State.SUCCESS
 
         dag.clear()
         session.close()
@@ -271,13 +268,10 @@ class TestBackfillJob(unittest.TestCase):
         )
 
         job.run()
-        self.assertListEqual(
-            [
-                ((dag_id, task_id, DEFAULT_DATE, 1), (State.SUCCESS, None))
-                for task_id in expected_execution_order
-            ],
-            executor.sorted_tasks,
-        )
+        assert [
+            ((dag_id, task_id, DEFAULT_DATE, 1), (State.SUCCESS, None))
+            for task_id in expected_execution_order
+        ] == executor.sorted_tasks
 
     def test_backfill_conf(self):
         dag = self._get_dummy_dag('test_backfill_conf')
@@ -296,7 +290,7 @@ class TestBackfillJob(unittest.TestCase):
... 44610 lines suppressed ...
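
The test_backfill_job.py hunks show the multi-line variant of the same cleanup:
self.assertListEqual(expected, actual) collapses into a single assert with the list
comprehension inlined on the left-hand side, and self.assertTrue(x == y) becomes the
more direct assert x == y. A small self-contained sketch of the list comparison (the
dag id, task ids, and states are made up for illustration):

    # Stand-in for executor.sorted_tasks in the real test.
    sorted_tasks = [
        (("dummy_dag", "task_a", "2021-01-01", 1), ("success", None)),
        (("dummy_dag", "task_b", "2021-01-01", 1), ("success", None)),
    ]
    expected_execution_order = ["task_a", "task_b"]

    # was: self.assertListEqual(expected_list, executor.sorted_tasks)
    assert [
        (("dummy_dag", task_id, "2021-01-01", 1), ("success", None))
        for task_id in expected_execution_order
    ] == sorted_tasks

The plain form also fails more usefully: assertTrue(x == y) only reports
"False is not true", while assert x == y under pytest prints both compared values.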