diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index 4ad2fbc71c8c1..f6c35decfd30b 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -77,6 +77,7 @@ jobs:
 
     - name: Install pandas in editable mode
       id: build-editable
+      if: ${{ steps.build.outcome == 'success' && always() }}
       uses: ./.github/actions/build_pandas
       with:
         editable: true
diff --git a/.github/workflows/deprecation-tracking-bot.yml b/.github/workflows/deprecation-tracking-bot.yml
index c0d871ed54ed6..b3f9bcd840c68 100644
--- a/.github/workflows/deprecation-tracking-bot.yml
+++ b/.github/workflows/deprecation-tracking-bot.yml
@@ -1,11 +1,13 @@
+# This bot updates the issue whose number is DEPRECATION_TRACKER_ISSUE
+# with the number of the PR that introduced the deprecation.
+
+# It runs on pushes to main and triggers if the PR linked to the merged commit has the "Deprecate" label.
 name: Deprecations Bot
 
 on:
-  pull_request:
+  push:
     branches:
       - main
-    types:
-      [closed]
 
 
 permissions:
@@ -15,17 +17,49 @@ jobs:
   deprecation_update:
     permissions:
       issues: write
-    if: >-
-      contains(github.event.pull_request.labels.*.name, 'Deprecate') && github.event.pull_request.merged == true
     runs-on: ubuntu-22.04
     env:
       DEPRECATION_TRACKER_ISSUE: 50578
     steps:
-    - name: Checkout
-      run: |
-        echo "Adding deprecation PR number to deprecation tracking issue"
-        export PR=${{ github.event.pull_request.number }}
-        BODY=$(curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" https://api.github.com/repos/${{ github.repository }}/issues/${DEPRECATION_TRACKER_ISSUE} |
-          python3 -c "import sys, json, os; x = {'body': json.load(sys.stdin)['body']}; pr = os.environ['PR']; x['body'] += f'\n- [ ] #{pr}'; print(json.dumps(x))")
-        echo ${BODY}
-        curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -X PATCH -d "${BODY}" https://api.github.com/repos/${{ github.repository }}/issues/${DEPRECATION_TRACKER_ISSUE}
+    - uses: actions/github-script@v6
+      id: update-deprecation-issue
+      with:
+        script: |
+          body = await github.rest.issues.get({
+            owner: context.repo.owner,
+            repo: context.repo.repo,
+            issue_number: ${{ env.DEPRECATION_TRACKER_ISSUE }},
+          })
+          body = body["data"]["body"];
+          linkedPRs = await github.rest.repos.listPullRequestsAssociatedWithCommit({
+            owner: context.repo.owner,
+            repo: context.repo.repo,
+            commit_sha: '${{ github.sha }}'
+          })
+          linkedPRs = linkedPRs["data"];
+          console.log(linkedPRs);
+          if (linkedPRs.length > 0) {
+            console.log("Found linked PR");
+            linkedPR = linkedPRs[0]
+            isDeprecation = false
+            for (label of linkedPR["labels"]) {
+              if (label["name"] == "Deprecate") {
+                isDeprecation = true;
+                break;
+              }
+            }
+
+            PR_NUMBER = linkedPR["number"];
+
+            body += ("\n- [ ] #" + PR_NUMBER);
+            if (isDeprecation) {
+              console.log("PR is a deprecation PR. Printing new body of issue");
+              console.log(body);
+              github.rest.issues.update({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                issue_number: ${{ env.DEPRECATION_TRACKER_ISSUE }},
+                body: body
+              })
+            }
+          }
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index 9ebe147008451..bd104af4a8d9e 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -230,7 +230,7 @@ jobs:
           /opt/python/cp39-cp39/bin/python -m venv ~/virtualenvs/pandas-dev
           . ~/virtualenvs/pandas-dev/bin/activate
           python -m pip install -U pip wheel setuptools meson[ninja]==1.0.1 meson-python==0.13.1
-          python -m pip install --no-cache-dir versioneer[toml] cython numpy python-dateutil pytz pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1
+          python -m pip install --no-cache-dir versioneer[toml] cython numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1
           python -m pip install --no-cache-dir --no-build-isolation -e .
           python -m pip list --no-cache-dir
           export PANDAS_CI=1
@@ -268,7 +268,7 @@ jobs:
           /opt/python/cp39-cp39/bin/python -m venv ~/virtualenvs/pandas-dev
           . ~/virtualenvs/pandas-dev/bin/activate
           python -m pip install -U pip wheel setuptools meson-python==0.13.1 meson[ninja]==1.0.1
-          python -m pip install --no-cache-dir versioneer[toml] cython numpy python-dateutil pytz pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1
+          python -m pip install --no-cache-dir versioneer[toml] cython numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1
           python -m pip install --no-cache-dir --no-build-isolation -e .
           python -m pip list --no-cache-dir
 
@@ -337,10 +337,10 @@ jobs:
         run: |
           python --version
           python -m pip install --upgrade pip setuptools wheel
-          python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy
+          python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy
           python -m pip install git+https://github.com/nedbat/coveragepy.git
           python -m pip install versioneer[toml]
-          python -m pip install python-dateutil pytz cython hypothesis>=6.46.1 pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-cov pytest-asyncio>=0.17
+          python -m pip install python-dateutil pytz cython hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-cov pytest-asyncio>=0.17
           python -m pip list
 
       - name: Build Pandas
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 0a508a8b1701f..f1f9646054132 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -110,9 +110,12 @@ jobs:
           path: ./dist
 
       - name: Build wheels
-        uses: pypa/cibuildwheel@v2.13.0
-        with:
-          package-dir: ./dist/${{ needs.build_sdist.outputs.sdist_file }}
+        uses: pypa/cibuildwheel@v2.13.1
+        # TODO: Build wheels from sdist again
+        # There appears to be a race condition within
+        # GitHub that leaves the sdist missing files
+        #with:
+        #  package-dir: ./dist/${{ needs.build_sdist.outputs.sdist_file }}
         env:
           CIBW_BUILD: ${{ matrix.python[0] }}-${{ matrix.buildplat[1] }}
 
@@ -137,7 +140,7 @@ jobs:
         shell: pwsh
         run: |
           $TST_CMD = @"
-          python -m pip install pytz six numpy python-dateutil tzdata>=2022.1 hypothesis>=6.46.1 pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-asyncio>=0.17;
+          python -m pip install pytz six numpy python-dateutil tzdata>=2022.1 hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17;
           python -m pip install --find-links=pandas\wheelhouse --no-index pandas;
           python -c `'import pandas as pd; pd.test()`';
           "@
@@ -156,7 +159,7 @@ jobs:
           PANDAS_STAGING_UPLOAD_TOKEN: ${{ secrets.PANDAS_STAGING_UPLOAD_TOKEN }}
           PANDAS_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.PANDAS_NIGHTLY_UPLOAD_TOKEN }}
         # trigger an upload to
-        # https://anaconda.org/scipy-wheels-nightly/pandas
+        # https://anaconda.org/scientific-python-nightly-wheels/pandas
         # for cron jobs or "Run workflow" (restricted to main branch).
         # Tags will upload to
         # https://anaconda.org/multibuild-wheels-staging/pandas
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 4c0f3ddd826b7..6617b3c8b4cca 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -57,6 +57,38 @@
     },
 }
 
+# These aggregations don't have a kernel implemented for them yet
+_numba_unsupported_methods = [
+    "all",
+    "any",
+    "bfill",
+    "count",
+    "cumcount",
+    "cummax",
+    "cummin",
+    "cumprod",
+    "cumsum",
+    "describe",
+    "diff",
+    "ffill",
+    "first",
+    "head",
+    "last",
+    "median",
+    "nunique",
+    "pct_change",
+    "prod",
+    "quantile",
+    "rank",
+    "sem",
+    "shift",
+    "size",
+    "skew",
+    "tail",
+    "unique",
+    "value_counts",
+]
+
 
 class ApplyDictReturn:
     def setup(self):
@@ -453,9 +485,10 @@ class GroupByMethods:
         ],
         ["direct", "transformation"],
         [1, 5],
+        ["cython", "numba"],
     ]
 
-    def setup(self, dtype, method, application, ncols):
+    def setup(self, dtype, method, application, ncols, engine):
         if method in method_blocklist.get(dtype, {}):
             raise NotImplementedError  # skip benchmark
 
@@ -474,6 +507,19 @@ def setup(self, dtype, method, application, ncols):
             # DataFrameGroupBy doesn't have these methods
             raise NotImplementedError
 
+        # Numba currently doesn't support multiple transform
+        # functions or strings for transform, grouping on
+        # multiple columns, or the datetime dtype, and we
+        # lack numba kernels for a number of methods.
+        if engine == "numba" and (
+            method in _numba_unsupported_methods
+            or ncols > 1
+            or application == "transformation"
+            or dtype == "datetime"
+        ):
+            # skip benchmark
+            raise NotImplementedError
+
         if method == "describe":
             ngroups = 20
         elif method == "skew":
@@ -505,17 +551,30 @@ def setup(self, dtype, method, application, ncols):
         if len(cols) == 1:
             cols = cols[0]
 
+        # Not everything supports the engine keyword yet
+        kwargs = {}
+        if engine == "numba":
+            kwargs["engine"] = engine
+
         if application == "transformation":
-            self.as_group_method = lambda: df.groupby("key")[cols].transform(method)
-            self.as_field_method = lambda: df.groupby(cols)["key"].transform(method)
+            self.as_group_method = lambda: df.groupby("key")[cols].transform(
+                method, **kwargs
+            )
+            self.as_field_method = lambda: df.groupby(cols)["key"].transform(
+                method, **kwargs
+            )
         else:
-            self.as_group_method = getattr(df.groupby("key")[cols], method)
-            self.as_field_method = getattr(df.groupby(cols)["key"], method)
+            self.as_group_method = partial(
+                getattr(df.groupby("key")[cols], method), **kwargs
+            )
+            self.as_field_method = partial(
+                getattr(df.groupby(cols)["key"], method), **kwargs
+            )
 
-    def time_dtype_as_group(self, dtype, method, application, ncols):
+    def time_dtype_as_group(self, dtype, method, application, ncols, engine):
         self.as_group_method()
 
-    def time_dtype_as_field(self, dtype, method, application, ncols):
+    def time_dtype_as_field(self, dtype, method, application, ncols, engine):
         self.as_field_method()
 
 
@@ -532,8 +591,12 @@ class GroupByCythonAgg:
         [
             "sum",
             "prod",
-            "min",
-            "max",
+            # TODO: uncomment min/max
+            # Currently, min/max are implemented very inefficiently
+            # because they re-use the Window min/max kernel,
+            # so they would time out the ASVs
+            # "min",
+            # "max",
             "mean",
             "median",
             "var",
@@ -554,6 +617,22 @@ def time_frame_agg(self, dtype, method):
         self.df.groupby("key").agg(method)
 
 
+class GroupByNumbaAgg(GroupByCythonAgg):
+    """
+    Benchmarks specifically targeting our numba aggregation algorithms
+    (using a big enough dataframe with simple key, so a large part of the
+    time is actually spent in the grouped aggregation).
+    """
+
+    def setup(self, dtype, method):
+        if method in _numba_unsupported_methods:
+            raise NotImplementedError
+        super().setup(dtype, method)
+
+    def time_frame_agg(self, dtype, method):
+        self.df.groupby("key").agg(method, engine="numba")
+
+
 class GroupByCythonAggEaDtypes:
     """
     Benchmarks specifically targeting our cython aggregation algorithms
diff --git a/asv_bench/benchmarks/multiindex_object.py b/asv_bench/benchmarks/multiindex_object.py
index 9c997b5386eaa..87dcdb16fa647 100644
--- a/asv_bench/benchmarks/multiindex_object.py
+++ b/asv_bench/benchmarks/multiindex_object.py
@@ -396,4 +396,30 @@ def time_putmask_all_different(self):
         self.midx.putmask(self.mask, self.midx_values_different)
 
 
+class Append:
+    params = ["datetime64[ns]", "int64", "string"]
+    param_names = ["dtype"]
+
+    def setup(self, dtype):
+        N1 = 1000
+        N2 = 500
+        left_level1 = range(N1)
+        right_level1 = range(N1, N1 + N1)
+
+        if dtype == "datetime64[ns]":
+            level2 = date_range(start="2000-01-01", periods=N2)
+        elif dtype == "int64":
+            level2 = range(N2)
+        elif dtype == "string":
+            level2 = tm.makeStringIndex(N2)
+        else:
+            raise NotImplementedError
+
+        self.left = MultiIndex.from_product([left_level1, level2])
+        self.right = MultiIndex.from_product([right_level1, level2])
+
+    def time_append(self, dtype):
+        self.left.append(self.right)
+
+
 from .pandas_vb_common import setup  # noqa: F401 isort:skip
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 9e50b93e173c4..472bd78e4d3bc 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -105,43 +105,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         pandas.errors.UnsupportedFunctionCall \
         pandas.test \
         pandas.NaT \
-        pandas.Timestamp.as_unit \
-        pandas.Timestamp.ctime \
-        pandas.Timestamp.date \
-        pandas.Timestamp.dst \
-        pandas.Timestamp.isocalendar \
-        pandas.Timestamp.isoweekday \
-        pandas.Timestamp.strptime \
-        pandas.Timestamp.time \
-        pandas.Timestamp.timetuple \
-        pandas.Timestamp.timetz \
-        pandas.Timestamp.to_datetime64 \
-        pandas.Timestamp.toordinal \
-        pandas.Timestamp.tzname \
-        pandas.Timestamp.utcoffset \
-        pandas.Timestamp.utctimetuple \
-        pandas.Timestamp.weekday \
-        pandas.arrays.DatetimeArray \
-        pandas.Timedelta.view \
-        pandas.Timedelta.as_unit \
-        pandas.Timedelta.ceil \
-        pandas.Timedelta.floor \
-        pandas.Timedelta.round \
-        pandas.Timedelta.to_pytimedelta \
-        pandas.Timedelta.to_timedelta64 \
-        pandas.Timedelta.to_numpy \
-        pandas.Timedelta.total_seconds \
-        pandas.arrays.TimedeltaArray \
-        pandas.Period.asfreq \
-        pandas.Period.now \
-        pandas.arrays.PeriodArray \
-        pandas.CategoricalDtype.categories \
-        pandas.CategoricalDtype.ordered \
-        pandas.Categorical.dtype \
-        pandas.Categorical.categories \
-        pandas.Categorical.ordered \
-        pandas.Categorical.codes \
-        pandas.Categorical.__array__ \
         pandas.SparseDtype \
         pandas.DatetimeTZDtype.unit \
         pandas.DatetimeTZDtype.tz \
@@ -263,41 +226,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         pandas.core.window.ewm.ExponentialMovingWindow.cov \
         pandas.api.indexers.BaseIndexer \
         pandas.api.indexers.VariableOffsetWindowIndexer \
-        pandas.core.groupby.DataFrameGroupBy.diff \
-        pandas.core.groupby.DataFrameGroupBy.ffill \
-        pandas.core.groupby.DataFrameGroupBy.max \
-        pandas.core.groupby.DataFrameGroupBy.median \
-        pandas.core.groupby.DataFrameGroupBy.min \
-        pandas.core.groupby.DataFrameGroupBy.ohlc \
-        pandas.core.groupby.DataFrameGroupBy.pct_change \
-        pandas.core.groupby.DataFrameGroupBy.prod \
-        pandas.core.groupby.DataFrameGroupBy.sem \
-        pandas.core.groupby.DataFrameGroupBy.shift \
-        pandas.core.groupby.DataFrameGroupBy.size \
-        pandas.core.groupby.DataFrameGroupBy.skew \
-        pandas.core.groupby.DataFrameGroupBy.std \
-        pandas.core.groupby.DataFrameGroupBy.sum \
-        pandas.core.groupby.DataFrameGroupBy.var \
-        pandas.core.groupby.SeriesGroupBy.diff \
-        pandas.core.groupby.SeriesGroupBy.fillna \
-        pandas.core.groupby.SeriesGroupBy.ffill \
-        pandas.core.groupby.SeriesGroupBy.max \
-        pandas.core.groupby.SeriesGroupBy.median \
-        pandas.core.groupby.SeriesGroupBy.min \
-        pandas.core.groupby.SeriesGroupBy.nunique \
-        pandas.core.groupby.SeriesGroupBy.ohlc \
-        pandas.core.groupby.SeriesGroupBy.pct_change \
-        pandas.core.groupby.SeriesGroupBy.prod \
-        pandas.core.groupby.SeriesGroupBy.sem \
-        pandas.core.groupby.SeriesGroupBy.shift \
-        pandas.core.groupby.SeriesGroupBy.size \
-        pandas.core.groupby.SeriesGroupBy.skew \
-        pandas.core.groupby.SeriesGroupBy.std \
-        pandas.core.groupby.SeriesGroupBy.sum \
-        pandas.core.groupby.SeriesGroupBy.var \
-        pandas.core.groupby.SeriesGroupBy.hist \
-        pandas.core.groupby.DataFrameGroupBy.plot \
-        pandas.core.groupby.SeriesGroupBy.plot \
         pandas.io.formats.style.Styler \
         pandas.io.formats.style.Styler.from_custom_template \
         pandas.io.formats.style.Styler.set_caption \
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index 91999e151dfd7..0923594f2c840 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -11,7 +11,7 @@ dependencies:
   - meson-python=0.13.1
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-xdist>=2.2.0
   - pytest-asyncio>=0.17.0
diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml
index 2115ee8f7d86a..51c7a97ad6500 100644
--- a/ci/deps/actions-311-downstream_compat.yaml
+++ b/ci/deps/actions-311-downstream_compat.yaml
@@ -12,7 +12,7 @@ dependencies:
   - meson-python=0.13.1
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-xdist>=2.2.0
   - pytest-asyncio>=0.17.0
diff --git a/ci/deps/actions-311-numpydev.yaml b/ci/deps/actions-311-numpydev.yaml
index 5379c2fddec21..2cd4d5f3528f8 100644
--- a/ci/deps/actions-311-numpydev.yaml
+++ b/ci/deps/actions-311-numpydev.yaml
@@ -10,7 +10,7 @@ dependencies:
   - meson-python=0.13.1
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   # Once pytest-cov > 4 comes out, unpin this
   # Right now, a DeprecationWarning related to rsyncdir
diff --git a/ci/deps/actions-311-pyarrownightly.yaml b/ci/deps/actions-311-pyarrownightly.yaml
index 7a82fde475a4b..f24e866af0439 100644
--- a/ci/deps/actions-311-pyarrownightly.yaml
+++ b/ci/deps/actions-311-pyarrownightly.yaml
@@ -11,7 +11,7 @@ dependencies:
   - meson-python=0.13.1
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-xdist>=2.2.0
   - hypothesis>=6.46.1
diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml
index 215b3a7309bf4..66b8650116854 100644
--- a/ci/deps/actions-311.yaml
+++ b/ci/deps/actions-311.yaml
@@ -11,7 +11,7 @@ dependencies:
   - meson-python=0.13.1
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-xdist>=2.2.0
   - pytest-asyncio>=0.17.0
diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml
index 72c7cef5326c3..e1b4fdfb1d897 100644
--- a/ci/deps/actions-39-minimum_versions.yaml
+++ b/ci/deps/actions-39-minimum_versions.yaml
@@ -13,7 +13,7 @@ dependencies:
   - meson-python=0.13.1
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-xdist>=2.2.0
   - pytest-asyncio>=0.17.0
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index 78a0a2097ede6..8ff47dbb9cc95 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -11,7 +11,7 @@ dependencies:
   - meson-python=0.13.1
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-xdist>=2.2.0
   - pytest-asyncio>=0.17.0
diff --git a/ci/deps/actions-pypy-39.yaml b/ci/deps/actions-pypy-39.yaml
index 591ce33fc18af..035395d55eb3a 100644
--- a/ci/deps/actions-pypy-39.yaml
+++ b/ci/deps/actions-pypy-39.yaml
@@ -14,7 +14,7 @@ dependencies:
   - meson-python=0.13.1
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-asyncio>=0.17.0
   - pytest-xdist>=2.2.0
diff --git a/ci/deps/circle-310-arm64.yaml b/ci/deps/circle-310-arm64.yaml
index 1813dbe3a1df5..ca9860fc20742 100644
--- a/ci/deps/circle-310-arm64.yaml
+++ b/ci/deps/circle-310-arm64.yaml
@@ -11,7 +11,7 @@ dependencies:
   - meson-python=0.13.1
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-xdist>=2.2.0
   - pytest-asyncio>=0.17.0
diff --git a/ci/meta.yaml b/ci/meta.yaml
index f02c7eec001fc..09ae0d7253bf7 100644
--- a/ci/meta.yaml
+++ b/ci/meta.yaml
@@ -62,7 +62,7 @@ test:
     - python -c "import pandas; pandas.test(extra_args={{ extra_args }})"  # [python_impl == "cpython"]
   requires:
     - pip
-    - pytest >=7.0.0
+    - pytest >=7.3.2
     - pytest-asyncio >=0.17.0
     - pytest-xdist >=2.2.0
     - pytest-cov
diff --git a/ci/upload_wheels.sh b/ci/upload_wheels.sh
index f760621ea0e6b..3c4aa76c02003 100644
--- a/ci/upload_wheels.sh
+++ b/ci/upload_wheels.sh
@@ -10,7 +10,7 @@ set_upload_vars() {
         export ANACONDA_UPLOAD="true"
     elif [[ "$IS_SCHEDULE_DISPATCH" == "true" ]]; then
         echo scheduled or dispatched event
-        export ANACONDA_ORG="scipy-wheels-nightly"
+        export ANACONDA_ORG="scientific-python-nightly-wheels"
         export TOKEN="$PANDAS_NIGHTLY_UPLOAD_TOKEN"
         export ANACONDA_UPLOAD="true"
     else
@@ -28,12 +28,12 @@ upload_wheels() {
             if compgen -G "./dist/*.gz"; then
                 echo "Found sdist"
                 anaconda -q -t ${TOKEN} upload --skip -u ${ANACONDA_ORG} ./dist/*.gz
-            elif compgen -G "./wheelhouse/*.whl"; then
+                echo "Uploaded sdist"
+            fi
+            if compgen -G "./wheelhouse/*.whl"; then
                 echo "Found wheel"
                 anaconda -q -t ${TOKEN} upload --skip -u ${ANACONDA_ORG} ./wheelhouse/*.whl
-            else
-                echo "Files do not exist"
-                return 1
+                echo "Uploaded wheel"
             fi
             echo "PyPI-style index: https://pypi.anaconda.org/$ANACONDA_ORG/simple"
         fi
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index ea69f0b907d8b..4b9a6ba1e069c 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -119,6 +119,22 @@ Some great resources for learning Git:
 * the `NumPy documentation <https://numpy.org/doc/stable/dev/index.html>`_.
 * Matthew Brett's `Pydagogue <https://matthew-brett.github.io/pydagogue/>`_.
 
+The project follows a forking workflow, described further on this page, whereby
+contributors fork the repository, make changes, and then create a pull request.
+Please be sure to read and follow all the instructions in this guide.
+
+If you are new to contributing to projects through forking on GitHub,
+take a look at the `GitHub documentation for contributing to projects <https://docs.github.com/en/get-started/quickstart/contributing-to-projects>`_.
+GitHub provides a quick tutorial using a test repository that may help you become more familiar
+with forking a repository, cloning a fork, creating a feature branch, pushing changes and
+making pull requests.
+
+Below are some useful resources for learning more about forking and pull requests on GitHub:
+
+* the `GitHub documentation for forking a repo <https://docs.github.com/en/get-started/quickstart/fork-a-repo>`_.
+* the `GitHub documentation for collaborating with pull requests <https://docs.github.com/en/pull-requests/collaborating-with-pull-requests>`_.
+* the `GitHub documentation for working with forks <https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks>`_.
+
 Getting started with Git
 ------------------------
 
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index 90a65e2790c29..e7e637618eecc 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -201,9 +201,9 @@ Installing a nightly build is the quickest way to:
 * Try a new feature that will be shipped in the next release (that is, a feature from a pull-request that was recently merged to the main branch).
 * Check whether a bug you encountered has been fixed since the last release.
 
-You can install the nightly build of pandas using the scipy-wheels-nightly index from the PyPI registry of anaconda.org with the following command::
+You can install the nightly build of pandas using the scientific-python-nightly-wheels index from the PyPI registry of anaconda.org with the following command::
 
-    pip install --pre --extra-index https://pypi.anaconda.org/scipy-wheels-nightly/simple pandas
+    pip install --pre --extra-index https://pypi.anaconda.org/scientific-python-nightly-wheels/simple pandas
 
 Note that first uninstalling pandas might be required to be able to install nightly builds::
 
@@ -216,7 +216,7 @@ pandas is equipped with an exhaustive set of unit tests, covering about 97% of
 the code base as of this writing. To run it on your machine to verify that
 everything is working (and that you have all of the dependencies, soft and hard,
 installed), make sure you have `pytest
-<https://docs.pytest.org/en/latest/>`__ >= 7.0 and `Hypothesis
+<https://docs.pytest.org/en/latest/>`__ >= 7.3.2 and `Hypothesis
 <https://hypothesis.readthedocs.io/en/latest/>`__ >= 6.34.2, then run:
 
 ::
diff --git a/doc/source/getting_started/intro_tutorials/02_read_write.rst b/doc/source/getting_started/intro_tutorials/02_read_write.rst
index dbb1be8c4d875..832c2cc25712f 100644
--- a/doc/source/getting_started/intro_tutorials/02_read_write.rst
+++ b/doc/source/getting_started/intro_tutorials/02_read_write.rst
@@ -99,9 +99,9 @@ strings (``object``).
 .. note::
     When asking for the ``dtypes``, no brackets are used!
     ``dtypes`` is an attribute of a ``DataFrame`` and ``Series``. Attributes
-    of ``DataFrame`` or ``Series`` do not need brackets. Attributes
-    represent a characteristic of a ``DataFrame``/``Series``, whereas a
-    method (which requires brackets) *do* something with the
+    of a ``DataFrame`` or ``Series`` do not need brackets. Attributes
+    represent a characteristic of a ``DataFrame``/``Series``, whereas
+    methods (which require brackets) *do* something with the
     ``DataFrame``/``Series`` as introduced in the :ref:`first tutorial <10min_tut_01_tableoriented>`.
 
 .. raw:: html
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index eec33afb9ab95..65892f01326e4 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -675,7 +675,7 @@ matching index:
 Value counts (histogramming) / mode
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The :meth:`~Series.value_counts` Series method and top-level function computes a histogram
+The :meth:`~Series.value_counts` Series method computes a histogram
 of a 1D array of values. It can also be used as a function on regular arrays:
 
 .. ipython:: python
@@ -684,7 +684,6 @@ of a 1D array of values. It can also be used as a function on regular arrays:
    data
    s = pd.Series(data)
    s.value_counts()
-   pd.value_counts(data)
 
 The :meth:`~DataFrame.value_counts` method can be used to count combinations across multiple columns.
 By default all columns are used but a subset can be selected using the ``subset`` argument.
@@ -733,7 +732,6 @@ normally distributed data into equal-size quartiles like so:
    arr = np.random.randn(30)
    factor = pd.qcut(arr, [0, 0.25, 0.5, 0.75, 1])
    factor
-   pd.value_counts(factor)
 
 We can also pass infinite values to define the bins:
 
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 90a8bd868b60b..84a78ace8d7c7 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -2111,7 +2111,8 @@ Reading from a JSON string:
 
 .. ipython:: python
 
-   pd.read_json(json)
+   from io import StringIO
+   pd.read_json(StringIO(json))
 
 Reading from a file:
 
@@ -2135,6 +2136,7 @@ Preserve string indices:
 
 .. ipython:: python
 
+   from io import StringIO
    si = pd.DataFrame(
        np.zeros((4, 4)), columns=list(range(4)), index=[str(i) for i in range(4)]
    )
@@ -2143,7 +2145,7 @@ Preserve string indices:
    si.columns
    json = si.to_json()
 
-   sij = pd.read_json(json, convert_axes=False)
+   sij = pd.read_json(StringIO(json), convert_axes=False)
    sij
    sij.index
    sij.columns
@@ -2152,18 +2154,19 @@ Dates written in nanoseconds need to be read back in nanoseconds:
 
 .. ipython:: python
 
+   from io import StringIO
    json = dfj2.to_json(date_unit="ns")
 
    # Try to parse timestamps as milliseconds -> Won't Work
-   dfju = pd.read_json(json, date_unit="ms")
+   dfju = pd.read_json(StringIO(json), date_unit="ms")
    dfju
 
    # Let pandas detect the correct precision
-   dfju = pd.read_json(json)
+   dfju = pd.read_json(StringIO(json))
    dfju
 
    # Or specify that all timestamps are in nanoseconds
-   dfju = pd.read_json(json, date_unit="ns")
+   dfju = pd.read_json(StringIO(json), date_unit="ns")
    dfju
 
 By setting the ``dtype_backend`` argument you can control the default dtypes used for the resulting DataFrame.
@@ -2251,11 +2254,12 @@ For line-delimited json files, pandas can also return an iterator which reads in
 
 .. ipython:: python
 
+  from io import StringIO
   jsonl = """
       {"a": 1, "b": 2}
       {"a": 3, "b": 4}
   """
-  df = pd.read_json(jsonl, lines=True)
+  df = pd.read_json(StringIO(jsonl), lines=True)
   df
   df.to_json(orient="records", lines=True)
 
diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst
index ed58554896a4f..443fdd4f59e3f 100644
--- a/doc/source/user_guide/missing_data.rst
+++ b/doc/source/user_guide/missing_data.rst
@@ -551,13 +551,6 @@ For a DataFrame, you can specify individual values by column:
 
    df.replace({"a": 0, "b": 5}, 100)
 
-Instead of replacing with specified values, you can treat all given values as
-missing and interpolate over them:
-
-.. ipython:: python
-
-   ser.replace([1, 2, 3], method="pad")
-
 .. _missing_data.replace_expression:
 
 String/regular expression replacement
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 5c6435fdaac22..f22a506499cf4 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -59,9 +59,9 @@
     "\n",
     "### Formatting Values\n",
     "\n",
-    "The [Styler][styler] distinguishes the *display* value from the *actual* value, in both data values and index or columns headers. To control the display value, the text is printed in each cell as string, and we can use the [.format()][formatfunc] and [.format_index()][formatfuncindex] methods to manipulate this according to a [format spec string][format] or a callable that takes a single value and returns a string. It is possible to define this for the whole table, or index, or for individual columns, or MultiIndex levels. We can also overwrite index names\n",
+    "The [Styler][styler] distinguishes the *display* value from the *actual* value, in both data values and index or columns headers. To control the display value, the text is printed in each cell as a string, and we can use the [.format()][formatfunc] and [.format_index()][formatfuncindex] methods to manipulate this according to a [format spec string][format] or a callable that takes a single value and returns a string. It is possible to define this for the whole table, or index, or for individual columns, or MultiIndex levels. We can also overwrite index names.\n",
     "\n",
-    "Additionally, the format function has a **precision** argument to specifically help formatting floats, as well as **decimal** and **thousands** separators to support other locales, an **na_rep** argument to display missing data, and an **escape** and **hyperlinks** arguments to help displaying safe-HTML or safe-LaTeX. The default formatter is configured to adopt pandas' global options such as `styler.format.precision` option, controllable using `with pd.option_context('format.precision', 2):` \n",
+    "Additionally, the format function has a **precision** argument to specifically help format floats, as well as **decimal** and **thousands** separators to support other locales, an **na_rep** argument to display missing data, and an **escape** and **hyperlinks** arguments to help displaying safe-HTML or safe-LaTeX. The default formatter is configured to adopt pandas' global options such as `styler.format.precision` option, controllable using `with pd.option_context('format.precision', 2):`\n",
     "\n",
     "[styler]: ../reference/api/pandas.io.formats.style.Styler.rst\n",
     "[format]: https://docs.python.org/3/library/string.html#format-specification-mini-language\n",
diff --git a/doc/source/user_guide/timedeltas.rst b/doc/source/user_guide/timedeltas.rst
index 3a75aa0b39b1f..a6eb96f91a4bf 100644
--- a/doc/source/user_guide/timedeltas.rst
+++ b/doc/source/user_guide/timedeltas.rst
@@ -259,9 +259,6 @@ an alternative is to divide by another timedelta object. Note that division by t
    # to days
    td / np.timedelta64(1, "D")
 
-   # to months (these are constant months)
-   td / np.timedelta64(1, "M")
-
 Dividing or multiplying a ``timedelta64[ns]`` Series by an integer or integer Series
 yields another ``timedelta64[ns]`` dtypes Series.
 
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 97be46e338c09..fb1c37c1b9073 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -1299,6 +1299,31 @@ frequencies. We will refer to these aliases as *offset aliases*.
    given frequency it will roll to the next value for ``start_date``
    (respectively previous for the ``end_date``)
 
+.. _timeseries.period_aliases:
+
+Period aliases
+~~~~~~~~~~~~~~
+
+A number of string aliases are given to useful common time series
+frequencies. We will refer to these aliases as *period aliases*.
+
+.. csv-table::
+    :header: "Alias", "Description"
+    :widths: 15, 100
+
+    "B", "business day frequency"
+    "D", "calendar day frequency"
+    "W", "weekly frequency"
+    "M", "monthly frequency"
+    "Q", "quarterly frequency"
+    "A, Y", "yearly frequency"
+    "H", "hourly frequency"
+    "T, min", "minutely frequency"
+    "S", "secondly frequency"
+    "L, ms", "milliseconds"
+    "U, us", "microseconds"
+    "N", "nanoseconds"
+
 
 Combining aliases
 ~~~~~~~~~~~~~~~~~
@@ -2083,7 +2108,7 @@ Period dtypes
 dtype similar to the :ref:`timezone aware dtype <timeseries.timezone_series>` (``datetime64[ns, tz]``).
 
 The ``period`` dtype holds the ``freq`` attribute and is represented with
-``period[freq]`` like ``period[D]`` or ``period[M]``, using :ref:`frequency strings <timeseries.offset_aliases>`.
+``period[freq]`` like ``period[D]`` or ``period[M]``, using :ref:`frequency strings <timeseries.period_aliases>`.
 
 .. ipython:: python
 
diff --git a/doc/source/user_guide/window.rst b/doc/source/user_guide/window.rst
index 01bf999a1f99f..ea80a2804256c 100644
--- a/doc/source/user_guide/window.rst
+++ b/doc/source/user_guide/window.rst
@@ -89,6 +89,7 @@ For example, a `weighted mean <https://en.wikipedia.org/wiki/Weighted_arithmetic
 be calculated with :meth:`~Rolling.apply` by specifying a separate column of weights.
 
 .. ipython:: python
+   :okwarning:
 
    def weighted_mean(x):
        arr = np.ones((1, x.shape[1]))
@@ -114,6 +115,7 @@ the ``update`` argument to continue the windowing calculation.
    df.ewm(0.5).mean()
 
 .. ipython:: python
+   :okwarning:
 
    online_ewm = df.head(2).ewm(0.5).online()
    online_ewm.mean()
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index badf3f0f68627..9653226b96196 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -474,19 +474,22 @@ upon serialization. (Related issue :issue:`12997`)
 
 .. code-block:: ipython
 
-    In [4]: a.to_json(date_format='iso')
-    Out[4]: '{"2020-12-28T00:00:00.000Z":0,"2020-12-28T01:00:00.000Z":1,"2020-12-28T02:00:00.000Z":2}'
+    In [4]: from io import StringIO
 
-    In [5]: pd.read_json(a.to_json(date_format='iso'), typ="series").index == a.index
-    Out[5]: array([False, False, False])
+    In [5]: a.to_json(date_format='iso')
+    Out[5]: '{"2020-12-28T00:00:00.000Z":0,"2020-12-28T01:00:00.000Z":1,"2020-12-28T02:00:00.000Z":2}'
+
+    In [6]: pd.read_json(StringIO(a.to_json(date_format='iso')), typ="series").index == a.index
+    Out[6]: array([False, False, False])
 
 *New Behavior*
 
 .. ipython:: python
 
+    from io import StringIO
     a.to_json(date_format='iso')
     # Roundtripping now works
-    pd.read_json(a.to_json(date_format='iso'), typ="series").index == a.index
+    pd.read_json(StringIO(a.to_json(date_format='iso')), typ="series").index == a.index
 
 .. _whatsnew_150.notable_bug_fixes.groupby_value_counts_categorical:
 
diff --git a/doc/source/whatsnew/v2.0.3.rst b/doc/source/whatsnew/v2.0.3.rst
index 89c64b02e0cb5..3e12af946e661 100644
--- a/doc/source/whatsnew/v2.0.3.rst
+++ b/doc/source/whatsnew/v2.0.3.rst
@@ -14,6 +14,7 @@ including other versions of pandas.
 Fixed regressions
 ~~~~~~~~~~~~~~~~~
 - Fixed performance regression in merging on datetime-like columns (:issue:`53231`)
+- For external ExtensionArray implementations, restored the default use of ``_values_for_factorize`` for hashing arrays (:issue:`53475`)
 -
 
 .. ---------------------------------------------------------------------------
@@ -21,9 +22,14 @@ Fixed regressions
 
 Bug fixes
 ~~~~~~~~~
+- Bug in :func:`DataFrame.convert_dtypes` and :func:`Series.convert_dtypes` when trying to convert :class:`ArrowDtype` with ``dtype_backend="numpy_nullable"`` (:issue:`53648`)
+- Bug in :func:`RangeIndex.union` when using ``sort=True`` with another :class:`RangeIndex` (:issue:`53490`)
+- Bug in :func:`Series.reindex` where expanding a non-nanosecond datetime or timedelta :class:`Series` would not fill with ``NaT`` correctly (:issue:`53497`)
 - Bug in :func:`read_csv` when defining ``dtype`` with ``bool[pyarrow]`` for the ``"c"`` and ``"python"`` engines (:issue:`53390`)
 - Bug in :meth:`Series.str.split` and :meth:`Series.str.rsplit` with ``expand=True`` for :class:`ArrowDtype` with ``pyarrow.string`` (:issue:`53532`)
--
+- Bug in indexing methods (e.g. :meth:`DataFrame.__getitem__`) where taking the entire :class:`DataFrame`/:class:`Series` would raise an ``OverflowError`` when Copy on Write was enabled and the length of the array was over the maximum size a 32-bit integer can hold (:issue:`53616`)
+- Bug when constructing a :class:`DataFrame` with columns of an :class:`ArrowDtype` with a ``pyarrow.dictionary`` type that reindexes the data (:issue:`53617`)
+- Bug when indexing a :class:`DataFrame` or :class:`Series` with an :class:`Index` with a timestamp :class:`ArrowDtype` would raise an ``AttributeError`` (:issue:`53644`)
 
 .. ---------------------------------------------------------------------------
 .. _whatsnew_203.other:
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index a949ec808c5cd..2436d91690ed3 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -14,10 +14,17 @@ including other versions of pandas.
 Enhancements
 ~~~~~~~~~~~~
 
-.. _whatsnew_210.enhancements.enhancement1:
+.. _whatsnew_210.enhancements.cow:
 
-enhancement1
-^^^^^^^^^^^^
+Copy-on-Write improvements
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- :meth:`Series.transform` not respecting Copy-on-Write when ``func`` modifies :class:`Series` inplace (:issue:`53747`)
+- Calling :meth:`Index.values` will now return a read-only NumPy array (:issue:`53704`)
+- Setting a :class:`Series` into a :class:`DataFrame` now creates a lazy instead of a deep copy (:issue:`53142`)
+- The :class:`DataFrame` constructor, when constructing a DataFrame from a dictionary
+  of Index objects and specifying ``copy=False``, will now use a lazy copy
+  of those Index objects for the columns of the DataFrame (:issue:`52947`)
 
 .. _whatsnew_210.enhancements.enhancement2:
 
@@ -100,9 +107,14 @@ Other enhancements
 - :meth:`DataFrame.stack` gained the ``sort`` keyword to dictate whether the resulting :class:`MultiIndex` levels are sorted (:issue:`15105`)
 - :meth:`DataFrame.unstack` gained the ``sort`` keyword to dictate whether the resulting :class:`MultiIndex` levels are sorted (:issue:`15105`)
 - :meth:`DataFrameGroupby.agg` and :meth:`DataFrameGroupby.transform` now support grouping by multiple keys when the index is not a :class:`MultiIndex` for ``engine="numba"`` (:issue:`53486`)
+- :meth:`Series.explode` now supports pyarrow-backed list types (:issue:`53602`)
+- :meth:`Series.str.join` now supports ``ArrowDtype(pa.string())`` (:issue:`53646`)
 - :meth:`SeriesGroupby.agg` and :meth:`DataFrameGroupby.agg` now support passing in multiple functions for ``engine="numba"`` (:issue:`53486`)
+- :meth:`SeriesGroupBy.transform` and :meth:`DataFrameGroupBy.transform` now support passing in a string as the function for ``engine="numba"`` (:issue:`53579`)
 - Added ``engine_kwargs`` parameter to :meth:`DataFrame.to_excel` (:issue:`53220`)
 - Added a new parameter ``by_row`` to :meth:`Series.apply`. When set to ``False`` the supplied callables will always operate on the whole Series (:issue:`53400`).
+- Groupby aggregations (such as :meth:`DataFrameGroupBy.sum`) can now preserve the dtype of the input instead of casting to ``float64`` (:issue:`44952`)
+- Improved error message when :meth:`DataFrameGroupBy.agg` failed (:issue:`52930`)
 - Many read/to_* functions, such as :meth:`DataFrame.to_pickle` and :func:`read_csv`, support forwarding compression arguments to lzma.LZMAFile (:issue:`52979`)
 - Performance improvement in :func:`concat` with homogeneous ``np.float64`` or ``np.float32`` dtypes (:issue:`52685`)
 - Performance improvement in :meth:`DataFrame.filter` when ``items`` is given (:issue:`52941`)
@@ -182,6 +194,8 @@ If installed, we now require:
 +-----------------+-----------------+----------+---------+
 | pytables        | 3.7.0           |          |    X    |
 +-----------------+-----------------+----------+---------+
+| pytest          | 7.3.2           |          |    X    |
++-----------------+-----------------+----------+---------+
 | python-snappy   | 0.6.1           |          |    X    |
 +-----------------+-----------------+----------+---------+
 | pyxlsb          | 1.0.9           |          |    X    |
@@ -225,6 +239,7 @@ Other API changes
 Deprecations
 ~~~~~~~~~~~~
 - Deprecated 'broadcast_axis' keyword in :meth:`Series.align` and :meth:`DataFrame.align`, upcast before calling ``align`` with ``left = DataFrame({col: left for col in right.columns}, index=right.index)`` (:issue:`51856`)
+- Deprecated 'fill_method' and 'limit' keywords in :meth:`DataFrame.pct_change`, :meth:`Series.pct_change`, :meth:`DataFrameGroupBy.pct_change`, and :meth:`SeriesGroupBy.pct_change`, explicitly call ``ffill`` or ``bfill`` before calling ``pct_change`` instead (:issue:`53491`)
 - Deprecated 'method', 'limit', and 'fill_axis' keywords in :meth:`DataFrame.align` and :meth:`Series.align`, explicitly call ``fillna`` on the alignment results instead (:issue:`51856`)
 - Deprecated 'quantile' keyword in :meth:`Rolling.quantile` and :meth:`Expanding.quantile`, renamed as 'q' instead (:issue:`52550`)
 - Deprecated :meth:`.DataFrameGroupBy.apply` and methods on the objects returned by :meth:`.DataFrameGroupBy.resample` operating on the grouping column(s); select the columns to operate on after groupby to either explicitly include or exclude the groupings and avoid the ``FutureWarning`` (:issue:`7155`)
@@ -273,15 +288,23 @@ Deprecations
 - Deprecated unused "closed" and "normalize" keywords in the :class:`DatetimeIndex` constructor (:issue:`52628`)
 - Deprecated unused "closed" keyword in the :class:`TimedeltaIndex` constructor (:issue:`52628`)
 - Deprecated logical operation between two non boolean :class:`Series` with different indexes always coercing the result to bool dtype. In a future version, this will maintain the return type of the inputs. (:issue:`52500`, :issue:`52538`)
+- Deprecated :func:`value_counts`, use ``pd.Series(obj).value_counts()`` instead (:issue:`47862`)
 - Deprecated :meth:`Series.first` and :meth:`DataFrame.first` (please create a mask and filter using ``.loc`` instead) (:issue:`45908`)
+- Deprecated :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` for object-dtype (:issue:`53631`)
+- Deprecated :meth:`Series.last` and :meth:`DataFrame.last` (please create a mask and filter using ``.loc`` instead) (:issue:`53692`)
 - Deprecated allowing ``downcast`` keyword other than ``None``, ``False``, "infer", or a dict with these as values in :meth:`Series.fillna`, :meth:`DataFrame.fillna` (:issue:`40988`)
 - Deprecated allowing arbitrary ``fill_value`` in :class:`SparseDtype`, in a future version the ``fill_value`` will need to be compatible with the ``dtype.subtype``, either a scalar that can be held by that subtype or ``NaN`` for integer or bool subtypes (:issue:`23124`)
 - Deprecated behavior of :func:`assert_series_equal` and :func:`assert_frame_equal` considering NA-like values (e.g. ``NaN`` vs ``None`` as equivalent) (:issue:`52081`)
 - Deprecated constructing :class:`SparseArray` from scalar data, pass a sequence instead (:issue:`53039`)
+- Deprecated falling back to filling when ``value`` is not specified in :meth:`DataFrame.replace` and :meth:`Series.replace` with non-dict-like ``to_replace`` (:issue:`33302`)
+- Deprecated literal json input to :func:`read_json`. Wrap literal json string input in ``io.StringIO`` instead. (:issue:`53409`)
 - Deprecated option "mode.use_inf_as_na", convert inf entries to ``NaN`` before instead (:issue:`51684`)
+- Deprecated parameter ``obj`` in :meth:`GroupBy.get_group` (:issue:`53545`)
 - Deprecated positional indexing on :class:`Series` with :meth:`Series.__getitem__` and :meth:`Series.__setitem__`, in a future version ``ser[item]`` will *always* interpret ``item`` as a label, not a position (:issue:`50617`)
+- Deprecated strings ``T``, ``t``, ``L`` and ``l`` denoting units in :func:`to_timedelta` (:issue:`52536`)
 - Deprecated the "method" and "limit" keywords on :meth:`Series.fillna`, :meth:`DataFrame.fillna`, :meth:`SeriesGroupBy.fillna`, :meth:`DataFrameGroupBy.fillna`, and :meth:`Resampler.fillna`, use ``obj.bfill()`` or ``obj.ffill()`` instead (:issue:`53394`)
--
+- Deprecated the ``method`` and ``limit`` keywords in :meth:`DataFrame.replace` and :meth:`Series.replace` (:issue:`33302`)
+- Deprecated values "pad", "ffill", "bfill", "backfill" for :meth:`Series.interpolate` and :meth:`DataFrame.interpolate`, use ``obj.ffill()`` or ``obj.bfill()`` instead (:issue:`53581`)
 
 .. ---------------------------------------------------------------------------
 .. _whatsnew_210.performance:
@@ -307,12 +330,15 @@ Performance improvements
 - Performance improvement accessing :attr:`arrays.IntegerArrays.dtype` & :attr:`arrays.FloatingArray.dtype` (:issue:`52998`)
 - Performance improvement in :class:`Series` reductions (:issue:`52341`)
 - Performance improvement in :func:`concat` when ``axis=1`` and objects have different indexes (:issue:`52541`)
+- Performance improvement in :func:`concat` when the concatenation axis is a :class:`MultiIndex` (:issue:`53574`)
 - Performance improvement in :meth:`.DataFrameGroupBy.groups` (:issue:`53088`)
 - Performance improvement in :meth:`DataFrame.isin` for extension dtypes (:issue:`53514`)
 - Performance improvement in :meth:`DataFrame.loc` when selecting rows and columns (:issue:`53014`)
 - Performance improvement in :meth:`Series.add` for pyarrow string and binary dtypes (:issue:`53150`)
 - Performance improvement in :meth:`Series.corr` and :meth:`Series.cov` for extension dtypes (:issue:`52502`)
+- Performance improvement in :meth:`Series.str.get_dummies` for pyarrow-backed strings (:issue:`53655`)
 - Performance improvement in :meth:`Series.str.get` for pyarrow-backed strings (:issue:`53152`)
+- Performance improvement in :meth:`Series.str.split` with ``expand=True`` for pyarrow-backed strings (:issue:`53585`)
 - Performance improvement in :meth:`Series.to_numpy` when dtype is a numpy float dtype and ``na_value`` is ``np.nan`` (:issue:`52430`)
 - Performance improvement in :meth:`~arrays.ArrowExtensionArray.astype` when converting from a pyarrow timestamp or duration dtype to numpy (:issue:`53326`)
 - Performance improvement in :meth:`~arrays.ArrowExtensionArray.to_numpy` (:issue:`52525`)
@@ -328,6 +354,7 @@ Bug fixes
 
 Categorical
 ^^^^^^^^^^^
+- Bug in :meth:`Series.astype` with ``dtype="category"`` for nullable arrays with read-only null value masks (:issue:`53658`)
 - Bug in :meth:`Series.map` , where the value of the ``na_action`` parameter was not used if the series held a :class:`Categorical` (:issue:`22527`).
 -
 
@@ -335,16 +362,17 @@ Datetimelike
 ^^^^^^^^^^^^
 - :meth:`DatetimeIndex.map` with ``na_action="ignore"`` now works as expected. (:issue:`51644`)
 - Bug in :func:`date_range` when ``freq`` was a :class:`DateOffset` with ``nanoseconds`` (:issue:`46877`)
+- Bug in :meth:`Timestamp.date`, :meth:`Timestamp.isocalendar`, :meth:`Timestamp.timetuple`, and :meth:`Timestamp.toordinal` returning incorrect results for inputs outside those supported by the Python standard library's datetime module (:issue:`53668`)
 - Bug in :meth:`Timestamp.round` with values close to the implementation bounds returning incorrect results instead of raising ``OutOfBoundsDatetime`` (:issue:`51494`)
 - Bug in :meth:`arrays.DatetimeArray.map` and :meth:`DatetimeIndex.map`, where the supplied callable operated array-wise instead of element-wise (:issue:`51977`)
 - Bug in constructing a :class:`Series` or :class:`DataFrame` from a datetime or timedelta scalar always inferring nanosecond resolution instead of inferring from the input (:issue:`52212`)
 - Bug in parsing datetime strings with weekday but no day e.g. "2023 Sept Thu" incorrectly raising ``AttributeError`` instead of ``ValueError`` (:issue:`52659`)
--
 
 Timedelta
 ^^^^^^^^^
 - :meth:`TimedeltaIndex.map` with ``na_action="ignore"`` now works as expected (:issue:`51644`)
 - Bug in :class:`TimedeltaIndex` division or multiplication leading to ``.freq`` of "0 Days" instead of ``None`` (:issue:`51575`)
+- Bug in :class:`Timedelta` with Numpy timedelta64 objects not properly raising ``ValueError`` (:issue:`52806`)
 - Bug in :meth:`Timedelta.round` with values close to the implementation bounds returning incorrect results instead of raising ``OutOfBoundsTimedelta`` (:issue:`51494`)
 - Bug in :meth:`arrays.TimedeltaArray.map` and :meth:`TimedeltaIndex.map`, where the supplied callable operated array-wise instead of element-wise (:issue:`51977`)
 -
@@ -386,19 +414,19 @@ Strings
 
 Interval
 ^^^^^^^^
--
+- :meth:`pd.IntervalIndex.get_indexer` and :meth:`pd.IntervalIndex.get_indexer_nonunique` raising if ``target`` is a read-only array (:issue:`53703`)
 -
 
 Indexing
 ^^^^^^^^
 - Bug in :meth:`DataFrame.__setitem__` losing dtype when setting a :class:`DataFrame` into duplicated columns (:issue:`53143`)
 - Bug in :meth:`DataFrame.__setitem__` with a boolean mask and :meth:`DataFrame.putmask` with mixed non-numeric dtypes and a value other than ``NaN`` incorrectly raising ``TypeError`` (:issue:`53291`)
--
 
 Missing
 ^^^^^^^
 - Bug in :meth:`DataFrame.interpolate` ignoring ``inplace`` when :class:`DataFrame` is empty (:issue:`53199`)
 - Bug in :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` failing to raise on invalid ``downcast`` keyword, which can be only ``None`` or "infer" (:issue:`53103`)
+- Bug in :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` with complex dtype incorrectly failing to fill ``NaN`` entries (:issue:`53635`)
 -
 
 MultiIndex
@@ -438,6 +466,7 @@ Plotting
 Groupby/resample/rolling
 ^^^^^^^^^^^^^^^^^^^^^^^^
 - Bug in :meth:`DataFrame.resample` and :meth:`Series.resample` in incorrectly allowing non-fixed ``freq`` when resampling on a :class:`TimedeltaIndex` (:issue:`51896`)
+- Bug in :meth:`DataFrame.resample` and :meth:`Series.resample` losing time zone when resampling empty data (:issue:`53664`)
 - Bug in :meth:`DataFrameGroupBy.idxmin`, :meth:`SeriesGroupBy.idxmin`, :meth:`DataFrameGroupBy.idxmax`, :meth:`SeriesGroupBy.idxmax` return wrong dtype when used on empty DataFrameGroupBy or SeriesGroupBy (:issue:`51423`)
 - Bug in weighted rolling aggregations when specifying ``min_periods=0`` (:issue:`51449`)
 - Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby`, where, when the index of the
@@ -451,15 +480,18 @@ Groupby/resample/rolling
 - Bug in :meth:`GroupBy.groups` with a datetime key in conjunction with another key produced incorrect number of group keys (:issue:`51158`)
 - Bug in :meth:`GroupBy.quantile` may implicitly sort the result index with ``sort=False`` (:issue:`53009`)
 - Bug in :meth:`GroupBy.var` failing to raise ``TypeError`` when called with datetime64, timedelta64 or :class:`PeriodDtype` values (:issue:`52128`, :issue:`53045`)
-- Bug in :meth:`SeriresGroupBy.nth` and :meth:`DataFrameGroupBy.nth` after performing column selection when using ``dropna="any"`` or ``dropna="all"`` would not subset columns (:issue:`53518`)
-- Bug in :meth:`SeriresGroupBy.nth` and :meth:`DataFrameGroupBy.nth` raised after performing column selection when using ``dropna="any"`` or ``dropna="all"`` resulted in rows being dropped (:issue:`53518`)
+- Bug in :meth:`SeriesGroupBy.nth` and :meth:`DataFrameGroupBy.nth` after performing column selection when using ``dropna="any"`` or ``dropna="all"`` would not subset columns (:issue:`53518`)
+- Bug in :meth:`SeriesGroupBy.nth` and :meth:`DataFrameGroupBy.nth` raised after performing column selection when using ``dropna="any"`` or ``dropna="all"`` resulted in rows being dropped (:issue:`53518`)
+- Bug in :meth:`SeriesGroupBy.sum` and :meth:`DataFrameGroupBy.sum` summing ``np.inf + np.inf`` and ``(-np.inf) + (-np.inf)`` to ``np.nan`` (:issue:`53606`)
 
 Reshaping
 ^^^^^^^^^
+- Bug in :func:`concat` coercing to ``object`` dtype when one column has ``pa.null()`` dtype (:issue:`53702`)
 - Bug in :func:`crosstab` when ``dropna=False`` would not keep ``np.nan`` in the result (:issue:`10772`)
 - Bug in :func:`merge_asof` raising ``KeyError`` for extension dtypes (:issue:`52904`)
 - Bug in :func:`merge_asof` raising ``ValueError`` for data backed by read-only ndarrays (:issue:`53513`)
 - Bug in :meth:`DataFrame.agg` and :meth:`Series.agg` on non-unique columns would return incorrect type when dist-like argument passed in (:issue:`51099`)
+- Bug in :meth:`DataFrame.combine_first` ignoring other's columns if ``other`` is empty (:issue:`53792`)
 - Bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax`, where the axis dtype would be lost for empty frames (:issue:`53265`)
 - Bug in :meth:`DataFrame.merge` not merging correctly when having ``MultiIndex`` with single level (:issue:`52331`)
 - Bug in :meth:`DataFrame.stack` losing extension dtypes when columns is a :class:`MultiIndex` and frame contains mixed dtypes (:issue:`45740`)
@@ -494,14 +526,17 @@ Metadata
 
 Other
 ^^^^^
+- Bug in :class:`DataFrame` and :class:`Series` raising for data of complex dtype when ``NaN`` values are present (:issue:`53627`)
 - Bug in :class:`FloatingArray.__contains__` with ``NaN`` item incorrectly returning ``False`` when ``NaN`` values are present (:issue:`52840`)
 - Bug in :func:`api.interchange.from_dataframe` when converting an empty DataFrame object (:issue:`53155`)
 - Bug in :func:`assert_almost_equal` now throwing assertion error for two unequal sets (:issue:`51727`)
 - Bug in :func:`assert_frame_equal` checks category dtypes even when asked not to check index type (:issue:`52126`)
 - Bug in :meth:`DataFrame.reindex` with a ``fill_value`` that should be inferred with a :class:`ExtensionDtype` incorrectly inferring ``object`` dtype (:issue:`52586`)
+- Bug in :meth:`Series.align`, :meth:`DataFrame.align`, :meth:`Series.reindex`, :meth:`DataFrame.reindex`, :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` incorrectly failing to raise with ``method="asfreq"`` (:issue:`53620`)
 - Bug in :meth:`Series.map` when giving a callable to an empty series, the returned series had ``object`` dtype. It now keeps the original dtype (:issue:`52384`)
 - Bug in :meth:`Series.memory_usage` when ``deep=True`` throw an error with Series of objects and the returned value is incorrect, as it does not take into account GC corrections (:issue:`51858`)
 - Fixed incorrect ``__name__`` attribute of ``pandas._libs.json`` (:issue:`52898`)
+-
 
 .. ***DO NOT USE THIS SECTION***
 
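As a quick illustration of the groupby/resample entry for :issue:`53606` above, grouped sums of infinities are expected to stay infinite once the ``groupby.pyx`` change further down is in place; a minimal, illustrative check (values are arbitrary):

    import numpy as np
    import pandas as pd

    # Illustrative only: with the GH#53606 fix, these grouped sums return
    # inf / -inf instead of NaN from the Kahan compensation term.
    s = pd.Series([np.inf, np.inf, -np.inf, -np.inf], index=[1, 1, 2, 2])
    print(s.groupby(level=0).sum())
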
diff --git a/environment.yml b/environment.yml
index fb8321a9fb6a7..6178fe896760f 100644
--- a/environment.yml
+++ b/environment.yml
@@ -13,7 +13,7 @@ dependencies:
   - meson-python=0.13.1
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-xdist>=2.2.0
   - pytest-asyncio>=0.17.0
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 61f448cbe0c3f..501d572c6623c 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -746,6 +746,13 @@ def group_sum(
                         y = val - compensation[lab, j]
                         t = sumx[lab, j] + y
                         compensation[lab, j] = t - sumx[lab, j] - y
+                        if compensation[lab, j] != compensation[lab, j]:
+                            # GH#53606
+                            # If val is +/- infinity compensation is NaN
+                            # which would lead to results being NaN instead
+                            # of +/- infinity. We cannot use util.is_nan
+                            # because of no gil
+                            compensation[lab, j] = 0
                         sumx[lab, j] = t
 
             _check_below_mincount(
@@ -943,7 +950,7 @@ def group_skew(
                     isna_entry = _treat_as_na(val, False)
 
                 if not isna_entry:
-                    # Based on RunningSats::Push from
+                    # Based on RunningStats::Push from
                     #  https://www.johndcook.com/blog/skewness_kurtosis/
                     n1 = nobs[lab, j]
                     n = n1 + 1
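
The ``group_sum`` hunk above augments Kahan (compensated) summation with a reset of the compensation term when it becomes NaN, which happens when a value is +/- infinity. A plain-Python sketch of the same idea, outside Cython and without the groupby machinery (the helper name is illustrative):

    def kahan_sum(values):
        # Compensated summation: `comp` carries the low-order error of each add.
        total = 0.0
        comp = 0.0
        for val in values:
            y = val - comp
            t = total + y
            comp = t - total - y
            if comp != comp:
                # inf - inf produced NaN; reset so the running total stays +/- inf
                comp = 0.0
            total = t
        return total

    print(kahan_sum([float("inf"), float("inf")]))  # inf, not nan
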
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index a3a1cdf374bb1..1cf5d734705af 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -648,7 +648,7 @@ cdef class {{name}}HashTable(HashTable):
             UInt8Vector result_mask
             UInt8VectorData *rmd
             bint use_na_value, use_mask, seen_na = False
-            uint8_t[:] mask_values
+            const uint8_t[:] mask_values
 
         if return_inverse:
             labels = np.empty(n, dtype=np.intp)
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in
index 67fee7c5fbadd..0b99aebbd3816 100644
--- a/pandas/_libs/intervaltree.pxi.in
+++ b/pandas/_libs/intervaltree.pxi.in
@@ -125,7 +125,7 @@ cdef class IntervalTree(IntervalMixin):
         sort_order = self.left_sorter
         return is_monotonic(sort_order, False)[0]
 
-    def get_indexer(self, scalar_t[:] target) -> np.ndarray:
+    def get_indexer(self, ndarray[scalar_t, ndim=1] target) -> np.ndarray:
         """Return the positions corresponding to unique intervals that overlap
         with the given array of scalar targets.
         """
@@ -153,7 +153,7 @@ cdef class IntervalTree(IntervalMixin):
             old_len = result.data.n
         return result.to_array().astype('intp')
 
-    def get_indexer_non_unique(self, scalar_t[:] target):
+    def get_indexer_non_unique(self, ndarray[scalar_t, ndim=1] target):
         """Return the positions corresponding to intervals that overlap with
         the given array of scalar targets. Non-unique positions are repeated.
         """
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index e68dbfa26a104..a622de742a840 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -668,7 +668,7 @@ ctypedef fused int6432_t:
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def is_range_indexer(ndarray[int6432_t, ndim=1] left, int n) -> bool:
+def is_range_indexer(ndarray[int6432_t, ndim=1] left, Py_ssize_t n) -> bool:
     """
     Perform an element by element comparison on 1-d integer arrays, meant for indexer
     comparisons
@@ -2485,7 +2485,14 @@ def maybe_convert_objects(ndarray[object] objects,
         elif util.is_nan(val):
             seen.nan_ = True
             mask[i] = True
-            floats[i] = complexes[i] = val
+            if util.is_complex_object(val):
+                floats[i] = fnan
+                complexes[i] = val
+                seen.complex_ = True
+                if not convert_numeric:
+                    break
+            else:
+                floats[i] = complexes[i] = val
         elif util.is_bool_object(val):
             seen.bool_ = True
             bools[i] = val
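
The ``maybe_convert_objects`` change above keeps the complex flag set even when the scalar is NaN, which is what the "complex dtype with NaN" whatsnew entry (:issue:`53627`) refers to. A small user-level illustration, assuming a build that includes this fix:

    import pandas as pd

    # complex("nan") is NaN-valued but still complex; inference should keep
    # complex128 rather than raising or degrading the dtype.
    s = pd.Series([complex("nan"), 1 + 2j])
    print(s.dtype)  # expected: complex128
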
diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd
index 1dca498f61b0e..a8dd88c763c14 100644
--- a/pandas/_libs/tslibs/dtypes.pxd
+++ b/pandas/_libs/tslibs/dtypes.pxd
@@ -9,6 +9,7 @@ cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) noexcept nogil
 cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=*) except? -1
 cpdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1
 cpdef NPY_DATETIMEUNIT get_supported_reso(NPY_DATETIMEUNIT reso)
+cpdef bint is_supported_unit(NPY_DATETIMEUNIT reso)
 
 cdef dict attrname_to_abbrevs
 cdef dict npy_unit_to_attrname
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index 2a49ea6760e72..19f4c83e6cecf 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -321,7 +321,7 @@ cpdef NPY_DATETIMEUNIT get_supported_reso(NPY_DATETIMEUNIT reso):
     return reso
 
 
-def is_supported_unit(NPY_DATETIMEUNIT reso):
+cpdef bint is_supported_unit(NPY_DATETIMEUNIT reso):
     return (
         reso == NPY_DATETIMEUNIT.NPY_FR_ns
         or reso == NPY_DATETIMEUNIT.NPY_FR_us
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index ff07f5d799339..09e38e3a979b2 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -4,7 +4,6 @@ from cpython.datetime cimport (
     PyDelta_Check,
     datetime,
     import_datetime,
-    timedelta,
 )
 
 import_datetime()
@@ -257,7 +256,16 @@ cdef class _NaT(datetime):
 
     def to_datetime64(self) -> np.datetime64:
         """
-        Return a numpy.datetime64 object with 'ns' precision.
+        Return a numpy.datetime64 object with the same precision.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp(year=2023, month=1, day=1,
+        ...                   hour=10, second=15)
+        >>> ts
+        Timestamp('2023-01-01 10:00:15')
+        >>> ts.to_datetime64()
+        numpy.datetime64('2023-01-01T10:00:15.000000')
         """
         return np.datetime64("NaT", "ns")
 
@@ -430,6 +438,14 @@ class NaTType(_NaT):
         Return the day of the week represented by the date.
 
         Monday == 0 ... Sunday == 6.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01')
+        >>> ts
+        Timestamp('2023-01-01 00:00:00')
+        >>> ts.weekday()
+        6
         """,
     )
     isoweekday = _make_nan_func(
@@ -438,9 +454,30 @@ class NaTType(_NaT):
         Return the day of the week represented by the date.
 
         Monday == 1 ... Sunday == 7.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.isoweekday()
+        7
+        """,
+    )
+    total_seconds = _make_nan_func(
+        "total_seconds",
+        """
+        Total seconds in the duration.
+
+        Examples
+        --------
+        >>> td = pd.Timedelta('1min')
+        >>> td
+        Timedelta('0 days 00:01:00')
+        >>> td.total_seconds()
+        60.0
         """,
     )
-    total_seconds = _make_nan_func("total_seconds", timedelta.total_seconds.__doc__)
     month_name = _make_nan_func(
         "month_name",
         """
@@ -494,18 +531,6 @@ class NaTType(_NaT):
         """,
     )
     # _nat_methods
-    date = _make_nat_func("date", datetime.date.__doc__)
-
-    utctimetuple = _make_error_func("utctimetuple", datetime)
-    timetz = _make_error_func("timetz", datetime)
-    timetuple = _make_error_func("timetuple", datetime)
-    isocalendar = _make_error_func("isocalendar", datetime)
-    dst = _make_error_func("dst", datetime)
-    ctime = _make_error_func("ctime", datetime)
-    time = _make_error_func("time", datetime)
-    toordinal = _make_error_func("toordinal", datetime)
-    tzname = _make_error_func("tzname", datetime)
-    utcoffset = _make_error_func("utcoffset", datetime)
 
     # "fromisocalendar" was introduced in 3.8
     fromisocalendar = _make_error_func("fromisocalendar", datetime)
@@ -513,6 +538,162 @@ class NaTType(_NaT):
     # ----------------------------------------------------------------------
     # The remaining methods have docstrings copy/pasted from the analogous
     # Timestamp methods.
+    isocalendar = _make_error_func(
+        "isocalendar",
+        """
+        Return a named tuple containing ISO year, week number, and weekday.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.isocalendar()
+        datetime.IsoCalendarDate(year=2022, week=52, weekday=7)
+        """
+        )
+    dst = _make_error_func(
+        "dst",
+        """
+        Return the daylight saving time (DST) adjustment.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2000-06-01 00:00:00', tz='Europe/Brussels')
+        >>> ts
+        Timestamp('2000-06-01 00:00:00+0200', tz='Europe/Brussels')
+        >>> ts.dst()
+        datetime.timedelta(seconds=3600)
+        """
+        )
+    date = _make_nat_func(
+        "date",
+        """
+        Return date object with same year, month and day.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00.00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.date()
+        datetime.date(2023, 1, 1)
+        """
+        )
+    utctimetuple = _make_error_func(
+        "utctimetuple",
+        """
+        Return UTC time tuple, compatible with time.localtime().
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00', tz='Europe/Brussels')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00+0100', tz='Europe/Brussels')
+        >>> ts.utctimetuple()
+        time.struct_time(tm_year=2023, tm_mon=1, tm_mday=1, tm_hour=9,
+        tm_min=0, tm_sec=0, tm_wday=6, tm_yday=1, tm_isdst=0)
+        """
+        )
+    utcoffset = _make_error_func(
+        "utcoffset",
+        """
+        Return the UTC offset.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00', tz='Europe/Brussels')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00+0100', tz='Europe/Brussels')
+        >>> ts.utcoffset()
+        datetime.timedelta(seconds=3600)
+        """
+        )
+    tzname = _make_error_func(
+        "tzname",
+        """
+        Return time zone name.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00', tz='Europe/Brussels')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00+0100', tz='Europe/Brussels')
+        >>> ts.tzname()
+        'CET'
+        """
+        )
+    time = _make_error_func(
+        "time",
+        """
+        Return time object with same time but with tzinfo=None.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.time()
+        datetime.time(10, 0)
+        """,
+        )
+    timetuple = _make_error_func(
+        "timetuple",
+        """
+        Return time tuple, compatible with time.localtime().
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.timetuple()
+        time.struct_time(tm_year=2023, tm_mon=1, tm_mday=1,
+        tm_hour=10, tm_min=0, tm_sec=0, tm_wday=6, tm_yday=1, tm_isdst=-1)
+        """
+        )
+    timetz = _make_error_func(
+        "timetz",
+        """
+        Return time object with same time and tzinfo.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00', tz='Europe/Brussels')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00+0100', tz='Europe/Brussels')
+        >>> ts.timetz()
+        datetime.time(10, 0, tzinfo=<DstTzInfo 'Europe/Brussels' CET+1:00:00 STD>)
+        """
+        )
+    toordinal = _make_error_func(
+        "toordinal",
+        """
+        Return proleptic Gregorian ordinal. January 1 of year 1 is day 1.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:50')
+        >>> ts
+        Timestamp('2023-01-01 10:00:50')
+        >>> ts.toordinal()
+        738521
+        """
+        )
+    ctime = _make_error_func(
+        "ctime",
+        """
+        Return ctime() style string.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00.00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.ctime()
+        'Sun Jan  1 10:00:00 2023'
+        """,
+    )
 
     strftime = _make_error_func(
         "strftime",
@@ -540,6 +721,12 @@ class NaTType(_NaT):
         Timestamp.strptime(string, format)
 
         Function is not implemented. Use pd.to_datetime().
+
+        Examples
+        --------
+        >>> pd.Timestamp.strptime("2023-01-01", "%d/%m/%y")
+        Traceback (most recent call last):
+        NotImplementedError
         """,
     )
 
@@ -1210,6 +1397,19 @@ default 'raise'
         Returns
         -------
         Timestamp
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 00:00:00.01')
+        >>> ts
+        Timestamp('2023-01-01 00:00:00.010000')
+        >>> ts.unit
+        'ms'
+        >>> ts = ts.as_unit('s')
+        >>> ts
+        Timestamp('2023-01-01 00:00:00')
+        >>> ts.unit
+        's'
         """
         return c_NaT
 
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 54bf041d59264..0a181c5bfdfa0 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1917,13 +1917,20 @@ cdef class _Period(PeriodMixin):
         Parameters
         ----------
         freq : str, BaseOffset
-            The desired frequency.
+            The desired frequency. If passing a `str`, it needs to be a
+            valid :ref:`period alias <timeseries.period_aliases>`.
         how : {'E', 'S', 'end', 'start'}, default 'end'
             Start or end of the timespan.
 
         Returns
         -------
         resampled : Period
+
+        Examples
+        --------
+        >>> period = pd.Period('2023-1-1', freq='D')
+        >>> period.asfreq('H')
+        Period('2023-01-01 23:00', 'H')
         """
         freq = self._maybe_convert_freq(freq)
         how = validate_end_alias(how)
@@ -2459,6 +2466,11 @@ cdef class _Period(PeriodMixin):
         ----------
         freq : str, BaseOffset
             Frequency to use for the returned period.
+
+        Examples
+        --------
+        >>> pd.Period.now('H')  # doctest: +SKIP
+        Period('2023-06-12 11:00', 'H')
         """
         return Period(datetime.now(), freq=freq)
 
@@ -2656,7 +2668,7 @@ class Period(_Period):
 
     Parameters
     ----------
-    value : Period or str, default None
+    value : Period, str, datetime, date or pandas.Timestamp, default None
         The time period represented (e.g., '4Q2005'). This represents neither
         the start nor the end of the period, but rather the entire period itself.
     freq : str, default None
diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi
index 0d5afbfe963f1..448aed0eebe4c 100644
--- a/pandas/_libs/tslibs/timedeltas.pyi
+++ b/pandas/_libs/tslibs/timedeltas.pyi
@@ -37,6 +37,7 @@ UnitChoices = Literal[
     "minute",
     "min",
     "minutes",
+    "T",
     "t",
     "s",
     "seconds",
@@ -47,6 +48,7 @@ UnitChoices = Literal[
     "millisecond",
     "milli",
     "millis",
+    "L",
     "l",
     "us",
     "microseconds",
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 047b5e861da2c..0981c966c4cd4 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -42,6 +42,7 @@ from pandas._libs.tslibs.conversion cimport (
 )
 from pandas._libs.tslibs.dtypes cimport (
     get_supported_reso,
+    is_supported_unit,
     npy_unit_to_abbrev,
 )
 from pandas._libs.tslibs.nattype cimport (
@@ -151,10 +152,10 @@ cdef dict timedelta_abbrevs = {
 
 _no_input = object()
 
-
 # ----------------------------------------------------------------------
 # API
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def ints_to_pytimedelta(ndarray m8values, box=False):
@@ -973,7 +974,7 @@ cdef _timedelta_from_value_and_reso(cls, int64_t value, NPY_DATETIMEUNIT reso):
             "Only resolutions 's', 'ms', 'us', 'ns' are supported."
         )
 
-    td_base._value= value
+    td_base._value = value
     td_base._is_populated = 0
     td_base._creso = reso
     return td_base
@@ -1112,7 +1113,17 @@ cdef class _Timedelta(timedelta):
         return self._ms * 1000 + self._us
 
     def total_seconds(self) -> float:
-        """Total seconds in the duration."""
+        """
+        Total seconds in the duration.
+
+        Examples
+        --------
+        >>> td = pd.Timedelta('1min')
+        >>> td
+        Timedelta('0 days 00:01:00')
+        >>> td.total_seconds()
+        60.0
+        """
         # We need to override bc we overrode days/seconds/microseconds
         # TODO: add nanos/1e9?
         return self.days * 24 * 3600 + self.seconds + self.microseconds / 1_000_000
@@ -1274,6 +1285,14 @@ cdef class _Timedelta(timedelta):
         Notes
         -----
         Any nanosecond resolution will be lost.
+
+        Examples
+        --------
+        >>> td = pd.Timedelta('3D')
+        >>> td
+        Timedelta('3 days 00:00:00')
+        >>> td.to_pytimedelta()
+        datetime.timedelta(days=3)
         """
         if self._creso == NPY_FR_ns:
             return timedelta(microseconds=int(self._value) / 1000)
@@ -1287,6 +1306,14 @@ cdef class _Timedelta(timedelta):
     def to_timedelta64(self) -> np.timedelta64:
         """
         Return a numpy.timedelta64 object with 'ns' precision.
+
+        Examples
+        --------
+        >>> td = pd.Timedelta('3D')
+        >>> td
+        Timedelta('3 days 00:00:00')
+        >>> td.to_timedelta64()
+        numpy.timedelta64(259200000000000,'ns')
         """
         cdef:
             str abbrev = npy_unit_to_abbrev(self._creso)
@@ -1309,6 +1336,14 @@ cdef class _Timedelta(timedelta):
         See Also
         --------
         Series.to_numpy : Similar method for Series.
+
+        Examples
+        --------
+        >>> td = pd.Timedelta('3D')
+        >>> td
+        Timedelta('3 days 00:00:00')
+        >>> td.to_numpy()
+        numpy.timedelta64(259200000000000,'ns')
         """
         if dtype is not None or copy is not False:
             raise ValueError(
@@ -1324,6 +1359,14 @@ cdef class _Timedelta(timedelta):
         ----------
         dtype : str or dtype
             The dtype to view the underlying data as.
+
+        Examples
+        --------
+        >>> td = pd.Timedelta('3D')
+        >>> td
+        Timedelta('3 days 00:00:00')
+        >>> td.view(int)
+        259200000000000
         """
         return np.timedelta64(self._value).view(dtype)
 
@@ -1603,6 +1646,14 @@ cdef class _Timedelta(timedelta):
         Returns
         -------
         Timedelta
+
+        Examples
+        --------
+        >>> td = pd.Timedelta('1001ms')
+        >>> td
+        Timedelta('0 days 00:00:01.001000')
+        >>> td.as_unit('s')
+        Timedelta('0 days 00:00:01')
         """
         dtype = np.dtype(f"m8[{unit}]")
         reso = get_unit_from_dtype(dtype)
@@ -1739,7 +1790,6 @@ class Timedelta(_Timedelta):
                 + int(kwargs.get("milliseconds", 0) * 1_000_000)
                 + seconds
             )
-
         if unit in {"Y", "y", "M"}:
             raise ValueError(
                 "Units 'M', 'Y', and 'y' are no longer supported, as they do not "
@@ -1786,6 +1836,15 @@ class Timedelta(_Timedelta):
                 return NaT
 
             reso = get_datetime64_unit(value)
+            if not (is_supported_unit(reso) or
+                    reso in [NPY_DATETIMEUNIT.NPY_FR_m,
+                             NPY_DATETIMEUNIT.NPY_FR_h,
+                             NPY_DATETIMEUNIT.NPY_FR_D,
+                             NPY_DATETIMEUNIT.NPY_FR_W,
+                             NPY_DATETIMEUNIT.NPY_FR_GENERIC]):
+                err = npy_unit_to_abbrev(reso)
+                raise ValueError(f"cannot construct a Timedelta from a unit {err}")
+
             new_reso = get_supported_reso(reso)
             if reso != NPY_DATETIMEUNIT.NPY_FR_GENERIC:
                 try:
@@ -1875,6 +1934,14 @@ class Timedelta(_Timedelta):
         Raises
         ------
         ValueError if the freq cannot be converted
+
+        Examples
+        --------
+        >>> td = pd.Timedelta('1001ms')
+        >>> td
+        Timedelta('0 days 00:00:01.001000')
+        >>> td.round('s')
+        Timedelta('0 days 00:00:01')
         """
         return self._round(freq, RoundTo.NEAREST_HALF_EVEN)
 
@@ -1886,6 +1953,14 @@ class Timedelta(_Timedelta):
         ----------
         freq : str
             Frequency string indicating the flooring resolution.
+
+        Examples
+        --------
+        >>> td = pd.Timedelta('1001ms')
+        >>> td
+        Timedelta('0 days 00:00:01.001000')
+        >>> td.floor('s')
+        Timedelta('0 days 00:00:01')
         """
         return self._round(freq, RoundTo.MINUS_INFTY)
 
@@ -1897,6 +1972,14 @@ class Timedelta(_Timedelta):
         ----------
         freq : str
             Frequency string indicating the ceiling resolution.
+
+        Examples
+        --------
+        >>> td = pd.Timedelta('1001ms')
+        >>> td
+        Timedelta('0 days 00:00:01.001000')
+        >>> td.ceil('s')
+        Timedelta('0 days 00:00:02')
         """
         return self._round(freq, RoundTo.PLUS_INFTY)
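
The new unit check in the ``Timedelta`` constructor above rejects ``numpy.timedelta64`` inputs whose unit is neither a supported resolution (s/ms/us/ns) nor one of the coarse units (m/h/D/W) that can be converted. An illustrative sketch of the intended behaviour, assuming a build with this check:

    import numpy as np
    import pandas as pd

    pd.Timedelta(np.timedelta64(1, "D"))       # coarse units are converted
    try:
        pd.Timedelta(np.timedelta64(1, "Y"))   # calendar units cannot be converted
    except ValueError as err:
        print(err)
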
 
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 4f09c0953f3d2..844fc8f0ed187 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -45,6 +45,8 @@ from cpython.object cimport (
 
 import_datetime()
 
+import datetime as dt
+
 from pandas._libs.tslibs cimport ccalendar
 from pandas._libs.tslibs.base cimport ABCTimestamp
 
@@ -1122,6 +1124,19 @@ cdef class _Timestamp(ABCTimestamp):
         Returns
         -------
         Timestamp
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 00:00:00.01')
+        >>> ts
+        Timestamp('2023-01-01 00:00:00.010000')
+        >>> ts.unit
+        'ms'
+        >>> ts = ts.as_unit('s')
+        >>> ts
+        Timestamp('2023-01-01 00:00:00')
+        >>> ts.unit
+        's'
         """
         dtype = np.dtype(f"M8[{unit}]")
         reso = get_unit_from_dtype(dtype)
@@ -1189,7 +1204,16 @@ cdef class _Timestamp(ABCTimestamp):
 
     cpdef to_datetime64(self):
         """
-        Return a numpy.datetime64 object with 'ns' precision.
+        Return a numpy.datetime64 object with the same precision.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp(year=2023, month=1, day=1,
+        ...                   hour=10, second=15)
+        >>> ts
+        Timestamp('2023-01-01 10:00:15')
+        >>> ts.to_datetime64()
+        numpy.datetime64('2023-01-01T10:00:15.000000')
         """
         # TODO: find a way to construct dt64 directly from _reso
         abbrev = npy_unit_to_abbrev(self._creso)
@@ -1493,6 +1517,207 @@ class Timestamp(_Timestamp):
             ) from err
         return _dt.strftime(format)
 
+    def ctime(self):
+        """
+        Return ctime() style string.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00.00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.ctime()
+        'Sun Jan  1 10:00:00 2023'
+        """
+        try:
+            _dt = datetime(self.year, self.month, self.day,
+                           self.hour, self.minute, self.second,
+                           self.microsecond, self.tzinfo, fold=self.fold)
+        except ValueError as err:
+            raise NotImplementedError(
+                "ctime not yet supported on Timestamps which "
+                "are outside the range of Python's standard library. "
+                "For now, please call the components you need (such as `.year` "
+                "and `.month`) and construct your string from there."
+            ) from err
+        return _dt.ctime()
+
+    def date(self):
+        """
+        Return date object with same year, month and day.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00.00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.date()
+        datetime.date(2023, 1, 1)
+        """
+        try:
+            _dt = dt.date(self.year, self.month, self.day)
+        except ValueError as err:
+            raise NotImplementedError(
+                "date not yet supported on Timestamps which "
+                "are outside the range of Python's standard library. "
+            ) from err
+        return _dt
+
+    def dst(self):
+        """
+        Return the daylight saving time (DST) adjustment.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2000-06-01 00:00:00', tz='Europe/Brussels')
+        >>> ts
+        Timestamp('2000-06-01 00:00:00+0200', tz='Europe/Brussels')
+        >>> ts.dst()
+        datetime.timedelta(seconds=3600)
+        """
+        return super().dst()
+
+    def isocalendar(self):
+        """
+        Return a named tuple containing ISO year, week number, and weekday.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.isocalendar()
+        datetime.IsoCalendarDate(year=2022, week=52, weekday=7)
+        """
+        try:
+            _dt = datetime(self.year, self.month, self.day,
+                           self.hour, self.minute, self.second,
+                           self.microsecond, self.tzinfo, fold=self.fold)
+        except ValueError as err:
+            raise NotImplementedError(
+                "isocalendar not yet supported on Timestamps which "
+                "are outside the range of Python's standard library. "
+            ) from err
+        return _dt.isocalendar()
+
+    def tzname(self):
+        """
+        Return time zone name.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00', tz='Europe/Brussels')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00+0100', tz='Europe/Brussels')
+        >>> ts.tzname()
+        'CET'
+        """
+        return super().tzname()
+
+    def utcoffset(self):
+        """
+        Return the UTC offset.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00', tz='Europe/Brussels')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00+0100', tz='Europe/Brussels')
+        >>> ts.utcoffset()
+        datetime.timedelta(seconds=3600)
+        """
+        return super().utcoffset()
+
+    def utctimetuple(self):
+        """
+        Return UTC time tuple, compatible with time.localtime().
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00', tz='Europe/Brussels')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00+0100', tz='Europe/Brussels')
+        >>> ts.utctimetuple()
+        time.struct_time(tm_year=2023, tm_mon=1, tm_mday=1, tm_hour=9,
+        tm_min=0, tm_sec=0, tm_wday=6, tm_yday=1, tm_isdst=0)
+        """
+        return super().utctimetuple()
+
+    def time(self):
+        """
+        Return time object with same time but with tzinfo=None.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.time()
+        datetime.time(10, 0)
+        """
+        return super().time()
+
+    def timetuple(self):
+        """
+        Return time tuple, compatible with time.localtime().
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.timetuple()
+        time.struct_time(tm_year=2023, tm_mon=1, tm_mday=1,
+        tm_hour=10, tm_min=0, tm_sec=0, tm_wday=6, tm_yday=1, tm_isdst=-1)
+        """
+        try:
+            _dt = datetime(self.year, self.month, self.day,
+                           self.hour, self.minute, self.second,
+                           self.microsecond, self.tzinfo, fold=self.fold)
+        except ValueError as err:
+            raise NotImplementedError(
+                "timetuple not yet supported on Timestamps which "
+                "are outside the range of Python's standard library. "
+            ) from err
+        return _dt.timetuple()
+
+    def timetz(self):
+        """
+        Return time object with same time and tzinfo.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00', tz='Europe/Brussels')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00+0100', tz='Europe/Brussels')
+        >>> ts.timetz()
+        datetime.time(10, 0, tzinfo=<DstTzInfo 'Europe/Brussels' CET+1:00:00 STD>)
+        """
+        return super().timetz()
+
+    def toordinal(self):
+        """
+        Return proleptic Gregorian ordinal. January 1 of year 1 is day 1.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:50')
+        >>> ts
+        Timestamp('2023-01-01 10:00:50')
+        >>> ts.toordinal()
+        738521
+        """
+        try:
+            _dt = datetime(self.year, self.month, self.day,
+                           self.hour, self.minute, self.second,
+                           self.microsecond, self.tzinfo, fold=self.fold)
+        except ValueError as err:
+            raise NotImplementedError(
+                "toordinal not yet supported on Timestamps which "
+                "are outside the range of Python's standard library. "
+            ) from err
+        return _dt.toordinal()
+
     # Issue 25016.
     @classmethod
     def strptime(cls, date_string, format):
@@ -1500,6 +1725,12 @@ class Timestamp(_Timestamp):
         Timestamp.strptime(string, format)
 
         Function is not implemented. Use pd.to_datetime().
+
+        Examples
+        --------
+        >>> pd.Timestamp.strptime("2023-01-01", "%d/%m/%y")
+        Traceback (most recent call last):
+        NotImplementedError
         """
         raise NotImplementedError(
             "Timestamp.strptime() is not implemented. "
@@ -2346,9 +2577,18 @@ default 'raise'
         Return the day of the week represented by the date.
 
         Monday == 1 ... Sunday == 7.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01 10:00:00')
+        >>> ts
+        Timestamp('2023-01-01 10:00:00')
+        >>> ts.isoweekday()
+        7
         """
         # same as super().isoweekday(), but that breaks because of how
         #  we have overridden year, see note in create_timestamp_from_ts
+
         return self.weekday() + 1
 
     def weekday(self):
@@ -2356,6 +2596,14 @@ default 'raise'
         Return the day of the week represented by the date.
 
         Monday == 0 ... Sunday == 6.
+
+        Examples
+        --------
+        >>> ts = pd.Timestamp('2023-01-01')
+        >>> ts
+        Timestamp('2023-01-01 00:00:00')
+        >>> ts.weekday()
+        6
         """
         # same as super().weekday(), but that breaks because of how
         #  we have overridden year, see note in create_timestamp_from_ts
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 7908c9df60df8..de3dd58d3b716 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -97,7 +97,6 @@
 from pandas._testing.contexts import (
     decompress_file,
     ensure_clean,
-    ensure_safe_environment_variables,
     raises_chained_assignment_error,
     set_timezone,
     use_numexpr,
@@ -1104,7 +1103,6 @@ def shares_memory(left, right) -> bool:
     "EMPTY_STRING_PATTERN",
     "ENDIAN",
     "ensure_clean",
-    "ensure_safe_environment_variables",
     "equalContents",
     "external_error_raised",
     "FLOAT_EA_DTYPES",
diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py
index ba2c8c219dc41..f939bd42add93 100644
--- a/pandas/_testing/contexts.py
+++ b/pandas/_testing/contexts.py
@@ -138,22 +138,6 @@ def ensure_clean(
             path.unlink()
 
 
-@contextmanager
-def ensure_safe_environment_variables() -> Generator[None, None, None]:
-    """
-    Get a context manager to safely set environment variables
-
-    All changes will be undone on close, hence environment variables set
-    within this contextmanager will neither persist nor change global state.
-    """
-    saved_environ = dict(os.environ)
-    try:
-        yield
-    finally:
-        os.environ.clear()
-        os.environ.update(saved_environ)
-
-
 @contextmanager
 def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]:
     """
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index ef511828126f8..dcd49f65fc4cd 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -36,7 +36,7 @@
     "pymysql": "1.0.2",
     "pyarrow": "7.0.0",
     "pyreadstat": "1.1.5",
-    "pytest": "7.0.0",
+    "pytest": "7.3.2",
     "pyxlsb": "1.0.9",
     "s3fs": "2022.05.0",
     "scipy": "1.8.1",
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 97c434d8f35d0..d5dad2b8dc4bd 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -9,6 +9,7 @@
 np_version_under1p22 = _nlv < Version("1.22")
 np_version_gte1p24 = _nlv >= Version("1.24")
 np_version_gte1p24p3 = _nlv >= Version("1.24.3")
+np_version_gte1p25 = _nlv >= Version("1.25")
 is_numpy_dev = _nlv.dev is not None
 _min_numpy_ver = "1.21.6"
 
diff --git a/pandas/conftest.py b/pandas/conftest.py
index fbef2fb272ed6..b2f1377a9fb32 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -30,6 +30,7 @@
 from decimal import Decimal
 import operator
 import os
+from pathlib import Path
 from typing import (
     Callable,
     Hashable,
@@ -134,6 +135,8 @@ def pytest_collection_modifyitems(items, config) -> None:
         ("is_datetime64tz_dtype", "is_datetime64tz_dtype is deprecated"),
         ("is_categorical_dtype", "is_categorical_dtype is deprecated"),
         ("is_sparse", "is_sparse is deprecated"),
+        ("NDFrame.replace", "The 'method' keyword"),
+        ("NDFrame.replace", "Series.replace without 'value'"),
         # Docstring divides by zero to show behavior difference
         ("missing.mask_zero_div_zero", "divide by zero encountered"),
         (
@@ -683,7 +686,7 @@ def index_with_missing(request):
     # GH 35538. Use deep copy to avoid elusive bug on np-dev
     # GHA pipeline that writes into indices_dict despite copy
     ind = indices_dict[request.param].copy(deep=True)
-    vals = ind.values
+    vals = ind.values.copy()
     if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
         # For setting missing values in the top level of MultiIndex
         vals = ind.tolist()
@@ -1165,6 +1168,16 @@ def strict_data_files(pytestconfig):
     return pytestconfig.getoption("--strict-data-files")
 
 
+@pytest.fixture
+def tests_path() -> Path:
+    return Path(__file__).parent / "tests"
+
+
+@pytest.fixture
+def tests_io_data_path(tests_path) -> Path:
+    return tests_path / "io" / "data"
+
+
 @pytest.fixture
 def datapath(strict_data_files: str) -> Callable[..., str]:
     """
diff --git a/pandas/core/_numba/executor.py b/pandas/core/_numba/executor.py
index b5a611560bde7..24599148356fa 100644
--- a/pandas/core/_numba/executor.py
+++ b/pandas/core/_numba/executor.py
@@ -3,6 +3,7 @@
 import functools
 from typing import (
     TYPE_CHECKING,
+    Any,
     Callable,
 )
 
@@ -15,8 +16,86 @@
 
 
 @functools.cache
+def make_looper(func, result_dtype, nopython, nogil, parallel):
+    if TYPE_CHECKING:
+        import numba
+    else:
+        numba = import_optional_dependency("numba")
+
+    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
+    def column_looper(
+        values: np.ndarray,
+        start: np.ndarray,
+        end: np.ndarray,
+        min_periods: int,
+        *args,
+    ):
+        result = np.empty((values.shape[0], len(start)), dtype=result_dtype)
+        na_positions = {}
+        for i in numba.prange(values.shape[0]):
+            output, na_pos = func(
+                values[i], result_dtype, start, end, min_periods, *args
+            )
+            result[i] = output
+            if len(na_pos) > 0:
+                na_positions[i] = np.array(na_pos)
+        return result, na_positions
+
+    return column_looper
+
+
+default_dtype_mapping: dict[np.dtype, Any] = {
+    np.dtype("int8"): np.int64,
+    np.dtype("int16"): np.int64,
+    np.dtype("int32"): np.int64,
+    np.dtype("int64"): np.int64,
+    np.dtype("uint8"): np.uint64,
+    np.dtype("uint16"): np.uint64,
+    np.dtype("uint32"): np.uint64,
+    np.dtype("uint64"): np.uint64,
+    np.dtype("float32"): np.float64,
+    np.dtype("float64"): np.float64,
+    np.dtype("complex64"): np.complex128,
+    np.dtype("complex128"): np.complex128,
+}
+
+
+# TODO: Preserve complex dtypes
+
+float_dtype_mapping: dict[np.dtype, Any] = {
+    np.dtype("int8"): np.float64,
+    np.dtype("int16"): np.float64,
+    np.dtype("int32"): np.float64,
+    np.dtype("int64"): np.float64,
+    np.dtype("uint8"): np.float64,
+    np.dtype("uint16"): np.float64,
+    np.dtype("uint32"): np.float64,
+    np.dtype("uint64"): np.float64,
+    np.dtype("float32"): np.float64,
+    np.dtype("float64"): np.float64,
+    np.dtype("complex64"): np.float64,
+    np.dtype("complex128"): np.float64,
+}
+
+identity_dtype_mapping: dict[np.dtype, Any] = {
+    np.dtype("int8"): np.int8,
+    np.dtype("int16"): np.int16,
+    np.dtype("int32"): np.int32,
+    np.dtype("int64"): np.int64,
+    np.dtype("uint8"): np.uint8,
+    np.dtype("uint16"): np.uint16,
+    np.dtype("uint32"): np.uint32,
+    np.dtype("uint64"): np.uint64,
+    np.dtype("float32"): np.float32,
+    np.dtype("float64"): np.float64,
+    np.dtype("complex64"): np.complex64,
+    np.dtype("complex128"): np.complex128,
+}
+
+
 def generate_shared_aggregator(
     func: Callable[..., Scalar],
+    dtype_mapping: dict[np.dtype, np.dtype],
     nopython: bool,
     nogil: bool,
     parallel: bool,
@@ -29,6 +108,9 @@ def generate_shared_aggregator(
     ----------
     func : function
         aggregation function to be applied to each column
+    dtype_mapping : dict
+        Mapping from the dtype of the input values to the dtype of the
+        aggregation result.
     nopython : bool
         nopython to be passed into numba.jit
     nogil : bool
@@ -40,22 +122,35 @@ def generate_shared_aggregator(
     -------
     Numba function
     """
-    if TYPE_CHECKING:
-        import numba
-    else:
-        numba = import_optional_dependency("numba")
 
-    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
-    def column_looper(
-        values: np.ndarray,
-        start: np.ndarray,
-        end: np.ndarray,
-        min_periods: int,
-        *args,
-    ):
-        result = np.empty((len(start), values.shape[1]), dtype=np.float64)
-        for i in numba.prange(values.shape[1]):
-            result[:, i] = func(values[:, i], start, end, min_periods, *args)
+    # A wrapper around the looper function,
+    # to dispatch based on dtype since numba is unable to do that in nopython mode
+
+    # It also post-processes the values by inserting nans where number of observations
+    # is less than min_periods
+    # Cannot do this in numba nopython mode
+    # (you'll run into type-unification error when you cast int -> float)
+    def looper_wrapper(values, start, end, min_periods, **kwargs):
+        result_dtype = dtype_mapping[values.dtype]
+        column_looper = make_looper(func, result_dtype, nopython, nogil, parallel)
+        # Need to unpack kwargs since numba only supports *args
+        result, na_positions = column_looper(
+            values, start, end, min_periods, *kwargs.values()
+        )
+        if result.dtype.kind == "i":
+            # Look if na_positions is not empty
+            # If so, convert the whole block
+            # This is OK since int dtype cannot hold nan,
+            # so if min_periods not satisfied for 1 col, it is not satisfied for
+            # all columns at that index
+            for na_pos in na_positions.values():
+                if len(na_pos) > 0:
+                    result = result.astype("float64")
+                    break
+        # TODO: Optimize this
+        for i, na_pos in na_positions.items():
+            if len(na_pos) > 0:
+                result[i, na_pos] = np.nan
         return result
 
-    return column_looper
+    return looper_wrapper
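
The executor change above moves dtype handling out of the jitted kernel: a Python-level wrapper looks up the result dtype, calls the cached compiled looper, and only afterwards patches NaNs into integer results. Stripped of numba, the dispatch pattern is roughly the following (names are placeholders, not the pandas API):

    import numpy as np

    def make_wrapper(kernel, dtype_mapping):
        def wrapper(values, start, end, min_periods, **kwargs):
            result_dtype = dtype_mapping[values.dtype]
            result, na_positions = kernel(
                values, result_dtype, start, end, min_periods, *kwargs.values()
            )
            # Integer outputs cannot hold NaN: cast the whole block to float once
            # any row has missing positions, then write the NaNs in.
            if result.dtype.kind == "i" and any(len(p) for p in na_positions.values()):
                result = result.astype("float64")
            for i, na_pos in na_positions.items():
                if len(na_pos) > 0:
                    result[i, na_pos] = np.nan
            return result
        return wrapper
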
diff --git a/pandas/core/_numba/kernels/mean_.py b/pandas/core/_numba/kernels/mean_.py
index 725989e093441..2903c7d81bbc0 100644
--- a/pandas/core/_numba/kernels/mean_.py
+++ b/pandas/core/_numba/kernels/mean_.py
@@ -60,10 +60,11 @@ def remove_mean(
 @numba.jit(nopython=True, nogil=True, parallel=False)
 def sliding_mean(
     values: np.ndarray,
+    result_dtype: np.dtype,
     start: np.ndarray,
     end: np.ndarray,
     min_periods: int,
-) -> np.ndarray:
+) -> tuple[np.ndarray, list[int]]:
     N = len(start)
     nobs = 0
     sum_x = 0.0
@@ -75,7 +76,7 @@ def sliding_mean(
         start
     ) and is_monotonic_increasing(end)
 
-    output = np.empty(N, dtype=np.float64)
+    output = np.empty(N, dtype=result_dtype)
 
     for i in range(N):
         s = start[i]
@@ -100,7 +101,7 @@ def sliding_mean(
                     neg_ct,
                     compensation_add,
                     num_consecutive_same_value,
-                    prev_value,
+                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                 )
         else:
             for j in range(start[i - 1], s):
@@ -125,7 +126,7 @@ def sliding_mean(
                     neg_ct,
                     compensation_add,
                     num_consecutive_same_value,
-                    prev_value,
+                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                 )
 
         if nobs >= min_periods and nobs > 0:
@@ -147,4 +148,8 @@ def sliding_mean(
             neg_ct = 0
             compensation_remove = 0.0
 
-    return output
+    # na_position is empty list since float64 can already hold nans
+    # Do list comprehension, since numba cannot figure out that na_pos is
+    # empty list of ints on its own
+    na_pos = [0 for i in range(0)]
+    return output, na_pos
diff --git a/pandas/core/_numba/kernels/min_max_.py b/pandas/core/_numba/kernels/min_max_.py
index acba66a6e4f63..814deeee9d0d5 100644
--- a/pandas/core/_numba/kernels/min_max_.py
+++ b/pandas/core/_numba/kernels/min_max_.py
@@ -15,14 +15,16 @@
 @numba.jit(nopython=True, nogil=True, parallel=False)
 def sliding_min_max(
     values: np.ndarray,
+    result_dtype: np.dtype,
     start: np.ndarray,
     end: np.ndarray,
     min_periods: int,
     is_max: bool,
-) -> np.ndarray:
+) -> tuple[np.ndarray, list[int]]:
     N = len(start)
     nobs = 0
-    output = np.empty(N, dtype=np.float64)
+    output = np.empty(N, dtype=result_dtype)
+    na_pos = []
     # Use deque once numba supports it
     # https://github.com/numba/numba/issues/7417
     Q: list = []
@@ -64,6 +66,9 @@ def sliding_min_max(
         if Q and curr_win_size > 0 and nobs >= min_periods:
             output[i] = values[Q[0]]
         else:
-            output[i] = np.nan
+            if values.dtype.kind != "i":
+                output[i] = np.nan
+            else:
+                na_pos.append(i)
 
-    return output
+    return output, na_pos
diff --git a/pandas/core/_numba/kernels/sum_.py b/pandas/core/_numba/kernels/sum_.py
index 056897189fe67..e834f1410f51a 100644
--- a/pandas/core/_numba/kernels/sum_.py
+++ b/pandas/core/_numba/kernels/sum_.py
@@ -8,6 +8,8 @@
 """
 from __future__ import annotations
 
+from typing import Any
+
 import numba
 import numpy as np
 
@@ -16,13 +18,13 @@
 
 @numba.jit(nopython=True, nogil=True, parallel=False)
 def add_sum(
-    val: float,
+    val: Any,
     nobs: int,
-    sum_x: float,
-    compensation: float,
+    sum_x: Any,
+    compensation: Any,
     num_consecutive_same_value: int,
-    prev_value: float,
-) -> tuple[int, float, float, int, float]:
+    prev_value: Any,
+) -> tuple[int, Any, Any, int, Any]:
     if not np.isnan(val):
         nobs += 1
         y = val - compensation
@@ -41,8 +43,8 @@ def add_sum(
 
 @numba.jit(nopython=True, nogil=True, parallel=False)
 def remove_sum(
-    val: float, nobs: int, sum_x: float, compensation: float
-) -> tuple[int, float, float]:
+    val: Any, nobs: int, sum_x: Any, compensation: Any
+) -> tuple[int, Any, Any]:
     if not np.isnan(val):
         nobs -= 1
         y = -val - compensation
@@ -55,21 +57,29 @@ def remove_sum(
 @numba.jit(nopython=True, nogil=True, parallel=False)
 def sliding_sum(
     values: np.ndarray,
+    result_dtype: np.dtype,
     start: np.ndarray,
     end: np.ndarray,
     min_periods: int,
-) -> np.ndarray:
+) -> tuple[np.ndarray, list[int]]:
+    dtype = values.dtype
+
+    na_val: object = np.nan
+    if dtype.kind == "i":
+        na_val = 0
+
     N = len(start)
     nobs = 0
-    sum_x = 0.0
-    compensation_add = 0.0
-    compensation_remove = 0.0
+    sum_x = 0
+    compensation_add = 0
+    compensation_remove = 0
+    na_pos = []
 
     is_monotonic_increasing_bounds = is_monotonic_increasing(
         start
     ) and is_monotonic_increasing(end)
 
-    output = np.empty(N, dtype=np.float64)
+    output = np.empty(N, dtype=result_dtype)
 
     for i in range(N):
         s = start[i]
@@ -119,20 +129,22 @@ def sliding_sum(
                 )
 
         if nobs == 0 == min_periods:
-            result = 0.0
+            result: object = 0
         elif nobs >= min_periods:
             if num_consecutive_same_value >= nobs:
                 result = prev_value * nobs
             else:
                 result = sum_x
         else:
-            result = np.nan
+            result = na_val
+            if dtype.kind == "i":
+                na_pos.append(i)
 
         output[i] = result
 
         if not is_monotonic_increasing_bounds:
             nobs = 0
-            sum_x = 0.0
-            compensation_remove = 0.0
+            sum_x = 0
+            compensation_remove = 0
 
-    return output
+    return output, na_pos
diff --git a/pandas/core/_numba/kernels/var_.py b/pandas/core/_numba/kernels/var_.py
index d3243f4928dca..763daada73b4d 100644
--- a/pandas/core/_numba/kernels/var_.py
+++ b/pandas/core/_numba/kernels/var_.py
@@ -68,11 +68,12 @@ def remove_var(
 @numba.jit(nopython=True, nogil=True, parallel=False)
 def sliding_var(
     values: np.ndarray,
+    result_dtype: np.dtype,
     start: np.ndarray,
     end: np.ndarray,
     min_periods: int,
     ddof: int = 1,
-) -> np.ndarray:
+) -> tuple[np.ndarray, list[int]]:
     N = len(start)
     nobs = 0
     mean_x = 0.0
@@ -85,7 +86,7 @@ def sliding_var(
         start
     ) and is_monotonic_increasing(end)
 
-    output = np.empty(N, dtype=np.float64)
+    output = np.empty(N, dtype=result_dtype)
 
     for i in range(N):
         s = start[i]
@@ -110,7 +111,7 @@ def sliding_var(
                     ssqdm_x,
                     compensation_add,
                     num_consecutive_same_value,
-                    prev_value,
+                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                 )
         else:
             for j in range(start[i - 1], s):
@@ -135,7 +136,7 @@ def sliding_var(
                     ssqdm_x,
                     compensation_add,
                     num_consecutive_same_value,
-                    prev_value,
+                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                 )
 
         if nobs >= min_periods and nobs > ddof:
@@ -154,4 +155,8 @@ def sliding_var(
             ssqdm_x = 0.0
             compensation_remove = 0.0
 
-    return output
+    # na_position is empty list since float64 can already hold nans
+    # Do list comprehension, since numba cannot figure out that na_pos is
+    # empty list of ints on its own
+    na_pos = [0 for i in range(0)]
+    return output, na_pos
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 3a0fa1261701c..c3574829f9b0e 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -838,6 +838,31 @@ def value_counts(
     -------
     Series
     """
+    warnings.warn(
+        # GH#53493
+        "pandas.value_counts is deprecated and will be removed in a "
+        "future version. Use pd.Series(obj).value_counts() instead.",
+        FutureWarning,
+        stacklevel=find_stack_level(),
+    )
+    return value_counts_internal(
+        values,
+        sort=sort,
+        ascending=ascending,
+        normalize=normalize,
+        bins=bins,
+        dropna=dropna,
+    )
+
+
+def value_counts_internal(
+    values,
+    sort: bool = True,
+    ascending: bool = False,
+    normalize: bool = False,
+    bins=None,
+    dropna: bool = True,
+) -> Series:
     from pandas import (
         Index,
         Series,
@@ -1678,8 +1703,8 @@ def union_with_duplicates(
     """
     from pandas import Series
 
-    l_count = value_counts(lvals, dropna=False)
-    r_count = value_counts(rvals, dropna=False)
+    l_count = value_counts_internal(lvals, dropna=False)
+    r_count = value_counts_internal(rvals, dropna=False)
     l_count, r_count = l_count.align(r_count, fill_value=0)
     final_count = np.maximum(l_count.values, r_count.values)
     final_count = Series(final_count, index=l_count.index, dtype="int", copy=False)
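
The ``algorithms.py`` change above deprecates the top-level ``pandas.value_counts`` while routing internal callers to ``value_counts_internal``. At the user level, the migration suggested by the warning message is simply:

    import pandas as pd

    data = ["a", "b", "a"]
    # pd.value_counts(data) now emits a FutureWarning; the replacement is:
    counts = pd.Series(data).value_counts()
    print(counts)
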
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index d93ecc087844c..0100c17805d76 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -48,7 +48,7 @@
 from pandas.core.algorithms import (
     take,
     unique,
-    value_counts,
+    value_counts_internal as value_counts,
 )
 from pandas.core.array_algos.quantile import quantile_with_mask
 from pandas.core.array_algos.transforms import shift
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 817d5d0932744..85a75fff25ebd 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -347,9 +347,9 @@ def _box_pa(
         -------
         pa.Array or pa.ChunkedArray or pa.Scalar
         """
-        if is_list_like(value):
-            return cls._box_pa_array(value, pa_type)
-        return cls._box_pa_scalar(value, pa_type)
+        if isinstance(value, pa.Scalar) or not is_list_like(value):
+            return cls._box_pa_scalar(value, pa_type)
+        return cls._box_pa_array(value, pa_type)
 
     @classmethod
     def _box_pa_scalar(cls, value, pa_type: pa.DataType | None = None) -> pa.Scalar:
@@ -1131,13 +1131,7 @@ def take(
         it's called by :meth:`Series.reindex`, or any other method
         that causes realignment, with a `fill_value`.
         """
-        # TODO: Remove once we got rid of the (indices < 0) check
-        if not is_array_like(indices):
-            indices_array = np.asanyarray(indices)
-        else:
-            # error: Incompatible types in assignment (expression has type
-            # "Sequence[int]", variable has type "ndarray")
-            indices_array = indices  # type: ignore[assignment]
+        indices_array = np.asanyarray(indices)
 
         if len(self._pa_array) == 0 and (indices_array >= 0).any():
             raise IndexError("cannot do a non-empty take")
@@ -1549,6 +1543,24 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
 
         return result.as_py()
 
+    def _explode(self):
+        """
+        See Series.explode.__doc__.
+        """
+        values = self
+        counts = pa.compute.list_value_length(values._pa_array)
+        counts = counts.fill_null(1).to_numpy()
+        fill_value = pa.scalar([None], type=self._pa_array.type)
+        mask = counts == 0
+        if mask.any():
+            values = values.copy()
+            values[mask] = fill_value
+            counts = counts.copy()
+            counts[mask] = 1
+        values = values.fillna(fill_value)
+        values = type(self)(pa.compute.list_flatten(values._pa_array))
+        return values, counts
+
     def __setitem__(self, key, value) -> None:
         """Set one or more values inplace.
 
@@ -1591,10 +1603,10 @@ def __setitem__(self, key, value) -> None:
                 raise IndexError(
                     f"index {key} is out of bounds for axis 0 with size {n}"
                 )
-            if is_list_like(value):
-                raise ValueError("Length of indexer and values mismatch")
-            elif isinstance(value, pa.Scalar):
+            if isinstance(value, pa.Scalar):
                 value = value.as_py()
+            elif is_list_like(value):
+                raise ValueError("Length of indexer and values mismatch")
             chunks = [
                 *self._pa_array[:key].chunks,
                 pa.array([value], type=self._pa_array.type, from_pandas=True),
@@ -2073,7 +2085,12 @@ def _str_get(self, i: int):
         return type(self)(result)
 
     def _str_join(self, sep: str):
-        return type(self)(pc.binary_join(self._pa_array, sep))
+        if pa.types.is_string(self._pa_array.type):
+            result = self._apply_elementwise(list)
+            result = pa.chunked_array(result, type=pa.list_(pa.string()))
+        else:
+            result = self._pa_array
+        return type(self)(pc.binary_join(result, sep))
 
     def _str_partition(self, sep: str, expand: bool):
         predicate = lambda val: val.partition(sep)
@@ -2216,17 +2233,19 @@ def _str_findall(self, pat: str, flags: int = 0):
         return type(self)(pa.chunked_array(result))
 
     def _str_get_dummies(self, sep: str = "|"):
-        split = pc.split_pattern(self._pa_array, sep).combine_chunks()
-        uniques = split.flatten().unique()
+        split = pc.split_pattern(self._pa_array, sep)
+        flattened_values = pc.list_flatten(split)
+        uniques = flattened_values.unique()
         uniques_sorted = uniques.take(pa.compute.array_sort_indices(uniques))
-        result_data = []
-        for lst in split.to_pylist():
-            if lst is None:
-                result_data.append([False] * len(uniques_sorted))
-            else:
-                res = pc.is_in(uniques_sorted, pa.array(set(lst)))
-                result_data.append(res.to_pylist())
-        result = type(self)(pa.array(result_data))
+        lengths = pc.list_value_length(split).fill_null(0).to_numpy()
+        n_rows = len(self)
+        n_cols = len(uniques)
+        indices = pc.index_in(flattened_values, uniques_sorted).to_numpy()
+        indices = indices + np.arange(n_rows).repeat(lengths) * n_cols
+        dummies = np.zeros(n_rows * n_cols, dtype=np.bool_)
+        dummies[indices] = True
+        dummies = dummies.reshape((n_rows, n_cols))
+        result = type(self)(pa.array(list(dummies)))
         return result, uniques_sorted.to_pylist()
 
     def _str_index(self, sub: str, start: int = 0, end: int | None = None):
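The `_str_get_dummies` rewrite above replaces the per-row Python loop with a single flat scatter: each flattened value's column index is offset by `row * n_cols`, so one boolean assignment fills the whole dummy matrix at once. A standalone NumPy sketch of that indexing trick (illustrative names only, not the pandas API):

    import numpy as np

    rows = [["a", "b"], ["b"], []]                 # values per row after splitting
    uniques = np.array(["a", "b"])                 # sorted unique values
    flat = np.array([v for row in rows for v in row])
    lengths = np.array([len(row) for row in rows])

    n_rows, n_cols = len(rows), len(uniques)
    col_idx = np.searchsorted(uniques, flat)       # stand-in for pc.index_in
    flat_idx = col_idx + np.arange(n_rows).repeat(lengths) * n_cols
    dummies = np.zeros(n_rows * n_cols, dtype=bool)
    dummies[flat_idx] = True
    print(dummies.reshape(n_rows, n_cols))
    # [[ True  True]
    #  [False  True]
    #  [False False]]
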
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 27eb7994d3ccb..4ebc312985aed 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -993,7 +993,6 @@ def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
         Returns
         -------
         values : ndarray
-
             An array suitable for factorization. This should maintain order
             and be a supported dtype (Float64, Int64, UInt64, String, Object).
             By default, the extension array is cast to object dtype.
@@ -1002,6 +1001,12 @@ def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
             as NA in the factorization routines, so it will be coded as
             `-1` and not included in `uniques`. By default,
             ``np.nan`` is used.
+
+        Notes
+        -----
+        The values returned by this method are also used in
+        :func:`pandas.util.hash_pandas_object`. If needed, this can be
+        overridden in the ``self._hash_pandas_object()`` method.
         """
         return self.astype(object), np.nan
 
@@ -1449,7 +1454,7 @@ def _hash_pandas_object(
         """
         Hook for hash_pandas_object.
 
-        Default is likely non-performant.
+        Default is to use the values returned by _values_for_factorize.
 
         Parameters
         ----------
@@ -1463,7 +1468,7 @@ def _hash_pandas_object(
         """
         from pandas.core.util.hashing import hash_array
 
-        values = self.to_numpy(copy=False)
+        values, _ = self._values_for_factorize()
         return hash_array(
             values, encoding=encoding, hash_key=hash_key, categorize=categorize
         )
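With the change above, the default `_hash_pandas_object` hashes whatever `_values_for_factorize` returns, so an extension author who overrides the factorize hook gets consistent hashing for free. From the user side the entry point is unchanged; a small sketch (output not shown):

    import pandas as pd

    s = pd.Series(["a", "b", None], dtype="string")
    # hash_pandas_object routes through ExtensionArray._hash_pandas_object,
    # which now hashes the values returned by _values_for_factorize by default.
    print(pd.util.hash_pandas_object(s, index=False))
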
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 97ed64856fe3b..a226c71934a64 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -481,6 +481,15 @@ def __init__(
     def dtype(self) -> CategoricalDtype:
         """
         The :class:`~pandas.api.types.CategoricalDtype` for this instance.
+
+        Examples
+        --------
+        >>> cat = pd.Categorical(['a', 'b'], ordered=True)
+        >>> cat
+        ['a', 'b']
+        Categories (2, object): ['a' < 'b']
+        >>> cat.dtype
+        CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object)
         """
         return self._dtype
 
@@ -751,6 +760,9 @@ def categories(self) -> Index:
 
         Examples
         --------
+
+        For Series:
+
         >>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
         >>> ser.cat.categories
         Index(['a', 'b', 'c'], dtype='object')
@@ -759,6 +771,12 @@ def categories(self) -> Index:
         >>> ser = pd.Series(raw_cat)
         >>> ser.cat.categories
         Index(['b', 'c', 'd'], dtype='object')
+
+        For Categorical:
+
+        >>> cat = pd.Categorical(['a', 'b'], ordered=True)
+        >>> cat.categories
+        Index(['a', 'b'], dtype='object')
         """
         return self.dtype.categories
 
@@ -769,6 +787,9 @@ def ordered(self) -> Ordered:
 
         Examples
         --------
+
+        For Series:
+
         >>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
         >>> ser.cat.ordered
         False
@@ -777,6 +798,16 @@ def ordered(self) -> Ordered:
         >>> ser = pd.Series(raw_cat)
         >>> ser.cat.ordered
         True
+
+        For Categorical:
+
+        >>> cat = pd.Categorical(['a', 'b'], ordered=True)
+        >>> cat.ordered
+        True
+
+        >>> cat = pd.Categorical(['a', 'b'], ordered=False)
+        >>> cat.ordered
+        False
         """
         return self.dtype.ordered
 
@@ -795,6 +826,12 @@ def codes(self) -> np.ndarray:
         -------
         ndarray[int]
             A non-writable view of the `codes` array.
+
+        Examples
+        --------
+        >>> cat = pd.Categorical(['a', 'b'], ordered=True)
+        >>> cat.codes
+        array([0, 1], dtype=int8)
         """
         v = self._codes.view()
         v.flags.writeable = False
@@ -1492,6 +1529,16 @@ def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
             A numpy array of either the specified dtype or,
             if dtype==None (default), the same dtype as
             categorical.categories.dtype.
+
+        Examples
+        --------
+
+        >>> cat = pd.Categorical(['a', 'b'], ordered=True)
+
+        The following calls ``cat.__array__``
+
+        >>> np.asarray(cat)
+        array(['a', 'b'], dtype=object)
         """
         ret = take_nd(self.categories._values, self._codes)
         if dtype and np.dtype(dtype) != self.categories.dtype:
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 8c57496e092c4..ea085b3d1f6ab 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -106,6 +106,7 @@
 
 from pandas.core import (
     algorithms,
+    missing,
     nanops,
 )
 from pandas.core.algorithms import (
@@ -142,6 +143,7 @@
 from pandas.tseries import frequencies
 
 if TYPE_CHECKING:
+    from pandas import Index
     from pandas.core.arrays import (
         DatetimeArray,
         PeriodArray,
@@ -2228,6 +2230,46 @@ def copy(self, order: str = "C") -> Self:
         new_obj._freq = self.freq
         return new_obj
 
+    def interpolate(
+        self,
+        *,
+        method,
+        axis: int,
+        index: Index | None,
+        limit,
+        limit_direction,
+        limit_area,
+        fill_value,
+        inplace: bool,
+        **kwargs,
+    ) -> Self:
+        """
+        See NDFrame.interpolate.__doc__.
+        """
+        # NB: we return type(self) even if inplace=True
+        if method != "linear":
+            raise NotImplementedError
+
+        if inplace:
+            out_data = self._ndarray
+        else:
+            out_data = self._ndarray.copy()
+
+        missing.interpolate_array_2d(
+            out_data,
+            method=method,
+            axis=axis,
+            index=index,
+            limit=limit,
+            limit_direction=limit_direction,
+            limit_area=limit_area,
+            fill_value=fill_value,
+            **kwargs,
+        )
+        if inplace:
+            return self
+        return type(self)._simple_new(out_data, dtype=self.dtype)
+
 
 # -------------------------------------------------------------------
 # Shared Constructor Helpers
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 28b56a220f005..d6afba8c34904 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -183,6 +183,14 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):  # type: ignore[misc]
     Methods
     -------
     None
+
+    Examples
+    --------
+    >>> pd.arrays.DatetimeArray(pd.DatetimeIndex(['2023-01-01', '2023-01-02']),
+    ...                         freq='D')
+    <DatetimeArray>
+    ['2023-01-01 00:00:00', '2023-01-02 00:00:00']
+    Length: 2, dtype: datetime64[ns]
     """
 
     _typ = "datetimearray"
@@ -1145,13 +1153,13 @@ def to_period(self, freq=None) -> PeriodArray:
 
         Parameters
         ----------
-        freq : str or Offset, optional
-            One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
-            or an Offset object. Will be inferred by default.
+        freq : str or Period, optional
+            One of pandas' :ref:`period aliases <timeseries.period_aliases>`
+            or a Period object. Will be inferred by default.
 
         Returns
         -------
-        PeriodArray/Index
+        PeriodArray/PeriodIndex
 
         Raises
         ------
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 2842d8267b7c6..7f874a07341eb 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -79,7 +79,7 @@
     isin,
     take,
     unique,
-    value_counts,
+    value_counts_internal as value_counts,
 )
 from pandas.core.arrays.base import (
     ExtensionArray,
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 702180b5d779a..113f22ad968bc 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -19,6 +19,7 @@
 
 from pandas.core import (
     arraylike,
+    missing,
     nanops,
     ops,
 )
@@ -33,9 +34,12 @@
         Dtype,
         NpDtype,
         Scalar,
+        Self,
         npt,
     )
 
+    from pandas import Index
+
 
 # error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
 # incompatible with definition in base class "ExtensionArray"
@@ -220,6 +224,43 @@ def _values_for_factorize(self) -> tuple[np.ndarray, float | None]:
             fv = np.nan
         return self._ndarray, fv
 
+    def interpolate(
+        self,
+        *,
+        method,
+        axis: int,
+        index: Index | None,
+        limit,
+        limit_direction,
+        limit_area,
+        fill_value,
+        inplace: bool,
+        **kwargs,
+    ) -> Self:
+        """
+        See NDFrame.interpolate.__doc__.
+        """
+        # NB: we return type(self) even if inplace=True
+        if inplace:
+            out_data = self._ndarray
+        else:
+            out_data = self._ndarray.copy()
+
+        missing.interpolate_array_2d(
+            out_data,
+            method=method,
+            axis=axis,
+            index=index,
+            limit=limit,
+            limit_direction=limit_direction,
+            limit_area=limit_area,
+            fill_value=fill_value,
+            **kwargs,
+        )
+        if inplace:
+            return self
+        return type(self)._simple_new(out_data, dtype=self.dtype)
+
     # ------------------------------------------------------------------------
     # Reductions
 
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 266dda52c6d0d..c9c2d258a9a16 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -163,6 +163,14 @@ class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin):  # type: ignore[misc]
 
     The `freq` indicates the span covered by each element of the array.
     All elements in the PeriodArray have the same `freq`.
+
+    Examples
+    --------
+    >>> pd.arrays.PeriodArray(pd.PeriodIndex(['2023-01-01',
+    ...                                       '2023-01-02'], freq='D'))
+    <PeriodArray>
+    ['2023-01-01', '2023-01-02']
+    Length: 2, dtype: period[D]
     """
 
     # array priority higher than numpy scalars
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 16e7835a7183d..1872176394d02 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -42,7 +42,6 @@
     maybe_box_datetimelike,
 )
 from pandas.core.dtypes.common import (
-    is_array_like,
     is_bool_dtype,
     is_integer,
     is_list_like,
@@ -428,19 +427,16 @@ def __init__(
             # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
             data = np.array([], dtype=dtype)  # type: ignore[arg-type]
 
-        if not is_array_like(data):
-            try:
-                # probably shared code in sanitize_series
-
-                data = sanitize_array(data, index=None)
-            except ValueError:
-                # NumPy may raise a ValueError on data like [1, []]
-                # we retry with object dtype here.
-                if dtype is None:
-                    dtype = np.dtype(object)
-                    data = np.atleast_1d(np.asarray(data, dtype=dtype))
-                else:
-                    raise
+        try:
+            data = sanitize_array(data, index=None)
+        except ValueError:
+            # NumPy may raise a ValueError on data like [1, []]
+            # we retry with object dtype here.
+            if dtype is None:
+                dtype = np.dtype(object)
+                data = np.atleast_1d(np.asarray(data, dtype=dtype))
+            else:
+                raise
 
         if copy:
             # TODO: avoid double copy when dtype forces cast.
@@ -769,7 +765,12 @@ def fillna(
             )
             new_values = np.asarray(self)
             # interpolate_2d modifies new_values inplace
-            interpolate_2d(new_values, method=method, limit=limit)
+            # error: Argument "method" to "interpolate_2d" has incompatible type
+            # "Literal['backfill', 'bfill', 'ffill', 'pad']"; expected
+            # "Literal['pad', 'backfill']"
+            interpolate_2d(
+                new_values, method=method, limit=limit  # type: ignore[arg-type]
+            )
             return type(self)(new_values, fill_value=self.fill_value)
 
         else:
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index d2cd183f26173..a6579879cab96 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -496,7 +496,7 @@ def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
         return self._wrap_reduction_result(axis, result)
 
     def value_counts(self, dropna: bool = True) -> Series:
-        from pandas import value_counts
+        from pandas.core.algorithms import value_counts_internal as value_counts
 
         result = value_counts(self._ndarray, dropna=dropna).astype("Int64")
         result.index = result.index.astype(self.dtype)
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index bf62c327de2f0..2516e687cdbab 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -132,6 +132,13 @@ class TimedeltaArray(dtl.TimelikeOps):
     Methods
     -------
     None
+
+    Examples
+    --------
+    >>> pd.arrays.TimedeltaArray(pd.TimedeltaIndex(['1H', '2H']))
+    <TimedeltaArray>
+    ['0 days 01:00:00', '0 days 02:00:00']
+    Length: 2, dtype: timedelta64[ns]
     """
 
     _typ = "timedeltaarray"
diff --git a/pandas/core/base.py b/pandas/core/base.py
index f66abaa17d8a7..d4a808f4d7dd1 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -958,7 +958,7 @@ def value_counts(
         NaN    1
         Name: count, dtype: int64
         """
-        return algorithms.value_counts(
+        return algorithms.value_counts_internal(
             self,
             sort=sort,
             ascending=ascending,
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 831b368f58225..f65cb94df293e 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -79,6 +79,8 @@
     notna,
 )
 
+from pandas.io._util import _arrow_dtype_mapping
+
 if TYPE_CHECKING:
     from pandas._typing import (
         ArrayLike,
@@ -562,9 +564,16 @@ def maybe_promote(dtype: np.dtype, fill_value=np.nan):
         If fill_value is a non-scalar and dtype is not object.
     """
     orig = fill_value
+    orig_is_nat = False
     if checknull(fill_value):
         # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740
         #  avoid cache misses with NaN/NaT values that are not singletons
+        if fill_value is not NA:
+            try:
+                orig_is_nat = np.isnat(fill_value)
+            except TypeError:
+                pass
+
         fill_value = _canonical_nans.get(type(fill_value), fill_value)
 
     # for performance, we are using a cached version of the actual implementation
@@ -580,8 +589,10 @@ def maybe_promote(dtype: np.dtype, fill_value=np.nan):
         # if fill_value is not hashable (required for caching)
         dtype, fill_value = _maybe_promote(dtype, fill_value)
 
-    if dtype == _dtype_obj and orig is not None:
-        # GH#51592 restore our potentially non-canonical fill_value
+    if (dtype == _dtype_obj and orig is not None) or (
+        orig_is_nat and np.datetime_data(orig)[0] != "ns"
+    ):
+        # GH#51592,53497 restore our potentially non-canonical fill_value
         fill_value = orig
     return dtype, fill_value
 
@@ -1110,6 +1121,9 @@ def convert_dtypes(
             pa_type = to_pyarrow_type(base_dtype)
             if pa_type is not None:
                 inferred_dtype = ArrowDtype(pa_type)
+    elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype):
+        # GH 53648
+        inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype]
 
     # error: Incompatible return value type (got "Union[str, Union[dtype[Any],
     # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]")
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 34a7c0c0fc10f..50fc5231a3a76 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -610,6 +610,12 @@ def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype:
     def categories(self) -> Index:
         """
         An ``Index`` containing the unique categories allowed.
+
+        Examples
+        --------
+        >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
+        >>> cat_type.categories
+        Index(['a', 'b'], dtype='object')
         """
         return self._categories
 
@@ -617,6 +623,16 @@ def categories(self) -> Index:
     def ordered(self) -> Ordered:
         """
         Whether the categories have an ordered relationship.
+
+        Examples
+        --------
+        >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
+        >>> cat_type.ordered
+        True
+
+        >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False)
+        >>> cat_type.ordered
+        False
         """
         return self._ordered
 
@@ -1665,17 +1681,6 @@ def _check_fill_value(self):
                     FutureWarning,
                     stacklevel=find_stack_level(),
                 )
-        elif isinstance(self.subtype, CategoricalDtype):
-            # TODO: is this even supported?  It is reached in
-            #  test_dtype_sparse_with_fill_value_not_present_in_data
-            if self.subtype.categories is None or val not in self.subtype.categories:
-                warnings.warn(
-                    "Allowing arbitrary scalar fill_value in SparseDtype is "
-                    "deprecated. In a future version, the fill_value must be "
-                    "a valid value for the SparseDtype.subtype.",
-                    FutureWarning,
-                    stacklevel=find_stack_level(),
-                )
         else:
             dummy = np.empty(0, dtype=self.subtype)
             dummy = ensure_wrapped_if_datetimelike(dummy)
@@ -2214,10 +2219,13 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
         # Mirrors BaseMaskedDtype
         from pandas.core.dtypes.cast import find_common_type
 
+        null_dtype = type(self)(pa.null())
+
         new_dtype = find_common_type(
             [
                 dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype
                 for dtype in dtypes
+                if dtype != null_dtype
             ]
         )
         if not isinstance(new_dtype, np.dtype):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3e6c89139d06d..635073efe9357 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -673,9 +673,9 @@ def __init__(
         manager = get_option("mode.data_manager")
 
         # GH47215
-        if index is not None and isinstance(index, set):
+        if isinstance(index, set):
             raise ValueError("index cannot be a set")
-        if columns is not None and isinstance(columns, set):
+        if isinstance(columns, set):
             raise ValueError("columns cannot be a set")
 
         if copy is None:
@@ -2981,6 +2981,7 @@ def to_orc(
         1     2     3
 
         If you want to get a buffer to the orc content you can write it to io.BytesIO
+
         >>> import io
         >>> b = io.BytesIO(df.to_orc())  # doctest: +SKIP
         >>> b.seek(0)  # doctest: +SKIP
@@ -3915,7 +3916,6 @@ def isetitem(self, loc, value) -> None:
         In cases where ``frame.columns`` is unique, this is equivalent to
         ``frame[frame.columns[i]] = value``.
         """
-        using_cow = using_copy_on_write()
         if isinstance(value, DataFrame):
             if is_integer(loc):
                 loc = [loc]
@@ -3927,13 +3927,11 @@ def isetitem(self, loc, value) -> None:
                 )
 
             for i, idx in enumerate(loc):
-                arraylike, refs = self._sanitize_column(
-                    value.iloc[:, i], using_cow=using_cow
-                )
+                arraylike, refs = self._sanitize_column(value.iloc[:, i])
                 self._iset_item_mgr(idx, arraylike, inplace=False, refs=refs)
             return
 
-        arraylike, refs = self._sanitize_column(value, using_cow=using_cow)
+        arraylike, refs = self._sanitize_column(value)
         self._iset_item_mgr(loc, arraylike, inplace=False, refs=refs)
 
     def __setitem__(self, key, value):
@@ -4170,7 +4168,7 @@ def _set_item(self, key, value) -> None:
         Series/TimeSeries will be conformed to the DataFrames index to
         ensure homogeneity.
         """
-        value, refs = self._sanitize_column(value, using_cow=using_copy_on_write())
+        value, refs = self._sanitize_column(value)
 
         if (
             key in self.columns
@@ -4813,7 +4811,7 @@ def insert(
         elif isinstance(value, DataFrame):
             value = value.iloc[:, 0]
 
-        value, refs = self._sanitize_column(value, using_cow=using_copy_on_write())
+        value, refs = self._sanitize_column(value)
         self._mgr.insert(loc, column, value, refs=refs)
 
     def assign(self, **kwargs) -> DataFrame:
@@ -4884,9 +4882,7 @@ def assign(self, **kwargs) -> DataFrame:
             data[k] = com.apply_if_callable(v, data)
         return data
 
-    def _sanitize_column(
-        self, value, using_cow: bool = False
-    ) -> tuple[ArrayLike, BlockValuesRefs | None]:
+    def _sanitize_column(self, value) -> tuple[ArrayLike, BlockValuesRefs | None]:
         """
         Ensures new columns (which go into the BlockManager as new blocks) are
         always copied (or a reference is being tracked to them under CoW)
@@ -4907,7 +4903,7 @@ def _sanitize_column(
         if is_dict_like(value):
             if not isinstance(value, Series):
                 value = Series(value)
-            return _reindex_for_setitem(value, self.index, using_cow=using_cow)
+            return _reindex_for_setitem(value, self.index)
 
         if is_list_like(value):
             com.require_length_match(value, self.index)
@@ -6956,7 +6952,7 @@ def sort_index(
 
     def value_counts(
         self,
-        subset: Sequence[Hashable] | None = None,
+        subset: IndexLabel | None = None,
         normalize: bool = False,
         sort: bool = True,
         ascending: bool = False,
@@ -7080,8 +7076,8 @@ def value_counts(
         if normalize:
             counts /= counts.sum()
 
-        # Force MultiIndex for single column
-        if is_list_like(subset) and len(subset) == 1:
+        # Force MultiIndex for a list_like subset with a single column
+        if is_list_like(subset) and len(subset) == 1:  # type: ignore[arg-type]
             counts.index = MultiIndex.from_arrays(
                 [counts.index], names=[counts.index.name]
             )
@@ -8348,7 +8344,13 @@ def combiner(x, y):
 
             return expressions.where(mask, y_values, x_values)
 
-        combined = self.combine(other, combiner, overwrite=False)
+        if len(other) == 0:
+            combined = self.reindex(
+                self.columns.append(other.columns.difference(self.columns)), axis=1
+            )
+            combined = combined.astype(other.dtypes)
+        else:
+            combined = self.combine(other, combiner, overwrite=False)
 
         dtypes = {
             col: find_common_type([self.dtypes[col], other.dtypes[col]])
@@ -8827,22 +8829,22 @@ def pivot(self, *, columns, index=lib.NoDefault, values=lib.NoDefault) -> DataFr
         values : list-like or scalar, optional
             Column or columns to aggregate.
         index : column, Grouper, array, or list of the previous
-            If an array is passed, it must be the same length as the data. The
-            list can contain any of the other types (except list).
-            Keys to group by on the pivot table index.  If an array is passed,
-            it is being used as the same manner as column values.
+            Keys to group by on the pivot table index. If a list is passed,
+            it can contain any of the other types (except list). If an array is
+            passed, it must be the same length as the data and will be used in
+            the same manner as column values.
         columns : column, Grouper, array, or list of the previous
-            If an array is passed, it must be the same length as the data. The
-            list can contain any of the other types (except list).
-            Keys to group by on the pivot table column.  If an array is passed,
-            it is being used as the same manner as column values.
+            Keys to group by on the pivot table column. If a list is passed,
+            it can contain any of the other types (except list). If an array is
+            passed, it must be the same length as the data and will be used in
+            the same manner as column values.
         aggfunc : function, list of functions, dict, default numpy.mean
-            If list of functions passed, the resulting pivot table will have
+            If a list of functions is passed, the resulting pivot table will have
             hierarchical columns whose top level are the function names
-            (inferred from the function objects themselves)
-            If dict is passed, the key is column to aggregate and value
-            is function or list of functions. If ``margin=True``,
-            aggfunc will be used to calculate the partial aggregates.
+            (inferred from the function objects themselves).
+            If a dict is passed, the key is the column to aggregate and the
+            value is a function or list of functions. If ``margins=True``,
+            aggfunc will be used to calculate the partial aggregates.
         fill_value : scalar, default None
             Value to replace missing values with (in the resulting pivot table,
             after aggregation).
@@ -8991,7 +8993,7 @@ def pivot_table(
             sort=sort,
         )
 
-    def stack(self, level: Level = -1, dropna: bool = True, sort: bool = True):
+    def stack(self, level: IndexLabel = -1, dropna: bool = True, sort: bool = True):
         """
         Stack the prescribed level(s) from columns to index.
 
@@ -9296,7 +9298,7 @@ def explode(
 
         return result.__finalize__(self, method="explode")
 
-    def unstack(self, level: Level = -1, fill_value=None, sort: bool = True):
+    def unstack(self, level: IndexLabel = -1, fill_value=None, sort: bool = True):
         """
         Pivot a level of the (necessarily hierarchical) index labels.
 
@@ -11923,12 +11925,12 @@ def _from_nested_dict(data) -> collections.defaultdict:
 
 
 def _reindex_for_setitem(
-    value: DataFrame | Series, index: Index, using_cow: bool = False
+    value: DataFrame | Series, index: Index
 ) -> tuple[ArrayLike, BlockValuesRefs | None]:
     # reindex if necessary
 
     if value.index.equals(index) or not len(index):
-        if using_cow and isinstance(value, Series):
+        if using_copy_on_write() and isinstance(value, Series):
             return value._values, value._references
         return value._values.copy(), None
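The `combine_first` change in the frame.py hunk above short-circuits the empty-`other` case: instead of running the column-wise combiner, it reindexes `self` to include `other`'s extra columns and aligns dtypes. A small sketch of the affected call (output not asserted):

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2]})
    empty = pd.DataFrame(columns=["a", "b"], dtype="float64")
    # With an empty `other`, combine_first now only reindexes df's columns to
    # include "b" rather than invoking the combiner per column.
    print(df.combine_first(empty))
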
 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 91083f4018c06..bf2bc406920e1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -47,6 +47,7 @@
     AlignJoin,
     AnyArrayLike,
     ArrayLike,
+    Axes,
     Axis,
     AxisInt,
     CompressionOptions,
@@ -143,6 +144,7 @@
     arraylike,
     common,
     indexing,
+    missing,
     nanops,
     sample,
 )
@@ -270,7 +272,7 @@ def __init__(self, data: Manager) -> None:
     def _init_mgr(
         cls,
         mgr: Manager,
-        axes,
+        axes: dict[Literal["index", "columns"], Axes | None],
         dtype: DtypeObj | None = None,
         copy: bool_t = False,
     ) -> Manager:
@@ -2899,6 +2901,36 @@ def to_sql(
         ...    conn.execute(text("SELECT * FROM users")).fetchall()
         [(0, 'User 6'), (1, 'User 7')]
 
+        Use ``method`` to define a callable insertion method to do nothing
+        if there's a primary key conflict on a table in a PostgreSQL database.
+
+        >>> from sqlalchemy.dialects.postgresql import insert
+        >>> def insert_on_conflict_nothing(table, conn, keys, data_iter):
+        ...     # "a" is the primary key in "conflict_table"
+        ...     data = [dict(zip(keys, row)) for row in data_iter]
+        ...     stmt = insert(table.table).values(data).on_conflict_do_nothing(index_elements=["a"])
+        ...     result = conn.execute(stmt)
+        ...     return result.rowcount
+        >>> df_conflict.to_sql("conflict_table", conn, if_exists="append", method=insert_on_conflict_nothing)  # doctest: +SKIP
+        0
+
+        For MySQL, a callable to update columns ``b`` and ``c`` if there's a conflict
+        on a primary key.
+
+        >>> from sqlalchemy.dialects.mysql import insert
+        >>> def insert_on_conflict_update(table, conn, keys, data_iter):
+        ...     # update columns "b" and "c" on primary key conflict
+        ...     data = [dict(zip(keys, row)) for row in data_iter]
+        ...     stmt = (
+        ...         insert(table.table)
+        ...         .values(data)
+        ...     )
+        ...     stmt = stmt.on_duplicate_key_update(b=stmt.inserted.b, c=stmt.inserted.c)
+        ...     result = conn.execute(stmt)
+        ...     return result.rowcount
+        >>> df_conflict.to_sql("conflict_table", conn, if_exists="append", method=insert_on_conflict_update)  # doctest: +SKIP
+        2
+
         Specify the dtype (especially useful for integers with missing values).
         Notice that while pandas is forced to store the data as floating point,
         the database supports nullable integers. When fetching the data with
@@ -3604,6 +3636,7 @@ def to_csv(
         decimal: str = ...,
         errors: OpenFileErrors = ...,
         storage_options: StorageOptions = ...,
+        comment: str | None = ...,
     ) -> str:
         ...
 
@@ -3631,6 +3664,7 @@ def to_csv(
         decimal: str = ...,
         errors: OpenFileErrors = ...,
         storage_options: StorageOptions = ...,
+        comment: str | None = ...,
     ) -> None:
         ...
 
@@ -3662,6 +3696,7 @@ def to_csv(
         decimal: str = ".",
         errors: OpenFileErrors = "strict",
         storage_options: StorageOptions = None,
+        comment: str | None = None,
     ) -> str | None:
         r"""
         Write object to a comma-separated values (csv) file.
@@ -3767,6 +3802,13 @@ def to_csv(
 
             .. versionadded:: 1.2.0
 
+        comment : str, default None
+            If set, the keys and values of ``df.attrs`` will be written to the
+            beginning of the csv file, each key/value pair on its own line and
+            prefixed by this value. To prevent downstream reading issues, this
+            character is removed from ``df.attrs`` if present.
+            Complement of ``pd.read_csv``'s ``comment`` parameter.
+
         Returns
         -------
         None or str
@@ -3833,6 +3875,7 @@ def to_csv(
             doublequote=doublequote,
             escapechar=escapechar,
             storage_options=storage_options,
+            comment=comment,
         )
 
     # ----------------------------------------------------------------------
@@ -3964,7 +4007,6 @@ class  max_speed
             ):
                 return self.copy(deep=None)
         elif self.ndim == 1:
-            # TODO: be consistent here for DataFrame vs Series
             raise TypeError(
                 f"{type(self).__name__}.take requires a sequence of integers, "
                 "not slice."
@@ -7469,6 +7511,39 @@ def replace(
         regex: bool_t = False,
         method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default,
     ) -> Self | None:
+        if method is not lib.no_default:
+            warnings.warn(
+                # GH#33302
+                f"The 'method' keyword in {type(self).__name__}.replace is "
+                "deprecated and will be removed in a future version.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+        elif limit is not None:
+            warnings.warn(
+                # GH#33302
+                f"The 'limit' keyword in {type(self).__name__}.replace is "
+                "deprecated and will be removed in a future version.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+        if (
+            value is lib.no_default
+            and method is lib.no_default
+            and not is_dict_like(to_replace)
+            and regex is False
+        ):
+            # case that goes through _replace_single and defaults to method="pad"
+            warnings.warn(
+                # GH#33302
+                f"{type(self).__name__}.replace without 'value' and with "
+                "non-dict-like 'to_replace' is deprecated "
+                "and will raise in a future version. "
+                "Explicitly specify the new values instead.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+
         if not (
             is_scalar(to_replace)
             or is_re_compilable(to_replace)
@@ -7806,35 +7881,6 @@ def interpolate(
         3    3.0
         dtype: float64
 
-        Filling in ``NaN`` in a Series by padding, but filling at most two
-        consecutive ``NaN`` at a time.
-
-        >>> s = pd.Series([np.nan, "single_one", np.nan,
-        ...                "fill_two_more", np.nan, np.nan, np.nan,
-        ...                4.71, np.nan])
-        >>> s
-        0              NaN
-        1       single_one
-        2              NaN
-        3    fill_two_more
-        4              NaN
-        5              NaN
-        6              NaN
-        7             4.71
-        8              NaN
-        dtype: object
-        >>> s.interpolate(method='pad', limit=2)
-        0              NaN
-        1       single_one
-        2       single_one
-        3    fill_two_more
-        4    fill_two_more
-        5    fill_two_more
-        6              NaN
-        7             4.71
-        8             4.71
-        dtype: object
-
         Filling in ``NaN`` in a Series via polynomial interpolation or splines:
         Both 'polynomial' and 'spline' methods require that you also specify
         an ``order`` (int).
@@ -7899,6 +7945,29 @@ def interpolate(
                 return None
             return self.copy()
 
+        if not isinstance(method, str):
+            raise ValueError("'method' should be a string, not None.")
+        elif method.lower() in fillna_methods:
+            # GH#53581
+            warnings.warn(
+                f"{type(self).__name__}.interpolate with method={method} is "
+                "deprecated and will raise in a future version. "
+                "Use obj.ffill() or obj.bfill() instead.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+        elif np.any(obj.dtypes == object):
+            # GH#53631
+            if not (obj.ndim == 2 and np.all(obj.dtypes == object)):
+                # don't warn in cases that already raise
+                warnings.warn(
+                    f"{type(self).__name__}.interpolate with object dtype is "
+                    "deprecated and will raise in a future version. Call "
+                    "obj.infer_objects(copy=False) before interpolating instead.",
+                    FutureWarning,
+                    stacklevel=find_stack_level(),
+                )
+
         if method not in fillna_methods:
             axis = self._info_axis_number
 
@@ -7907,20 +7976,7 @@ def interpolate(
                 "Only `method=linear` interpolation is supported on MultiIndexes."
             )
 
-        # Set `limit_direction` depending on `method`
-        if limit_direction is None:
-            limit_direction = (
-                "backward" if method in ("backfill", "bfill") else "forward"
-            )
-        else:
-            if method in ("pad", "ffill") and limit_direction != "forward":
-                raise ValueError(
-                    f"`limit_direction` must be 'forward' for method `{method}`"
-                )
-            if method in ("backfill", "bfill") and limit_direction != "backward":
-                raise ValueError(
-                    f"`limit_direction` must be 'backward' for method `{method}`"
-                )
+        limit_direction = missing.infer_limit_direction(limit_direction, method)
 
         if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")):
             raise TypeError(
@@ -7929,32 +7985,8 @@ def interpolate(
                 "column to a numeric dtype."
             )
 
-        # create/use the index
-        if method == "linear":
-            # prior default
-            index = Index(np.arange(len(obj.index)))
-        else:
-            index = obj.index
-            methods = {"index", "values", "nearest", "time"}
-            is_numeric_or_datetime = (
-                is_numeric_dtype(index.dtype)
-                or isinstance(index.dtype, DatetimeTZDtype)
-                or lib.is_np_dtype(index.dtype, "mM")
-            )
-            if method not in methods and not is_numeric_or_datetime:
-                raise ValueError(
-                    "Index column must be numeric or datetime type when "
-                    f"using {method} method other than linear. "
-                    "Try setting a numeric or datetime index column before "
-                    "interpolating."
-                )
+        index = missing.get_interp_index(method, obj.index)
 
-        if isna(index).any():
-            raise NotImplementedError(
-                "Interpolation with NaNs in the index "
-                "has not been implemented. Try filling "
-                "those NaNs before interpolating."
-            )
         new_data = obj._mgr.interpolate(
             method=method,
             axis=axis,
@@ -8140,13 +8172,13 @@ def asof(self, where, subset=None):
         locs = self.index.asof_locs(where, ~(nulls._values))
 
         # mask the missing
-        missing = locs == -1
+        mask = locs == -1
         data = self.take(locs)
         data.index = where
-        if missing.any():
+        if mask.any():
             # GH#16063 only do this setting when necessary, otherwise
             #  we'd cast e.g. bools to floats
-            data.loc[missing] = np.nan
+            data.loc[mask] = np.nan
         return data if is_list else data.iloc[-1]
 
     # ----------------------------------------------------------------------
@@ -9300,6 +9332,11 @@ def last(self, offset) -> Self:
         at_time : Select values at a particular time of the day.
         between_time : Select values between particular times of the day.
 
+        Notes
+        -----
+        .. deprecated:: 2.1.0
+            Please create a mask and filter using `.loc` instead
+
         Examples
         --------
         >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
@@ -9313,7 +9350,7 @@ def last(self, offset) -> Self:
 
         Get the rows for the last 3 days:
 
-        >>> ts.last('3D')
+        >>> ts.last('3D') # doctest: +SKIP
                     A
         2018-04-13  3
         2018-04-15  4
@@ -9322,6 +9359,13 @@ def last(self, offset) -> Self:
         3 observed days in the dataset, and therefore data for 2018-04-11 was
         not returned.
         """
+        warnings.warn(
+            "last is deprecated and will be removed in a future version. "
+            "Please create a mask and filter using `.loc` instead",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+
         if not isinstance(self.index, DatetimeIndex):
             raise TypeError("'last' only supports a DatetimeIndex index")
 
@@ -9735,7 +9779,9 @@ def align(
             method = None
         if limit is lib.no_default:
             limit = None
-        method = clean_fill_method(method)
+
+        if method is not None:
+            method = clean_fill_method(method)
 
         if broadcast_axis is not lib.no_default:
             # GH#51856
@@ -11219,8 +11265,8 @@ def describe(
     def pct_change(
         self,
         periods: int = 1,
-        fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad",
-        limit: int | None = None,
+        fill_method: FillnaOptions | None | lib.NoDefault = lib.no_default,
+        limit: int | None | lib.NoDefault = lib.no_default,
         freq=None,
         **kwargs,
     ) -> Self:
@@ -11244,8 +11290,14 @@ def pct_change(
             Periods to shift for forming percent change.
         fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
             How to handle NAs **before** computing percent changes.
+
+            .. deprecated:: 2.1
+
         limit : int, default None
             The number of consecutive NAs to fill before stopping.
+
+            .. deprecated:: 2.1
+
         freq : DateOffset, timedelta, or str, optional
             Increment to use from time series API (e.g. 'M' or BDay()).
         **kwargs
@@ -11298,7 +11350,7 @@ def pct_change(
         3    85.0
         dtype: float64
 
-        >>> s.pct_change(fill_method='ffill')
+        >>> s.ffill().pct_change()
         0         NaN
         1    0.011111
         2    0.000000
@@ -11345,6 +11397,31 @@ def pct_change(
         GOOG  0.179241  0.094112   NaN
         APPL -0.252395 -0.011860   NaN
         """
+        # GH#53491
+        if fill_method is not lib.no_default or limit is not lib.no_default:
+            warnings.warn(
+                "The 'fill_method' and 'limit' keywords in "
+                f"{type(self).__name__}.pct_change are deprecated and will be "
+                "removed in a future version. Call "
+                f"{'bfill' if fill_method in ('backfill', 'bfill') else 'ffill'} "
+                "before calling pct_change instead.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+        if fill_method is lib.no_default:
+            if self.isna().values.any():
+                warnings.warn(
+                    "The default fill_method='pad' in "
+                    f"{type(self).__name__}.pct_change is deprecated and will be "
+                    "removed in a future version. Call ffill before calling "
+                    "pct_change to retain current behavior and silence this warning.",
+                    FutureWarning,
+                    stacklevel=find_stack_level(),
+                )
+            fill_method = "pad"
+        if limit is lib.no_default:
+            limit = None
+
         axis = self._get_axis_number(kwargs.pop("axis", "index"))
         if fill_method is None:
             data = self
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 3e4da10d8b25b..43854c5849481 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -523,10 +523,16 @@ def _cython_transform(
 
         return obj._constructor(result, index=self.obj.index, name=obj.name)
 
-    def _transform_general(self, func: Callable, *args, **kwargs) -> Series:
+    def _transform_general(
+        self, func: Callable, engine, engine_kwargs, *args, **kwargs
+    ) -> Series:
         """
         Transform with a callable `func`.
         """
+        if maybe_use_numba(engine):
+            return self._transform_with_numba(
+                func, *args, engine_kwargs=engine_kwargs, **kwargs
+            )
         assert callable(func)
         klass = type(self.obj)
 
@@ -622,6 +628,22 @@ def nunique(self, dropna: bool = True) -> Series | DataFrame:
         -------
         Series
             Number of unique values within each group.
+
+        Examples
+        --------
+
+        >>> lst = ['a', 'a', 'b', 'b']
+        >>> ser = pd.Series([1, 2, 3, 3], index=lst)
+        >>> ser
+        a    1
+        a    2
+        b    3
+        b    3
+        dtype: int64
+        >>> ser.groupby(level=0).nunique()
+        a    2
+        b    1
+        dtype: int64
         """
         ids, _, _ = self.grouper.group_info
 
@@ -807,7 +829,12 @@ def value_counts(
 
             right = [diff.cumsum() - 1, codes[-1]]
 
-            _, idx = get_join_indexers(left, right, sort=False, how="left")
+            # error: Argument 1 to "get_join_indexers" has incompatible type
+            # "List[ndarray[Any, Any]]"; expected "List[Union[Union[ExtensionArray,
+            # ndarray[Any, Any]], Index, Series]]
+            _, idx = get_join_indexers(
+                left, right, sort=False, how="left"  # type: ignore[arg-type]
+            )
             out = np.where(idx != -1, out[idx], 0)
 
             if sort:
@@ -892,6 +919,27 @@ def fillna(
         --------
         ffill : Forward fill values within a group.
         bfill : Backward fill values within a group.
+
+        Examples
+        --------
+        For SeriesGroupBy:
+
+        >>> lst = ['cat', 'cat', 'cat', 'mouse', 'mouse']
+        >>> ser = pd.Series([1, None, None, 2, None], index=lst)
+        >>> ser
+        cat    1.0
+        cat    NaN
+        cat    NaN
+        mouse  2.0
+        mouse  NaN
+        dtype: float64
+        >>> ser.groupby(level=0).fillna(0, limit=1)
+        cat    1.0
+        cat    0.0
+        cat    NaN
+        mouse  2.0
+        mouse  0.0
+        dtype: float64
         """
         result = self._op_via_apply(
             "fillna",
@@ -1638,7 +1686,11 @@ def arr_func(bvalues: ArrayLike) -> ArrayLike:
         res_df = self._maybe_transpose_result(res_df)
         return res_df
 
-    def _transform_general(self, func, *args, **kwargs):
+    def _transform_general(self, func, engine, engine_kwargs, *args, **kwargs):
+        if maybe_use_numba(engine):
+            return self._transform_with_numba(
+                func, *args, engine_kwargs=engine_kwargs, **kwargs
+            )
         from pandas.core.reshape.concat import concat
 
         applied = []
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 341e177a4a9ad..c094a62b22feb 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -135,6 +135,8 @@ class providing the base-class of operations.
 )
 
 if TYPE_CHECKING:
+    from typing import Any
+
     from pandas.core.window import (
         ExpandingGroupby,
         ExponentialMovingWindowGroupby,
@@ -341,6 +343,10 @@ class providing the base-class of operations.
 -------
 Series or DataFrame
     Computed {fname} of values within each group.
+
+Examples
+--------
+{example}
 """
 
 _pipe_template = """
@@ -919,6 +925,11 @@ def get_group(self, name, obj=None) -> DataFrame | Series:
             it is None, the object groupby was called on will
             be used.
 
+            .. deprecated:: 2.1.0
+                The ``obj`` argument is deprecated and will be removed in a
+                future version. Use ``df.iloc[gb.indices.get(name)]``
+                instead of ``gb.get_group(name, obj=df)``.
+
         Returns
         -------
         same type as obj
@@ -955,14 +966,21 @@ def get_group(self, name, obj=None) -> DataFrame | Series:
         owl     1  2  3
         toucan  1  5  6
         """
-        if obj is None:
-            obj = self._selected_obj
-
         inds = self._get_index(name)
         if not len(inds):
             raise KeyError(name)
 
-        return obj._take_with_is_copy(inds, axis=self.axis)
+        if obj is None:
+            return self._selected_obj.iloc[inds]
+        else:
+            warnings.warn(
+                "obj is deprecated and will be removed in a future version. "
+                "Do ``df.iloc[gb.indices.get(name)]`` "
+                "instead of ``gb.get_group(name, obj=df)``.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+            return obj._take_with_is_copy(inds, axis=self.axis)
 
     @final
     def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]:
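The `get_group` hunk above deprecates passing `obj`; the suggested replacement indexes the original frame positionally via `gb.indices`. A minimal sketch:

    import pandas as pd

    df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
    gb = df.groupby("a")
    # Instead of the deprecated gb.get_group(1, obj=df):
    print(df.iloc[gb.indices.get(1)])
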
@@ -1476,8 +1494,9 @@ def _numba_prep(self, data: DataFrame):
     def _numba_agg_general(
         self,
         func: Callable,
+        dtype_mapping: dict[np.dtype, Any],
         engine_kwargs: dict[str, bool] | None,
-        *aggregator_args,
+        **aggregator_kwargs,
     ):
         """
         Perform groupby with a standard numerical aggregation function (e.g. mean)
@@ -1492,19 +1511,26 @@ def _numba_agg_general(
 
         data = self._obj_with_exclusions
         df = data if data.ndim == 2 else data.to_frame()
-        starts, ends, sorted_index, sorted_data = self._numba_prep(df)
+
+        sorted_df = df.take(self.grouper._sort_idx, axis=self.axis)
+        sorted_ids = self.grouper._sorted_ids
+        _, _, ngroups = self.grouper.group_info
+        starts, ends = lib.generate_slices(sorted_ids, ngroups)
         aggregator = executor.generate_shared_aggregator(
-            func, **get_jit_arguments(engine_kwargs)
+            func, dtype_mapping, **get_jit_arguments(engine_kwargs)
         )
-        result = aggregator(sorted_data, starts, ends, 0, *aggregator_args)
+        result = sorted_df._mgr.apply(
+            aggregator, start=starts, end=ends, **aggregator_kwargs
+        )
+        result.axes[1] = self.grouper.result_index
+        result = df._constructor(result)
 
-        index = self.grouper.result_index
         if data.ndim == 1:
-            result_kwargs = {"name": data.name}
-            result = result.ravel()
+            result = result.squeeze("columns")
+            result.name = data.name
         else:
-            result_kwargs = {"columns": data.columns}
-        return data._constructor(result, index=index, **result_kwargs)
+            result.columns = data.columns
+        return result
 
     @final
     def _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs):
@@ -1697,7 +1723,7 @@ def _agg_general(
         return result.__finalize__(self.obj, method="groupby")
 
     def _agg_py_fallback(
-        self, values: ArrayLike, ndim: int, alt: Callable
+        self, how: str, values: ArrayLike, ndim: int, alt: Callable
     ) -> ArrayLike:
         """
         Fallback to pure-python aggregation if _cython_operation raises
@@ -1723,7 +1749,12 @@ def _agg_py_fallback(
         # We do not get here with UDFs, so we know that our dtype
         #  should always be preserved by the implemented aggregations
         # TODO: Is this exactly right; see WrappedCythonOp get_result_dtype?
-        res_values = self.grouper.agg_series(ser, alt, preserve_dtype=True)
+        try:
+            res_values = self.grouper.agg_series(ser, alt, preserve_dtype=True)
+        except Exception as err:
+            msg = f"agg function failed [how->{how},dtype->{ser.dtype}]"
+            # preserve the kind of exception that raised
+            raise type(err)(msg) from err
 
         if ser.dtype == object:
             res_values = res_values.astype(object, copy=False)
@@ -1765,8 +1796,10 @@ def array_func(values: ArrayLike) -> ArrayLike:
                 # TODO: shouldn't min_count matter?
                 if how in ["any", "all", "std", "sem"]:
                     raise  # TODO: re-raise as TypeError?  should not be reached
-                result = self._agg_py_fallback(values, ndim=data.ndim, alt=alt)
+            else:
+                return result
 
+            result = self._agg_py_fallback(how, values, ndim=data.ndim, alt=alt)
             return result
 
         new_mgr = data.grouped_reduce(array_func)
@@ -1783,22 +1816,20 @@ def _cython_transform(
 
     @final
     def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
-        if maybe_use_numba(engine):
-            return self._transform_with_numba(
-                func, *args, engine_kwargs=engine_kwargs, **kwargs
-            )
-
         # optimized transforms
         func = com.get_cython_func(func) or func
 
         if not isinstance(func, str):
-            return self._transform_general(func, *args, **kwargs)
+            return self._transform_general(func, engine, engine_kwargs, *args, **kwargs)
 
         elif func not in base.transform_kernel_allowlist:
             msg = f"'{func}' is not a valid function name for transform(name)"
             raise ValueError(msg)
         elif func in base.cythonized_kernels or func in base.transformation_kernels:
             # cythonized transform or canned "agg+broadcast"
+            if engine is not None:
+                kwargs["engine"] = engine
+                kwargs["engine_kwargs"] = engine_kwargs
             return getattr(self, func)(*args, **kwargs)
 
         else:
@@ -1813,6 +1844,9 @@ def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
                 with com.temp_setattr(self, "as_index", True):
                     # GH#49834 - result needs groups in the index for
                     # _wrap_transform_fast_result
+                    if engine is not None:
+                        kwargs["engine"] = engine
+                        kwargs["engine_kwargs"] = engine_kwargs
                     result = getattr(self, func)(*args, **kwargs)
 
             return self._wrap_transform_fast_result(result)
@@ -2184,7 +2218,9 @@ def mean(
         if maybe_use_numba(engine):
             from pandas.core._numba.kernels import sliding_mean
 
-            return self._numba_agg_general(sliding_mean, engine_kwargs)
+            return self._numba_agg_general(
+                sliding_mean, executor.float_dtype_mapping, engine_kwargs, min_periods=0
+            )
         else:
             result = self._cython_agg_general(
                 "mean",
@@ -2213,6 +2249,44 @@ def median(self, numeric_only: bool = False):
         -------
         Series or DataFrame
             Median of values within each group.
+
+        Examples
+        --------
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+        >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
+        >>> ser
+        a     7
+        a     2
+        a     8
+        b     4
+        b     3
+        b     3
+        dtype: int64
+        >>> ser.groupby(level=0).median()
+        a    7.0
+        b    3.0
+        dtype: float64
+
+        For DataFrameGroupBy:
+
+        >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]}
+        >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog',
+        ...                   'mouse', 'mouse', 'mouse', 'mouse'])
+        >>> df
+                 a  b
+          dog    1  1
+          dog    3  4
+          dog    5  8
+        mouse    7  4
+        mouse    7  4
+        mouse    8  2
+        mouse    3  1
+        >>> df.groupby(level=0).median()
+                 a    b
+        dog    3.0  4.0
+        mouse  7.0  3.0
         """
         result = self._cython_agg_general(
             "median",
@@ -2223,7 +2297,7 @@ def median(self, numeric_only: bool = False):
 
     @final
     @Substitution(name="groupby")
-    @Appender(_common_see_also)
+    @Substitution(see_also=_common_see_also)
     def std(
         self,
         ddof: int = 1,
@@ -2271,11 +2345,57 @@ def std(
         -------
         Series or DataFrame
             Standard deviation of values within each group.
+        %(see_also)s
+        Examples
+        --------
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+        >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
+        >>> ser
+        a     7
+        a     2
+        a     8
+        b     4
+        b     3
+        b     3
+        dtype: int64
+        >>> ser.groupby(level=0).std()
+        a    3.21455
+        b    0.57735
+        dtype: float64
+
+        For DataFrameGroupBy:
+
+        >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]}
+        >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog',
+        ...                   'mouse', 'mouse', 'mouse', 'mouse'])
+        >>> df
+                 a  b
+          dog    1  1
+          dog    3  4
+          dog    5  8
+        mouse    7  4
+        mouse    7  4
+        mouse    8  2
+        mouse    3  1
+        >>> df.groupby(level=0).std()
+                      a         b
+        dog    2.000000  3.511885
+        mouse  2.217356  1.500000
         """
         if maybe_use_numba(engine):
             from pandas.core._numba.kernels import sliding_var
 
-            return np.sqrt(self._numba_agg_general(sliding_var, engine_kwargs, ddof))
+            return np.sqrt(
+                self._numba_agg_general(
+                    sliding_var,
+                    executor.float_dtype_mapping,
+                    engine_kwargs,
+                    min_periods=0,
+                    ddof=ddof,
+                )
+            )
         else:
             return self._cython_agg_general(
                 "std",
@@ -2286,7 +2406,7 @@ def std(
 
     @final
     @Substitution(name="groupby")
-    @Appender(_common_see_also)
+    @Substitution(see_also=_common_see_also)
     def var(
         self,
         ddof: int = 1,
@@ -2334,11 +2454,55 @@ def var(
         -------
         Series or DataFrame
             Variance of values within each group.
+        %(see_also)s
+        Examples
+        --------
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+        >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
+        >>> ser
+        a     7
+        a     2
+        a     8
+        b     4
+        b     3
+        b     3
+        dtype: int64
+        >>> ser.groupby(level=0).var()
+        a    10.333333
+        b     0.333333
+        dtype: float64
+
+        For DataFrameGroupBy:
+
+        >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]}
+        >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog',
+        ...                   'mouse', 'mouse', 'mouse', 'mouse'])
+        >>> df
+                 a  b
+          dog    1  1
+          dog    3  4
+          dog    5  8
+        mouse    7  4
+        mouse    7  4
+        mouse    8  2
+        mouse    3  1
+        >>> df.groupby(level=0).var()
+                      a          b
+        dog    4.000000  12.333333
+        mouse  4.916667   2.250000
         """
         if maybe_use_numba(engine):
             from pandas.core._numba.kernels import sliding_var
 
-            return self._numba_agg_general(sliding_var, engine_kwargs, ddof)
+            return self._numba_agg_general(
+                sliding_var,
+                executor.float_dtype_mapping,
+                engine_kwargs,
+                min_periods=0,
+                ddof=ddof,
+            )
         else:
             return self._cython_agg_general(
                 "var",
@@ -2505,6 +2669,40 @@ def sem(self, ddof: int = 1, numeric_only: bool = False):
         -------
         Series or DataFrame
             Standard error of the mean of values within each group.
+
+        Examples
+        --------
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'b', 'b']
+        >>> ser = pd.Series([5, 10, 8, 14], index=lst)
+        >>> ser
+        a     5
+        a    10
+        b     8
+        b    14
+        dtype: int64
+        >>> ser.groupby(level=0).sem()
+        a    2.5
+        b    3.0
+        dtype: float64
+
+        For DataFrameGroupBy:
+
+        >>> data = [[1, 12, 11], [1, 15, 2], [2, 5, 8], [2, 6, 12]]
+        >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+        ...                   index=["tuna", "salmon", "catfish", "goldfish"])
+        >>> df
+                   a   b   c
+            tuna   1  12  11
+          salmon   1  15   2
+         catfish   2   5   8
+        goldfish   2   6  12
+        >>> df.groupby("a").sem()
+              b  c
+        a
+        1    1.5  4.5
+        2    0.5  2.0
         """
         if numeric_only and self.obj.ndim == 1 and not is_numeric_dtype(self.obj.dtype):
             raise TypeError(
@@ -2520,7 +2718,7 @@ def sem(self, ddof: int = 1, numeric_only: bool = False):
 
     @final
     @Substitution(name="groupby")
-    @Appender(_common_see_also)
+    @Substitution(see_also=_common_see_also)
     def size(self) -> DataFrame | Series:
         """
         Compute group sizes.
@@ -2530,6 +2728,37 @@ def size(self) -> DataFrame | Series:
         DataFrame or Series
             Number of rows in each group as a Series if as_index is True
             or a DataFrame if as_index is False.
+        %(see_also)s
+        Examples
+        --------
+
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'b']
+        >>> ser = pd.Series([1, 2, 3], index=lst)
+        >>> ser
+        a     1
+        a     2
+        b     3
+        dtype: int64
+        >>> ser.groupby(level=0).size()
+        a    2
+        b    1
+        dtype: int64
+
+        For DataFrameGroupBy:
+
+        >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]]
+        >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+        ...                   index=["owl", "toucan", "eagle"])
+        >>> df
+                a  b  c
+        owl     1  2  3
+        toucan  1  5  6
+        eagle   7  8  9
+        >>> df.groupby("a").size()
+        a
+        1    2
+        7    1
+        dtype: int64
         """
         result = self.grouper.size()
 
@@ -2550,7 +2779,46 @@ def size(self) -> DataFrame | Series:
         return result
 
     @final
-    @doc(_groupby_agg_method_template, fname="sum", no=False, mc=0)
+    @doc(
+        _groupby_agg_method_template,
+        fname="sum",
+        no=False,
+        mc=0,
+        example=dedent(
+            """\
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'b', 'b']
+        >>> ser = pd.Series([1, 2, 3, 4], index=lst)
+        >>> ser
+        a    1
+        a    2
+        b    3
+        b    4
+        dtype: int64
+        >>> ser.groupby(level=0).sum()
+        a    3
+        b    7
+        dtype: int64
+
+        For DataFrameGroupBy:
+
+        >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]]
+        >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+        ...                   index=["tiger", "leopard", "cheetah", "lion"])
+        >>> df
+                  a  b  c
+          tiger   1  8  2
+        leopard   1  2  5
+        cheetah   2  5  8
+           lion   2  6  9
+        >>> df.groupby("a").sum()
+             b   c
+        a
+        1   10   7
+        2   11  17"""
+        ),
+    )
     def sum(
         self,
         numeric_only: bool = False,
@@ -2563,7 +2831,9 @@ def sum(
 
             return self._numba_agg_general(
                 sliding_sum,
+                executor.default_dtype_mapping,
                 engine_kwargs,
+                min_periods=min_count,
             )
         else:
             # If we are grouping on categoricals we want unobserved categories to
@@ -2580,14 +2850,92 @@ def sum(
             return self._reindex_output(result, fill_value=0)
 
     @final
-    @doc(_groupby_agg_method_template, fname="prod", no=False, mc=0)
+    @doc(
+        _groupby_agg_method_template,
+        fname="prod",
+        no=False,
+        mc=0,
+        example=dedent(
+            """\
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'b', 'b']
+        >>> ser = pd.Series([1, 2, 3, 4], index=lst)
+        >>> ser
+        a    1
+        a    2
+        b    3
+        b    4
+        dtype: int64
+        >>> ser.groupby(level=0).prod()
+        a    2
+        b   12
+        dtype: int64
+
+        For DataFrameGroupBy:
+
+        >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]]
+        >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+        ...                   index=["tiger", "leopard", "cheetah", "lion"])
+        >>> df
+                  a  b  c
+          tiger   1  8  2
+        leopard   1  2  5
+        cheetah   2  5  8
+           lion   2  6  9
+        >>> df.groupby("a").prod()
+             b    c
+        a
+        1   16   10
+        2   30   72"""
+        ),
+    )
     def prod(self, numeric_only: bool = False, min_count: int = 0):
         return self._agg_general(
             numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod
         )
 
     @final
-    @doc(_groupby_agg_method_template, fname="min", no=False, mc=-1)
+    @doc(
+        _groupby_agg_method_template,
+        fname="min",
+        no=False,
+        mc=-1,
+        example=dedent(
+            """\
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'b', 'b']
+        >>> ser = pd.Series([1, 2, 3, 4], index=lst)
+        >>> ser
+        a    1
+        a    2
+        b    3
+        b    4
+        dtype: int64
+        >>> ser.groupby(level=0).min()
+        a    1
+        b    3
+        dtype: int64
+
+        For DataFrameGroupBy:
+
+        >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]]
+        >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+        ...                   index=["tiger", "leopard", "cheetah", "lion"])
+        >>> df
+                  a  b  c
+          tiger   1  8  2
+        leopard   1  2  5
+        cheetah   2  5  8
+           lion   2  6  9
+        >>> df.groupby("a").min()
+            b  c
+        a
+        1   2  2
+        2   5  8"""
+        ),
+    )
     def min(
         self,
         numeric_only: bool = False,
@@ -2598,7 +2946,13 @@ def min(
         if maybe_use_numba(engine):
             from pandas.core._numba.kernels import sliding_min_max
 
-            return self._numba_agg_general(sliding_min_max, engine_kwargs, False)
+            return self._numba_agg_general(
+                sliding_min_max,
+                executor.identity_dtype_mapping,
+                engine_kwargs,
+                min_periods=min_count,
+                is_max=False,
+            )
         else:
             return self._agg_general(
                 numeric_only=numeric_only,
@@ -2608,7 +2962,46 @@ def min(
             )
 
     @final
-    @doc(_groupby_agg_method_template, fname="max", no=False, mc=-1)
+    @doc(
+        _groupby_agg_method_template,
+        fname="max",
+        no=False,
+        mc=-1,
+        example=dedent(
+            """\
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'b', 'b']
+        >>> ser = pd.Series([1, 2, 3, 4], index=lst)
+        >>> ser
+        a    1
+        a    2
+        b    3
+        b    4
+        dtype: int64
+        >>> ser.groupby(level=0).max()
+        a    2
+        b    4
+        dtype: int64
+
+        For DataFrameGroupBy:
+
+        >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]]
+        >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+        ...                   index=["tiger", "leopard", "cheetah", "lion"])
+        >>> df
+                  a  b  c
+          tiger   1  8  2
+        leopard   1  2  5
+        cheetah   2  5  8
+           lion   2  6  9
+        >>> df.groupby("a").max()
+            b  c
+        a
+        1   8  5
+        2   6  9"""
+        ),
+    )
     def max(
         self,
         numeric_only: bool = False,
@@ -2619,7 +3012,13 @@ def max(
         if maybe_use_numba(engine):
             from pandas.core._numba.kernels import sliding_min_max
 
-            return self._numba_agg_general(sliding_min_max, engine_kwargs, True)
+            return self._numba_agg_general(
+                sliding_min_max,
+                executor.identity_dtype_mapping,
+                engine_kwargs,
+                min_periods=min_count,
+                is_max=True,
+            )
         else:
             return self._agg_general(
                 numeric_only=numeric_only,
@@ -2768,6 +3167,50 @@ def ohlc(self) -> DataFrame:
         -------
         DataFrame
             Open, high, low and close values within each group.
+
+        Examples
+        --------
+
+        For SeriesGroupBy:
+
+        >>> lst = ['SPX', 'CAC', 'SPX', 'CAC', 'SPX', 'CAC', 'SPX', 'CAC',]
+        >>> ser = pd.Series([3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 0.1, 0.5], index=lst)
+        >>> ser
+        SPX     3.4
+        CAC     9.0
+        SPX     7.2
+        CAC     5.2
+        SPX     8.8
+        CAC     9.4
+        SPX     0.1
+        CAC     0.5
+        dtype: float64
+        >>> ser.groupby(level=0).ohlc()
+             open  high  low  close
+        CAC   9.0   9.4  0.5    0.5
+        SPX   3.4   8.8  0.1    0.1
+
+        For DataFrameGroupBy:
+
+        >>> data = {2022: [1.2, 2.3, 8.9, 4.5, 4.4, 3, 2, 1],
+        ...         2023: [3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 8.2, 1.0]}
+        >>> df = pd.DataFrame(data, index=['SPX', 'CAC', 'SPX', 'CAC',
+        ...                   'SPX', 'CAC', 'SPX', 'CAC'])
+        >>> df
+             2022  2023
+        SPX   1.2   3.4
+        CAC   2.3   9.0
+        SPX   8.9   7.2
+        CAC   4.5   5.2
+        SPX   4.4   8.8
+        CAC   3.0   9.4
+        SPX   2.0   8.2
+        CAC   1.0   1.0
+        >>> df.groupby(level=0).ohlc()
+            2022                 2023
+            open high  low close open high  low close
+        CAC  2.3  4.5  1.0   1.0  9.0  9.4  1.0   1.0
+        SPX  1.2  8.9  1.2   2.0  3.4  8.8  3.4   8.2
         """
         if self.obj.ndim == 1:
             obj = self._selected_obj
@@ -3222,6 +3665,26 @@ def ffill(self, limit: int | None = None):
 
         Examples
         --------
+
+        For SeriesGroupBy:
+
+        >>> key = [0, 0, 1, 1]
+        >>> ser = pd.Series([np.nan, 2, 3, np.nan], index=key)
+        >>> ser
+        0    NaN
+        0    2.0
+        1    3.0
+        1    NaN
+        dtype: float64
+        >>> ser.groupby(level=0).ffill()
+        0    NaN
+        0    2.0
+        1    3.0
+        1    3.0
+        dtype: float64
+
+        For DataFrameGroupBy:
+
         >>> df = pd.DataFrame(
         ...     {
         ...         "key": [0, 0, 1, 1, 1],
@@ -4279,6 +4742,44 @@ def shift(
         See Also
         --------
         Index.shift : Shift values of Index.
+
+        Examples
+        --------
+
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'b', 'b']
+        >>> ser = pd.Series([1, 2, 3, 4], index=lst)
+        >>> ser
+        a    1
+        a    2
+        b    3
+        b    4
+        dtype: int64
+        >>> ser.groupby(level=0).shift(1)
+        a    NaN
+        a    1.0
+        b    NaN
+        b    3.0
+        dtype: float64
+
+        For DataFrameGroupBy:
+
+        >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]]
+        >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+        ...                   index=["tuna", "salmon", "catfish", "goldfish"])
+        >>> df
+                   a  b  c
+            tuna   1  2  3
+          salmon   1  5  6
+         catfish   2  5  8
+        goldfish   2  6  9
+        >>> df.groupby("a").shift(1)
+                      b    c
+            tuna    NaN  NaN
+          salmon    2.0  3.0
+         catfish    NaN  NaN
+        goldfish    5.0  8.0
         """
         if axis is not lib.no_default:
             axis = self.obj._get_axis_number(axis)
@@ -4306,7 +4807,7 @@ def shift(
 
     @final
     @Substitution(name="groupby")
-    @Appender(_common_see_also)
+    @Substitution(see_also=_common_see_also)
     def diff(
         self, periods: int = 1, axis: AxisInt | lib.NoDefault = lib.no_default
     ) -> NDFrameT:
@@ -4331,6 +4832,53 @@ def diff(
         -------
         Series or DataFrame
             First differences.
+        %(see_also)s
+        Examples
+        --------
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+        >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
+        >>> ser
+        a     7
+        a     2
+        a     8
+        b     4
+        b     3
+        b     3
+        dtype: int64
+        >>> ser.groupby(level=0).diff()
+        a    NaN
+        a   -5.0
+        a    6.0
+        b    NaN
+        b   -1.0
+        b    0.0
+        dtype: float64
+
+        For DataFrameGroupBy:
+
+        >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]}
+        >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog',
+        ...                   'mouse', 'mouse', 'mouse', 'mouse'])
+        >>> df
+                 a  b
+          dog    1  1
+          dog    3  4
+          dog    5  8
+        mouse    7  4
+        mouse    7  4
+        mouse    8  2
+        mouse    3  1
+        >>> df.groupby(level=0).diff()
+                 a    b
+          dog  NaN  NaN
+          dog  2.0  3.0
+          dog  2.0  4.0
+        mouse  NaN  NaN
+        mouse  0.0  0.0
+        mouse  1.0 -2.0
+        mouse -5.0 -1.0
         """
         if axis is not lib.no_default:
             axis = self.obj._get_axis_number(axis)
@@ -4359,12 +4907,12 @@ def diff(
 
     @final
     @Substitution(name="groupby")
-    @Appender(_common_see_also)
+    @Substitution(see_also=_common_see_also)
     def pct_change(
         self,
         periods: int = 1,
-        fill_method: FillnaOptions = "ffill",
-        limit: int | None = None,
+        fill_method: FillnaOptions | lib.NoDefault = lib.no_default,
+        limit: int | None | lib.NoDefault = lib.no_default,
         freq=None,
         axis: Axis | lib.NoDefault = lib.no_default,
     ):
@@ -4375,7 +4923,70 @@ def pct_change(
         -------
         Series or DataFrame
             Percentage changes within each group.
+        %(see_also)s
+        Examples
+        --------
+
+        For SeriesGroupBy:
+
+        >>> lst = ['a', 'a', 'b', 'b']
+        >>> ser = pd.Series([1, 2, 3, 4], index=lst)
+        >>> ser
+        a    1
+        a    2
+        b    3
+        b    4
+        dtype: int64
+        >>> ser.groupby(level=0).pct_change()
+        a         NaN
+        a    1.000000
+        b         NaN
+        b    0.333333
+        dtype: float64
+
+        For DataFrameGroupBy:
+
+        >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]]
+        >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+        ...                   index=["tuna", "salmon", "catfish", "goldfish"])
+        >>> df
+                   a  b  c
+            tuna   1  2  3
+          salmon   1  5  6
+         catfish   2  5  8
+        goldfish   2  6  9
+        >>> df.groupby("a").pct_change()
+                    b  c
+            tuna    NaN    NaN
+          salmon    1.5  1.000
+         catfish    NaN    NaN
+        goldfish    0.2  0.125
         """
+        # GH#53491
+        if fill_method is not lib.no_default or limit is not lib.no_default:
+            warnings.warn(
+                "The 'fill_method' and 'limit' keywords in "
+                f"{type(self).__name__}.pct_change are deprecated and will be "
+                "removed in a future version. Call "
+                f"{'bfill' if fill_method in ('backfill', 'bfill') else 'ffill'} "
+                "before calling pct_change instead.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
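+        # Only warn about the changing default when there are NaNs to fill,
+        # then fall back to the current default behaviour.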
+        if fill_method is lib.no_default:
+            if any(grp.isna().values.any() for _, grp in self):
+                warnings.warn(
+                    "The default fill_method='ffill' in "
+                    f"{type(self).__name__}.pct_change is deprecated and will be "
+                    "removed in a future version. Call ffill before calling "
+                    "pct_change to retain current behavior and silence this warning.",
+                    FutureWarning,
+                    stacklevel=find_stack_level(),
+                )
+            fill_method = "ffill"
+        if limit is lib.no_default:
+            limit = None
+
         if axis is not lib.no_default:
             axis = self.obj._get_axis_number(axis)
             self._deprecate_axis(axis, "pct_change")
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 09288448cec57..d10a3888c5f75 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -22,7 +22,10 @@
 
 import numpy as np
 
-from pandas._config import get_option
+from pandas._config import (
+    get_option,
+    using_copy_on_write,
+)
 
 from pandas._libs import (
     NaT,
@@ -113,6 +116,7 @@
 )
 from pandas.core.dtypes.concat import concat_compat
 from pandas.core.dtypes.dtypes import (
+    ArrowDtype,
     CategoricalDtype,
     DatetimeTZDtype,
     ExtensionDtype,
@@ -1635,7 +1639,7 @@ def to_frame(
 
         if name is lib.no_default:
             name = self._get_level_names()
-        result = DataFrame({name: self._values.copy()})
+        result = DataFrame({name: self}, copy=not using_copy_on_write())
 
         if index:
             result.index = self
@@ -3315,7 +3319,7 @@ def union(self, other, sort=None):
 
         return self._wrap_setop_result(other, result)
 
-    def _union(self, other: Index, sort):
+    def _union(self, other: Index, sort: bool | None):
         """
         Specific union logic should go here. In subclasses, union behavior
         should be overwritten here rather than in `self.union`.
@@ -3326,6 +3330,7 @@ def _union(self, other: Index, sort):
         sort : False or None, default False
             Whether to sort the resulting index.
 
+            * True : sort the result
             * False : do not sort the result.
             * None : sort the result, except when `self` and `other` are equal
               or when the values cannot be compared.
@@ -3338,7 +3343,7 @@ def _union(self, other: Index, sort):
         rvals = other._values
 
         if (
-            sort is None
+            sort in (None, True)
             and self.is_monotonic_increasing
             and other.is_monotonic_increasing
             and not (self.has_duplicates and other.has_duplicates)
@@ -4187,7 +4192,7 @@ def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]):
 
         # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able
         #  to simplify this.
-        if isinstance(self.dtype, np.dtype) and self.dtype.kind == "f":
+        if lib.is_np_dtype(self.dtype, "f"):
             # We always treat __getitem__ slicing as label-based
             # translate to locations
             return self.slice_indexer(start, stop, step)
@@ -5054,6 +5059,12 @@ def values(self) -> ArrayLike:
         >>> idx.values
         array([1, 2, 3])
         """
+        if using_copy_on_write():
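+            # Under Copy-on-Write, hand out a read-only view so the underlying
+            # data cannot be mutated through this property.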
+            data = self._data
+            if isinstance(data, np.ndarray):
+                data = data.view()
+                data.flags.writeable = False
+            return data
         return self._data
 
     @cache_readonly
@@ -6036,7 +6047,9 @@ def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]
         if isinstance(key, Index):
             # GH 42790 - Preserve name from an Index
             keyarr.name = key.name
-        if keyarr.dtype.kind in "mM":
+        if lib.is_np_dtype(keyarr.dtype, "mM") or isinstance(
+            keyarr.dtype, DatetimeTZDtype
+        ):
             # DTI/TDI.take can infer a freq in some cases when we dont want one
             if isinstance(key, list) or (
                 isinstance(key, type(self))
@@ -7537,6 +7550,12 @@ def _unpack_nested_dtype(other: Index) -> Index:
         # If there is ever a SparseIndex, this could get dispatched
         #  here too.
         return dtype.categories
+    elif isinstance(dtype, ArrowDtype):
+        # GH 53617
+        import pyarrow as pa
+
+        if pa.types.is_dictionary(dtype.pyarrow_dtype):
+            other = other.astype(ArrowDtype(dtype.pyarrow_dtype.value_type))
     return other
 
 
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index d818e1e862c12..2125c1aef6162 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -18,6 +18,8 @@
 
 import numpy as np
 
+from pandas._config import using_copy_on_write
+
 from pandas._libs import (
     NaT,
     Timedelta,
@@ -451,7 +453,11 @@ def _with_freq(self, freq):
     @property
     def values(self) -> np.ndarray:
         # NB: For Datetime64TZ this is lossy
-        return self._data._ndarray
+        data = self._data._ndarray
+        if using_copy_on_write():
+            data = data.view()
+            data.flags.writeable = False
+        return data
 
     @doc(DatetimeIndexOpsMixin.shift)
     def shift(self, periods: int = 1, freq=None) -> Self:
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index a1c240f72a28b..5181623c0c327 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -88,7 +88,10 @@
     Categorical,
     ExtensionArray,
 )
-from pandas.core.arrays.categorical import factorize_from_iterables
+from pandas.core.arrays.categorical import (
+    factorize_from_iterables,
+    recode_for_categories,
+)
 import pandas.core.common as com
 from pandas.core.construction import sanitize_array
 import pandas.core.indexes.base as ibase
@@ -2145,14 +2148,28 @@ def append(self, other):
         if all(
             (isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
         ):
-            arrays, names = [], []
+            codes = []
+            levels = []
+            names = []
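+            # Take the union of each level across all indexes and recode every
+            # MultiIndex's codes against it, avoiding re-factorizing the values.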
             for i in range(self.nlevels):
-                label = self._get_level_values(i)
-                appended = [o._get_level_values(i) for o in other]
-                arrays.append(label.append(appended))
-                single_label_name = all(label.name == x.name for x in appended)
-                names.append(label.name if single_label_name else None)
-            return MultiIndex.from_arrays(arrays, names=names)
+                level_values = self.levels[i]
+                for mi in other:
+                    level_values = level_values.union(mi.levels[i])
+                level_codes = [
+                    recode_for_categories(
+                        mi.codes[i], mi.levels[i], level_values, copy=False
+                    )
+                    for mi in ([self, *other])
+                ]
+                level_name = self.names[i]
+                if any(mi.names[i] != level_name for mi in other):
+                    level_name = None
+                codes.append(np.concatenate(level_codes))
+                levels.append(level_values)
+                names.append(level_name)
+            return MultiIndex(
+                codes=codes, levels=levels, names=names, verify_integrity=False
+            )
 
         to_concat = (self._values,) + tuple(k._values for k in other)
         new_tuples = np.concatenate(to_concat)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index f693f9557ecdc..f40d63c8b8128 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -476,7 +476,7 @@ def period_range(
 
     Parameters
     ----------
-    start : str or period-like, default None
+    start : str, datetime, date, pandas.Timestamp, or period-like, default None
         Left bound for generating periods.
     end : str or period-like, default None
         Right bound for generating periods.
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 6837310ca8459..8f50788604da0 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -609,7 +609,7 @@ def _range_in_self(self, other: range) -> bool:
             return False
         return other.start in self._range and other[-1] in self._range
 
-    def _union(self, other: Index, sort):
+    def _union(self, other: Index, sort: bool | None):
         """
         Form the union of two Index objects and sorts if possible
 
@@ -617,9 +617,9 @@ def _union(self, other: Index, sort):
         ----------
         other : Index or array-like
 
-        sort : False or None, default None
+        sort : bool or None, default None
             Whether to sort (monotonically increasing) the resulting index.
-            ``sort=None`` returns a ``RangeIndex`` if possible or a sorted
+            ``sort=None`` or ``sort=True`` returns a ``RangeIndex`` if possible or a sorted
             ``Index`` with a int64 dtype if not.
             ``sort=False`` can return a ``RangeIndex`` if self is monotonically
             increasing and other is fully contained in self. Otherwise, returns
@@ -630,7 +630,7 @@ def _union(self, other: Index, sort):
         union : Index
         """
         if isinstance(other, RangeIndex):
-            if sort is None or (
+            if sort in (None, True) or (
                 sort is False and self.step > 0 and self._range_in_self(other._range)
             ):
                 # GH 47557: Can still return a RangeIndex
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 38bf6c34bf9c9..4a2803f638c73 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -400,6 +400,29 @@ def loc(self) -> _LocIndexer:
                     max_speed
         sidewinder          7
 
+        Multiple conditions combined with ``&``, returning a boolean Series
+
+        >>> df.loc[(df['max_speed'] > 1) & (df['shield'] < 8)]
+               max_speed  shield
+        viper          4       5
+
+        Multiple conditions combined with ``|``, returning a boolean Series
+
+        >>> df.loc[(df['max_speed'] > 4) | (df['shield'] < 5)]
+                    max_speed  shield
+        cobra               1       2
+        sidewinder          7       8
+
+        Please ensure that each condition is wrapped in parentheses ``()``.
+        See the :ref:`user guide<indexing.boolean>`
+        for more details and explanations of Boolean indexing.
+
+        .. note::
+            If you find yourself using 3 or more conditionals in ``.loc[]``,
+            consider using :ref:`advanced indexing<advanced.advanced_hierarchical>`.
+
+            See below for using ``.loc[]`` on MultiIndex DataFrames.
+
         Callable that returns a boolean Series
 
         >>> df.loc[lambda df: df['shield'] == 8]
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 7c5d686d96939..ae820a40005df 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -233,7 +233,7 @@ def make_block_same_class(
         values,
         placement: BlockPlacement | None = None,
         refs: BlockValuesRefs | None = None,
-    ) -> Block:
+    ) -> Self:
         """Wrap given values in a block of same type as self."""
         # Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet
         #  relied on it, as of 2.0 the caller is responsible for this.
@@ -503,6 +503,7 @@ def convert(
     # ---------------------------------------------------------------------
     # Array-Like Methods
 
+    @final
     @cache_readonly
     def dtype(self) -> DtypeObj:
         return self.values.dtype
@@ -559,7 +560,7 @@ def to_native_types(self, na_rep: str = "nan", quoting=None, **kwargs) -> Block:
         return self.make_block(result)
 
     @final
-    def copy(self, deep: bool = True) -> Block:
+    def copy(self, deep: bool = True) -> Self:
         """copy constructor"""
         values = self.values
         refs: BlockValuesRefs | None
@@ -1350,7 +1351,7 @@ def interpolate(
         index: Index | None = None,
         inplace: bool = False,
         limit: int | None = None,
-        limit_direction: str = "forward",
+        limit_direction: Literal["forward", "backward", "both"] = "forward",
         limit_area: str | None = None,
         fill_value: Any | None = None,
         downcast: Literal["infer"] | None = None,
@@ -1369,7 +1370,13 @@ def interpolate(
             m = missing.clean_fill_method(method)
         except ValueError:
             m = None
-        if m is None and self.dtype.kind != "f":
+            # error: Non-overlapping equality check (left operand type:
+            # "Literal['backfill', 'bfill', 'ffill', 'pad']", right
+            # operand type: "Literal['asfreq']")
+            if method == "asfreq":  # type: ignore[comparison-overlap]
+                # clean_fill_method used to allow this
+                raise
+        if m is None and self.dtype == _dtype_obj:
             # only deal with floats
             # bc we already checked that can_hold_na, we don't have int dtype here
             # test_interp_basic checks that we make a copy here
@@ -1394,18 +1401,16 @@ def interpolate(
             )
 
         refs = None
+        arr_inplace = inplace
         if inplace:
             if using_cow and self.refs.has_reference():
-                data = self.values.copy()
+                arr_inplace = False
             else:
-                data = self.values
                 refs = self.refs
-        else:
-            data = self.values.copy()
-        data = cast(np.ndarray, data)  # bc overridden by ExtensionBlock
 
-        missing.interpolate_array_2d(
-            data,
+        # Dispatch to the PandasArray method.
+        # We know self.array_values is a PandasArray bc EABlock overrides
+        new_values = cast(PandasArray, self.array_values).interpolate(
             method=method,
             axis=axis,
             index=index,
@@ -1413,8 +1418,10 @@ def interpolate(
             limit_direction=limit_direction,
             limit_area=limit_area,
             fill_value=fill_value,
+            inplace=arr_inplace,
             **kwargs,
         )
+        data = new_values._ndarray
 
         nb = self.make_block_same_class(data, refs=refs)
         return nb._maybe_downcast([nb], downcast, using_cow)
@@ -1493,7 +1500,8 @@ def quantile(
         result = ensure_block_shape(result, ndim=2)
         return new_block_2d(result, placement=self._mgr_locs)
 
-    def round(self, decimals: int, using_cow: bool = False) -> Block:
+    @final
+    def round(self, decimals: int, using_cow: bool = False) -> Self:
         """
         Rounds the values.
         If the block is not of an integer or float dtype, nothing happens.
@@ -1759,9 +1767,7 @@ def putmask(self, mask, new, using_cow: bool = False) -> list[Block]:
 
         if using_cow and self.refs.has_reference():
             values = values.copy()
-            self = self.make_block_same_class(  # type: ignore[assignment]
-                values.T if values.ndim == 2 else values
-            )
+            self = self.make_block_same_class(values.T if values.ndim == 2 else values)
 
         try:
             # Caller is responsible for ensuring matching lengths
@@ -2262,18 +2268,22 @@ def interpolate(
         if method == "linear":  # type: ignore[comparison-overlap]
             # TODO: GH#50950 implement for arbitrary EAs
             refs = None
+            arr_inplace = inplace
             if using_cow:
                 if inplace and not self.refs.has_reference():
-                    data_out = values._ndarray
                     refs = self.refs
                 else:
-                    data_out = values._ndarray.copy()
-            else:
-                data_out = values._ndarray if inplace else values._ndarray.copy()
-            missing.interpolate_array_2d(
-                data_out, method=method, limit=limit, index=index, axis=axis
+                    arr_inplace = False
+
+            new_values = self.values.interpolate(
+                method=method,
+                index=index,
+                axis=axis,
+                inplace=arr_inplace,
+                limit=limit,
+                fill_value=fill_value,
+                **kwargs,
             )
-            new_values = type(values)._simple_new(data_out, dtype=values.dtype)
             return self.make_block_same_class(new_values, refs=refs)
 
         elif values.ndim == 2 and axis == 0:
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 1d22ed3fe8897..8d12fb91887ac 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -254,6 +254,15 @@ def _concat_homogeneous_fastpath(
     """
     # assumes
     #  all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in in mgrs_indexers)
+
+    if all(not indexers for _, indexers in mgrs_indexers):
+        # https://github.com/pandas-dev/pandas/pull/52685#issuecomment-1523287739
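+        # No reindexing is needed, so concatenate the blocks' 2D arrays directly
+        # instead of filling the preallocated result array below.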
+        arrs = [mgr.blocks[0].values.T for mgr, _ in mgrs_indexers]
+        arr = np.concatenate(arrs).T
+        bp = libinternals.BlockPlacement(slice(shape[0]))
+        nb = new_block_2d(arr, bp)
+        return nb
+
     arr = np.empty(shape, dtype=first_dtype)
 
     if first_dtype == np.float64:
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index f080683d76df7..dc9c47a4a5e34 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -460,13 +460,19 @@ def dict_to_mgr(
         keys = list(data.keys())
         columns = Index(keys) if keys else default_index(0)
         arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
-        arrays = [arr if not isinstance(arr, Index) else arr._data for arr in arrays]
 
     if copy:
         if typ == "block":
             # We only need to copy arrays that will not get consolidated, i.e.
             #  only EA arrays
-            arrays = [x.copy() if isinstance(x, ExtensionArray) else x for x in arrays]
+            arrays = [
+                x.copy()
+                if isinstance(x, ExtensionArray)
+                else x.copy(deep=True)
+                if isinstance(x, Index)
+                else x
+                for x in arrays
+            ]
         else:
             # dtype check to exclude e.g. range objects, scalars
             arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays]
@@ -573,10 +579,10 @@ def _homogenize(
     refs: list[Any] = []
 
     for val in data:
-        if isinstance(val, ABCSeries):
+        if isinstance(val, (ABCSeries, Index)):
             if dtype is not None:
                 val = val.astype(dtype, copy=False)
-            if val.index is not index:
+            if isinstance(val, ABCSeries) and val.index is not index:
                 # Forces alignment. No need to copy data since we
                 # are putting it into an ndarray later
                 val = val.reindex(index, copy=False)
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 2a7c0536c66a4..bb745f61ab221 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -939,8 +939,7 @@ def take(
         -------
         BlockManager
         """
-        assert isinstance(indexer, np.ndarray), type(indexer)
-        assert indexer.dtype == np.intp, indexer.dtype
+        # Caller is responsible for ensuring indexer annotation is accurate
 
         n = self.shape[axis]
         indexer = maybe_convert_indices(indexer, n, verify=verify)
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 7762ba8e2c730..2e05375ca85e7 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -10,6 +10,7 @@
 from typing import (
     TYPE_CHECKING,
     Any,
+    Literal,
     cast,
 )
 
@@ -22,7 +23,6 @@
 )
 from pandas._typing import (
     ArrayLike,
-    Axis,
     AxisInt,
     F,
     ReindexMethod,
@@ -33,10 +33,12 @@
 from pandas.core.dtypes.cast import infer_dtype_from
 from pandas.core.dtypes.common import (
     is_array_like,
+    is_numeric_dtype,
     is_numeric_v_string_like,
     is_object_dtype,
     needs_i8_conversion,
 )
+from pandas.core.dtypes.dtypes import DatetimeTZDtype
 from pandas.core.dtypes.missing import (
     is_valid_na_for_dtype,
     isna,
@@ -122,11 +124,7 @@ def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]:
     return mask
 
 
-def clean_fill_method(method: str | None, allow_nearest: bool = False):
-    # asfreq is compat for resampling
-    if method in [None, "asfreq"]:
-        return None
-
+def clean_fill_method(method: str, allow_nearest: bool = False):
     if isinstance(method, str):
         method = method.lower()
         if method == "ffill":
@@ -225,6 +223,85 @@ def find_valid_index(how: str, is_valid: npt.NDArray[np.bool_]) -> int | None:
     return idxpos  # type: ignore[return-value]
 
 
+def validate_limit_direction(
+    limit_direction: str,
+) -> Literal["forward", "backward", "both"]:
+    valid_limit_directions = ["forward", "backward", "both"]
+    limit_direction = limit_direction.lower()
+    if limit_direction not in valid_limit_directions:
+        raise ValueError(
+            "Invalid limit_direction: expecting one of "
+            f"{valid_limit_directions}, got '{limit_direction}'."
+        )
+    # error: Incompatible return value type (got "str", expected
+    # "Literal['forward', 'backward', 'both']")
+    return limit_direction  # type: ignore[return-value]
+
+
+def validate_limit_area(limit_area: str | None) -> Literal["inside", "outside"] | None:
+    if limit_area is not None:
+        valid_limit_areas = ["inside", "outside"]
+        limit_area = limit_area.lower()
+        if limit_area not in valid_limit_areas:
+            raise ValueError(
+                f"Invalid limit_area: expecting one of {valid_limit_areas}, got "
+                f"{limit_area}."
+            )
+    # error: Incompatible return value type (got "Optional[str]", expected
+    # "Optional[Literal['inside', 'outside']]")
+    return limit_area  # type: ignore[return-value]
+
+
+def infer_limit_direction(limit_direction, method):
+    # Set `limit_direction` depending on `method`
+    if limit_direction is None:
+        if method in ("backfill", "bfill"):
+            limit_direction = "backward"
+        else:
+            limit_direction = "forward"
+    else:
+        if method in ("pad", "ffill") and limit_direction != "forward":
+            raise ValueError(
+                f"`limit_direction` must be 'forward' for method `{method}`"
+            )
+        if method in ("backfill", "bfill") and limit_direction != "backward":
+            raise ValueError(
+                f"`limit_direction` must be 'backward' for method `{method}`"
+            )
+    return limit_direction
+
+
+def get_interp_index(method, index: Index) -> Index:
+    # create/use the index
+    if method == "linear":
+        # prior default
+        from pandas import Index
+
+        index = Index(np.arange(len(index)))
+    else:
+        methods = {"index", "values", "nearest", "time"}
+        is_numeric_or_datetime = (
+            is_numeric_dtype(index.dtype)
+            or isinstance(index.dtype, DatetimeTZDtype)
+            or lib.is_np_dtype(index.dtype, "mM")
+        )
+        if method not in methods and not is_numeric_or_datetime:
+            raise ValueError(
+                "Index column must be numeric or datetime type when "
+                f"using {method} method other than linear. "
+                "Try setting a numeric or datetime index column before "
+                "interpolating."
+            )
+
+    if isna(index).any():
+        raise NotImplementedError(
+            "Interpolation with NaNs in the index "
+            "has not been implemented. Try filling "
+            "those NaNs before interpolating."
+        )
+    return index
+
+
 def interpolate_array_2d(
     data: np.ndarray,
     method: str = "pad",
@@ -234,8 +311,6 @@ def interpolate_array_2d(
     limit_direction: str = "forward",
     limit_area: str | None = None,
     fill_value: Any | None = None,
-    coerce: bool = False,
-    downcast: str | None = None,
     **kwargs,
 ) -> None:
     """
@@ -260,7 +335,9 @@ def interpolate_array_2d(
             method=m,
             axis=axis,
             limit=limit,
-            limit_area=limit_area,
+            # error: Argument "limit_area" to "interpolate_2d" has incompatible
+            # type "Optional[str]"; expected "Optional[Literal['inside', 'outside']]"
+            limit_area=limit_area,  # type: ignore[arg-type]
         )
     else:
         assert index is not None  # for mypy
@@ -314,22 +391,8 @@ def _interpolate_2d_with_fill(
             )
         method = "values"
 
-    valid_limit_directions = ["forward", "backward", "both"]
-    limit_direction = limit_direction.lower()
-    if limit_direction not in valid_limit_directions:
-        raise ValueError(
-            "Invalid limit_direction: expecting one of "
-            f"{valid_limit_directions}, got '{limit_direction}'."
-        )
-
-    if limit_area is not None:
-        valid_limit_areas = ["inside", "outside"]
-        limit_area = limit_area.lower()
-        if limit_area not in valid_limit_areas:
-            raise ValueError(
-                f"Invalid limit_area: expecting one of {valid_limit_areas}, got "
-                f"{limit_area}."
-            )
+    limit_direction = validate_limit_direction(limit_direction)
+    limit_area_validated = validate_limit_area(limit_area)
 
     # default limit is unlimited GH #16282
     limit = algos.validate_limit(nobs=None, limit=limit)
@@ -345,7 +408,7 @@ def func(yvalues: np.ndarray) -> None:
             method=method,
             limit=limit,
             limit_direction=limit_direction,
-            limit_area=limit_area,
+            limit_area=limit_area_validated,
             fill_value=fill_value,
             bounds_error=False,
             **kwargs,
@@ -385,10 +448,10 @@ def _index_to_interp_indices(index: Index, method: str) -> np.ndarray:
 def _interpolate_1d(
     indices: np.ndarray,
     yvalues: np.ndarray,
-    method: str | None = "linear",
+    method: str = "linear",
     limit: int | None = None,
     limit_direction: str = "forward",
-    limit_area: str | None = None,
+    limit_area: Literal["inside", "outside"] | None = None,
     fill_value: Any | None = None,
     bounds_error: bool = False,
     order: int | None = None,
@@ -491,10 +554,10 @@ def _interpolate_1d(
 
 
 def _interpolate_scipy_wrapper(
-    x,
-    y,
-    new_x,
-    method,
+    x: np.ndarray,
+    y: np.ndarray,
+    new_x: np.ndarray,
+    method: str,
     fill_value=None,
     bounds_error: bool = False,
     order=None,
@@ -517,19 +580,11 @@ def _interpolate_scipy_wrapper(
         "krogh": interpolate.krogh_interpolate,
         "from_derivatives": _from_derivatives,
         "piecewise_polynomial": _from_derivatives,
+        "cubicspline": _cubicspline_interpolate,
+        "akima": _akima_interpolate,
+        "pchip": interpolate.pchip_interpolate,
     }
 
-    if getattr(x, "_is_all_dates", False):
-        # GH 5975, scipy.interp1d can't handle datetime64s
-        x, new_x = x._values.astype("i8"), new_x.astype("i8")
-
-    if method == "pchip":
-        alt_methods["pchip"] = interpolate.pchip_interpolate
-    elif method == "akima":
-        alt_methods["akima"] = _akima_interpolate
-    elif method == "cubicspline":
-        alt_methods["cubicspline"] = _cubicspline_interpolate
-
     interp1d_methods = [
         "nearest",
         "zero",
@@ -540,9 +595,11 @@ def _interpolate_scipy_wrapper(
     ]
     if method in interp1d_methods:
         if method == "polynomial":
-            method = order
+            kind = order
+        else:
+            kind = method
         terp = interpolate.interp1d(
-            x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error
+            x, y, kind=kind, fill_value=fill_value, bounds_error=bounds_error
         )
         new_y = terp(new_x)
     elif method == "spline":
@@ -562,13 +619,18 @@ def _interpolate_scipy_wrapper(
             y = y.copy()
         if not new_x.flags.writeable:
             new_x = new_x.copy()
-        method = alt_methods[method]
-        new_y = method(x, y, new_x, **kwargs)
+        terp = alt_methods[method]
+        new_y = terp(x, y, new_x, **kwargs)
     return new_y
 
 
 def _from_derivatives(
-    xi, yi, x, order=None, der: int | list[int] | None = 0, extrapolate: bool = False
+    xi: np.ndarray,
+    yi: np.ndarray,
+    x: np.ndarray,
+    order=None,
+    der: int | list[int] | None = 0,
+    extrapolate: bool = False,
 ):
     """
     Convenience function for interpolate.BPoly.from_derivatives.
@@ -612,7 +674,13 @@ def _from_derivatives(
     return m(x)
 
 
-def _akima_interpolate(xi, yi, x, der: int | list[int] | None = 0, axis: AxisInt = 0):
+def _akima_interpolate(
+    xi: np.ndarray,
+    yi: np.ndarray,
+    x: np.ndarray,
+    der: int | list[int] | None = 0,
+    axis: AxisInt = 0,
+):
     """
     Convenience function for akima interpolation.
     xi and yi are arrays of values used to approximate some function f,
@@ -622,13 +690,13 @@ def _akima_interpolate(xi, yi, x, der: int | list[int] | None = 0, axis: AxisInt
 
     Parameters
     ----------
-    xi : array-like
+    xi : np.ndarray
         A sorted list of x-coordinates, of length N.
-    yi : array-like
+    yi : np.ndarray
         A 1-D array of real values.  `yi`'s length along the interpolation
         axis must be equal to the length of `xi`. If N-D array, use axis
         parameter to select correct axis.
-    x : scalar or array-like
+    x : np.ndarray
         Of length M.
     der : int, optional
         How many derivatives to extract; None for all potentially
@@ -656,9 +724,9 @@ def _akima_interpolate(xi, yi, x, der: int | list[int] | None = 0, axis: AxisInt
 
 
 def _cubicspline_interpolate(
-    xi,
-    yi,
-    x,
+    xi: np.ndarray,
+    yi: np.ndarray,
+    x: np.ndarray,
     axis: AxisInt = 0,
     bc_type: str | tuple[Any, Any] = "not-a-knot",
     extrapolate=None,
@@ -670,14 +738,14 @@ def _cubicspline_interpolate(
 
     Parameters
     ----------
-    xi : array-like, shape (n,)
+    xi : np.ndarray, shape (n,)
         1-d array containing values of the independent variable.
         Values must be real, finite and in strictly increasing order.
-    yi : array-like
+    yi : np.ndarray
         Array containing values of the dependent variable. It can have
         arbitrary number of dimensions, but the length along ``axis``
         (see below) must match the length of ``x``. Values must be finite.
-    x : scalar or array-like, shape (m,)
+    x : np.ndarray, shape (m,)
     axis : int, optional
         Axis along which `y` is assumed to be varying. Meaning that for
         ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
@@ -742,7 +810,10 @@ def _cubicspline_interpolate(
 
 
 def _interpolate_with_limit_area(
-    values: np.ndarray, method: str, limit: int | None, limit_area: str | None
+    values: np.ndarray,
+    method: Literal["pad", "backfill"],
+    limit: int | None,
+    limit_area: Literal["inside", "outside"],
 ) -> None:
     """
     Apply interpolation and limit_area logic to values along a to-be-specified axis.
@@ -755,8 +826,8 @@ def _interpolate_with_limit_area(
         Interpolation method. Could be "bfill" or "pad"
     limit: int, optional
         Index limit on interpolation.
-    limit_area: str
-        Limit area for interpolation. Can be "inside" or "outside"
+    limit_area: {'inside', 'outside'}
+        Limit area for interpolation.
 
     Notes
     -----
@@ -784,16 +855,18 @@ def _interpolate_with_limit_area(
             invalid[first : last + 1] = False
         elif limit_area == "outside":
             invalid[:first] = invalid[last + 1 :] = False
+        else:
+            raise ValueError("limit_area should be 'inside' or 'outside'")
 
         values[invalid] = np.nan
 
 
 def interpolate_2d(
     values: np.ndarray,
-    method: str = "pad",
-    axis: Axis = 0,
+    method: Literal["pad", "backfill"] = "pad",
+    axis: AxisInt = 0,
     limit: int | None = None,
-    limit_area: str | None = None,
+    limit_area: Literal["inside", "outside"] | None = None,
 ) -> None:
     """
     Perform an actual interpolation of values, values will be make 2-d if
@@ -832,9 +905,7 @@ def interpolate_2d(
                 limit=limit,
                 limit_area=limit_area,
             ),
-            # error: Argument 2 to "apply_along_axis" has incompatible type
-            # "Union[str, int]"; expected "SupportsIndex"
-            axis,  # type: ignore[arg-type]
+            axis,
             values,
         )
         return
@@ -850,12 +921,9 @@ def interpolate_2d(
     method = clean_fill_method(method)
     tvalues = transf(values)
 
+    func = get_fill_func(method, ndim=2)
     # _pad_2d and _backfill_2d both modify tvalues inplace
-    if method == "pad":
-        _pad_2d(tvalues, limit=limit)
-    else:
-        _backfill_2d(tvalues, limit=limit)
-
+    func(tvalues, limit=limit)
     return
 
 
@@ -921,7 +989,7 @@ def _pad_2d(
 ):
     mask = _fillna_prep(values, mask)
 
-    if np.all(values.shape):
+    if values.size:
         algos.pad_2d_inplace(values, mask, limit=limit)
     else:
         # for test coverage
@@ -935,7 +1003,7 @@ def _backfill_2d(
 ):
     mask = _fillna_prep(values, mask)
 
-    if np.all(values.shape):
+    if values.size:
         algos.backfill_2d_inplace(values, mask, limit=limit)
     else:
         # for test coverage
@@ -954,10 +1022,14 @@ def get_fill_func(method, ndim: int = 1):
 
 
 def clean_reindex_fill_method(method) -> ReindexMethod | None:
+    if method is None:
+        return None
     return clean_fill_method(method, allow_nearest=True)
 
 
-def _interp_limit(invalid: npt.NDArray[np.bool_], fw_limit, bw_limit):
+def _interp_limit(
+    invalid: npt.NDArray[np.bool_], fw_limit: int | None, bw_limit: int | None
+):
     """
     Get indexers of values that won't be filled
     because they exceed the limits.
diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py
index 9a469169151c3..bd2e532536d84 100644
--- a/pandas/core/ops/docstrings.py
+++ b/pandas/core/ops/docstrings.py
@@ -463,7 +463,7 @@ def make_flex_doc(op_name: str, typ: str) -> str:
 Equivalent to ``{equiv}``, but with support to substitute a fill_value
 for missing data in one of the inputs. With reverse version, `{reverse}`.
 
-Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
+Among flexible wrappers (`add`, `sub`, `mul`, `div`, `floordiv`, `mod`, `pow`) to
 arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
 
 Parameters
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 8291162db0834..3fc0c47dd175e 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1506,6 +1506,8 @@ def _upsample(self, method, limit: int | None = None, fill_value=None):
             result = obj.copy()
             result.index = res_index
         else:
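+            # "asfreq" is not a reindex fill method; treat it as no filling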
+            if method == "asfreq":
+                method = None
             result = obj.reindex(
                 res_index, method=method, limit=limit, fill_value=fill_value
             )
@@ -1624,6 +1626,8 @@ def _upsample(self, method, limit: int | None = None, fill_value=None):
         memb = ax.asfreq(self.freq, how=self.convention)
 
         # Get the fill indexer
+        if method == "asfreq":
+            method = None
         indexer = memb.get_indexer(new_index, method=method, limit=limit)
         new_obj = _take_new_index(
             obj,
@@ -1888,7 +1892,9 @@ def _get_time_bins(self, ax: DatetimeIndex):
             )
 
         if len(ax) == 0:
-            binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)
+            binner = labels = DatetimeIndex(
+                data=[], freq=self.freq, name=ax.name, dtype=ax.dtype
+            )
             return binner, [], labels
 
         first, last = _get_timestamp_range_edges(
@@ -2019,8 +2025,10 @@ def _get_time_period_bins(self, ax: DatetimeIndex):
 
         freq = self.freq
 
-        if not len(ax):
-            binner = labels = PeriodIndex(data=[], freq=freq, name=ax.name)
+        if len(ax) == 0:
+            binner = labels = PeriodIndex(
+                data=[], freq=freq, name=ax.name, dtype=ax.dtype
+            )
             return binner, [], labels
 
         labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index e8420983c65e7..4d1f8bd6301d0 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -3,7 +3,6 @@
 """
 from __future__ import annotations
 
-import copy as cp
 import datetime
 from functools import partial
 import string
@@ -13,6 +12,7 @@
     Literal,
     Sequence,
     cast,
+    final,
 )
 import uuid
 import warnings
@@ -29,7 +29,6 @@
 from pandas._typing import (
     AnyArrayLike,
     ArrayLike,
-    AxisInt,
     DtypeObj,
     IndexLabel,
     JoinHow,
@@ -54,7 +53,6 @@
     ensure_object,
     is_bool,
     is_bool_dtype,
-    is_extension_array_dtype,
     is_float_dtype,
     is_integer,
     is_integer_dtype,
@@ -648,16 +646,14 @@ class _MergeOperation:
     right_on: Sequence[Hashable | AnyArrayLike]
     left_index: bool
     right_index: bool
-    axis: AxisInt
-    bm_axis: AxisInt
     sort: bool
     suffixes: Suffixes
     copy: bool
     indicator: str | bool
     validate: str | None
     join_names: list[Hashable]
-    right_join_keys: list[AnyArrayLike]
-    left_join_keys: list[AnyArrayLike]
+    right_join_keys: list[ArrayLike]
+    left_join_keys: list[ArrayLike]
 
     def __init__(
         self,
@@ -667,7 +663,6 @@ def __init__(
         on: IndexLabel | None = None,
         left_on: IndexLabel | None = None,
         right_on: IndexLabel | None = None,
-        axis: AxisInt = 1,
         left_index: bool = False,
         right_index: bool = False,
         sort: bool = True,
@@ -681,11 +676,6 @@ def __init__(
         self.right = self.orig_right = _right
         self.how = how
 
-        # bm_axis -> the axis on the BlockManager
-        self.bm_axis = axis
-        # axis --> the axis on the Series/DataFrame
-        self.axis = 1 - axis if self.left.ndim == 2 else 0
-
         self.on = com.maybe_make_list(on)
 
         self.suffixes = suffixes
@@ -744,6 +734,7 @@ def __init__(
         if validate is not None:
             self._validate(validate)
 
+    @final
     def _reindex_and_concat(
         self,
         join_index: Index,
@@ -822,12 +813,14 @@ def get_result(self, copy: bool | None = True) -> DataFrame:
 
         return result.__finalize__(self, method="merge")
 
+    @final
     def _maybe_drop_cross_column(
         self, result: DataFrame, cross_col: str | None
     ) -> None:
         if cross_col is not None:
             del result[cross_col]
 
+    @final
     @cache_readonly
     def _indicator_name(self) -> str | None:
         if isinstance(self.indicator, str):
@@ -839,6 +832,7 @@ def _indicator_name(self) -> str | None:
                 "indicator option can only accept boolean or string arguments"
             )
 
+    @final
     def _indicator_pre_merge(
         self, left: DataFrame, right: DataFrame
     ) -> tuple[DataFrame, DataFrame]:
@@ -866,6 +860,7 @@ def _indicator_pre_merge(
 
         return left, right
 
+    @final
     def _indicator_post_merge(self, result: DataFrame) -> DataFrame:
         result["_left_indicator"] = result["_left_indicator"].fillna(0)
         result["_right_indicator"] = result["_right_indicator"].fillna(0)
@@ -881,6 +876,7 @@ def _indicator_post_merge(self, result: DataFrame) -> DataFrame:
         result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1)
         return result
 
+    @final
     def _maybe_restore_index_levels(self, result: DataFrame) -> None:
         """
         Restore index levels specified as `on` parameters
@@ -924,11 +920,12 @@ def _maybe_restore_index_levels(self, result: DataFrame) -> None:
         if names_to_restore:
             result.set_index(names_to_restore, inplace=True)
 
+    @final
     def _maybe_add_join_keys(
         self,
         result: DataFrame,
-        left_indexer: np.ndarray | None,
-        right_indexer: np.ndarray | None,
+        left_indexer: npt.NDArray[np.intp] | None,
+        right_indexer: npt.NDArray[np.intp] | None,
     ) -> None:
         left_has_missing = None
         right_has_missing = None
@@ -1033,13 +1030,14 @@ def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]
             self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how
         )
 
+    @final
     def _get_join_info(
         self,
     ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
         # make mypy happy
         assert self.how != "cross"
-        left_ax = self.left.axes[self.axis]
-        right_ax = self.right.axes[self.axis]
+        left_ax = self.left.index
+        right_ax = self.right.index
 
         if self.left_index and self.right_index and self.how != "asof":
             join_index, left_indexer, right_indexer = left_ax.join(
@@ -1061,32 +1059,32 @@ def _get_join_info(
             if self.right_index:
                 if len(self.left) > 0:
                     join_index = self._create_join_index(
-                        self.left.index,
-                        self.right.index,
+                        left_ax,
+                        right_ax,
                         left_indexer,
                         how="right",
                     )
                 else:
-                    join_index = self.right.index.take(right_indexer)
+                    join_index = right_ax.take(right_indexer)
             elif self.left_index:
                 if self.how == "asof":
                     # GH#33463 asof should always behave like a left merge
                     join_index = self._create_join_index(
-                        self.left.index,
-                        self.right.index,
+                        left_ax,
+                        right_ax,
                         left_indexer,
                         how="left",
                     )
 
                 elif len(self.right) > 0:
                     join_index = self._create_join_index(
-                        self.right.index,
-                        self.left.index,
+                        right_ax,
+                        left_ax,
                         right_indexer,
                         how="left",
                     )
                 else:
-                    join_index = self.left.index.take(left_indexer)
+                    join_index = left_ax.take(left_indexer)
             else:
                 join_index = default_index(len(left_indexer))
 
@@ -1094,6 +1092,7 @@ def _get_join_info(
             join_index = default_index(0).set_names(join_index.name)
         return join_index, left_indexer, right_indexer
 
+    @final
     def _create_join_index(
         self,
         index: Index,
@@ -1130,7 +1129,7 @@ def _create_join_index(
 
     def _get_merge_keys(
         self,
-    ) -> tuple[list[AnyArrayLike], list[AnyArrayLike], list[Hashable]]:
+    ) -> tuple[list[ArrayLike], list[ArrayLike], list[Hashable]]:
         """
         Note: has side effects (copy/delete key columns)
 
@@ -1146,8 +1145,8 @@ def _get_merge_keys(
         """
         # left_keys, right_keys entries can actually be anything listlike
         #  with a 'dtype' attr
-        left_keys: list[AnyArrayLike] = []
-        right_keys: list[AnyArrayLike] = []
+        left_keys: list[ArrayLike] = []
+        right_keys: list[ArrayLike] = []
         join_names: list[Hashable] = []
         right_drop: list[Hashable] = []
         left_drop: list[Hashable] = []
@@ -1170,11 +1169,13 @@ def _get_merge_keys(
         # ugh, spaghetti re #733
         if _any(self.left_on) and _any(self.right_on):
             for lk, rk in zip(self.left_on, self.right_on):
+                lk = extract_array(lk, extract_numpy=True)
+                rk = extract_array(rk, extract_numpy=True)
                 if is_lkey(lk):
-                    lk = cast(AnyArrayLike, lk)
+                    lk = cast(ArrayLike, lk)
                     left_keys.append(lk)
                     if is_rkey(rk):
-                        rk = cast(AnyArrayLike, rk)
+                        rk = cast(ArrayLike, rk)
                         right_keys.append(rk)
                         join_names.append(None)  # what to do?
                     else:
@@ -1186,7 +1187,7 @@ def _get_merge_keys(
                             join_names.append(rk)
                         else:
                             # work-around for merge_asof(right_index=True)
-                            right_keys.append(right.index)
+                            right_keys.append(right.index._values)
                             join_names.append(right.index.name)
                 else:
                     if not is_rkey(rk):
@@ -1197,7 +1198,7 @@ def _get_merge_keys(
                             right_keys.append(right._get_label_or_level_values(rk))
                         else:
                             # work-around for merge_asof(right_index=True)
-                            right_keys.append(right.index)
+                            right_keys.append(right.index._values)
                         if lk is not None and lk == rk:  # FIXME: what about other NAs?
                             # avoid key upcast in corner case (length-0)
                             lk = cast(Hashable, lk)
@@ -1206,7 +1207,7 @@ def _get_merge_keys(
                             else:
                                 left_drop.append(lk)
                     else:
-                        rk = cast(AnyArrayLike, rk)
+                        rk = cast(ArrayLike, rk)
                         right_keys.append(rk)
                     if lk is not None:
                         # Then we're either Hashable or a wrong-length arraylike,
@@ -1216,12 +1217,13 @@ def _get_merge_keys(
                         join_names.append(lk)
                     else:
                         # work-around for merge_asof(left_index=True)
-                        left_keys.append(left.index)
+                        left_keys.append(left.index._values)
                         join_names.append(left.index.name)
         elif _any(self.left_on):
             for k in self.left_on:
                 if is_lkey(k):
-                    k = cast(AnyArrayLike, k)
+                    k = extract_array(k, extract_numpy=True)
+                    k = cast(ArrayLike, k)
                     left_keys.append(k)
                     join_names.append(None)
                 else:
@@ -1241,8 +1243,9 @@ def _get_merge_keys(
                 right_keys = [self.right.index._values]
         elif _any(self.right_on):
             for k in self.right_on:
+                k = extract_array(k, extract_numpy=True)
                 if is_rkey(k):
-                    k = cast(AnyArrayLike, k)
+                    k = cast(ArrayLike, k)
                     right_keys.append(k)
                     join_names.append(None)
                 else:
@@ -1269,6 +1272,7 @@ def _get_merge_keys(
 
         return left_keys, right_keys, join_names
 
+    @final
     def _maybe_coerce_merge_keys(self) -> None:
         # we have valid merges but we may have to further
         # coerce these if they are originally incompatible types
@@ -1433,6 +1437,7 @@ def _maybe_coerce_merge_keys(self) -> None:
                 self.right = self.right.copy()
                 self.right[name] = self.right[name].astype(typ)
 
+    @final
     def _create_cross_configuration(
         self, left: DataFrame, right: DataFrame
     ) -> tuple[DataFrame, DataFrame, JoinHow, str]:
@@ -1611,18 +1616,17 @@ def _validate(self, validate: str) -> None:
 
 
 def get_join_indexers(
-    left_keys,
-    right_keys,
+    left_keys: list[ArrayLike],
+    right_keys: list[ArrayLike],
     sort: bool = False,
     how: MergeHow | Literal["asof"] = "inner",
-    **kwargs,
 ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
     """
 
     Parameters
     ----------
-    left_keys : ndarray, Index, Series
-    right_keys : ndarray, Index, Series
+    left_keys : list[ndarray, ExtensionArray, Index, Series]
+    right_keys : list[ndarray, ExtensionArray, Index, Series]
     sort : bool, default False
     how : {'inner', 'outer', 'left', 'right'}, default 'inner'
 
@@ -1668,7 +1672,7 @@ def get_join_indexers(
 
     lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort, how=how)
     # preserve left frame order if how == 'left' and sort == False
-    kwargs = cp.copy(kwargs)
+    kwargs = {}
     if how in ("left", "right"):
         kwargs["sort"] = sort
     join_func = {
@@ -1785,7 +1789,6 @@ def __init__(
         right_on: IndexLabel | None = None,
         left_index: bool = False,
         right_index: bool = False,
-        axis: AxisInt = 1,
         suffixes: Suffixes = ("_x", "_y"),
         fill_method: str | None = None,
         how: JoinHow | Literal["asof"] = "outer",
@@ -1800,7 +1803,6 @@ def __init__(
             left_index=left_index,
             right_index=right_index,
             right_on=right_on,
-            axis=axis,
             how=how,
             suffixes=suffixes,
             sort=True,  # factorize sorts
@@ -1813,8 +1815,8 @@ def get_result(self, copy: bool | None = True) -> DataFrame:
             self.left._info_axis, self.right._info_axis, self.suffixes
         )
 
-        left_join_indexer: np.ndarray | None
-        right_join_indexer: np.ndarray | None
+        left_join_indexer: npt.NDArray[np.intp] | None
+        right_join_indexer: npt.NDArray[np.intp] | None
 
         if self.fill_method == "ffill":
             if left_indexer is None:
@@ -1873,10 +1875,7 @@ def __init__(
         by=None,
         left_by=None,
         right_by=None,
-        axis: AxisInt = 1,
         suffixes: Suffixes = ("_x", "_y"),
-        copy: bool = True,
-        fill_method: str | None = None,
         how: Literal["asof"] = "asof",
         tolerance=None,
         allow_exact_matches: bool = True,
@@ -1898,10 +1897,9 @@ def __init__(
             right_on=right_on,
             left_index=left_index,
             right_index=right_index,
-            axis=axis,
             how=how,
             suffixes=suffixes,
-            fill_method=fill_method,
+            fill_method=None,
         )
 
     def _validate_left_right_on(self, left_on, right_on):
@@ -1985,7 +1983,7 @@ def _validate_left_right_on(self, left_on, right_on):
 
     def _get_merge_keys(
         self,
-    ) -> tuple[list[AnyArrayLike], list[AnyArrayLike], list[Hashable]]:
+    ) -> tuple[list[ArrayLike], list[ArrayLike], list[Hashable]]:
         # note this function has side effects
         (left_join_keys, right_join_keys, join_names) = super()._get_merge_keys()
 
@@ -2017,8 +2015,7 @@ def _get_merge_keys(
         # validate tolerance; datetime.timedelta or Timedelta if we have a DTI
         if self.tolerance is not None:
             if self.left_index:
-                # Actually more specifically an Index
-                lt = cast(AnyArrayLike, self.left.index)
+                lt = self.left.index._values
             else:
                 lt = left_join_keys[-1]
 
@@ -2027,19 +2024,19 @@ def _get_merge_keys(
                 f"with type {repr(lt.dtype)}"
             )
 
-            if needs_i8_conversion(getattr(lt, "dtype", None)):
+            if needs_i8_conversion(lt.dtype):
                 if not isinstance(self.tolerance, datetime.timedelta):
                     raise MergeError(msg)
                 if self.tolerance < Timedelta(0):
                     raise MergeError("tolerance must be positive")
 
-            elif is_integer_dtype(lt):
+            elif is_integer_dtype(lt.dtype):
                 if not is_integer(self.tolerance):
                     raise MergeError(msg)
                 if self.tolerance < 0:
                     raise MergeError("tolerance must be positive")
 
-            elif is_float_dtype(lt):
+            elif is_float_dtype(lt.dtype):
                 if not is_number(self.tolerance):
                     raise MergeError(msg)
                 # error: Unsupported operand types for > ("int" and "Number")
@@ -2062,11 +2059,11 @@ def _get_merge_keys(
     def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
         """return the join indexers"""
 
-        def flip(xs) -> np.ndarray:
+        def flip(xs: list[ArrayLike]) -> np.ndarray:
             """unlike np.transpose, this returns an array of tuples"""
 
-            def injection(obj):
-                if not is_extension_array_dtype(obj):
+            def injection(obj: ArrayLike):
+                if not isinstance(obj.dtype, ExtensionDtype):
                     # ndarray
                     return obj
                 obj = extract_array(obj)
@@ -2213,11 +2210,11 @@ def injection(obj):
 
 
 def _get_multiindex_indexer(
-    join_keys, index: MultiIndex, sort: bool
+    join_keys: list[ArrayLike], index: MultiIndex, sort: bool
 ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
     # left & right join labels and num. of levels at each location
     mapped = (
-        _factorize_keys(index.levels[n], join_keys[n], sort=sort)
+        _factorize_keys(index.levels[n]._values, join_keys[n], sort=sort)
         for n in range(index.nlevels)
     )
     zipped = zip(*mapped)
@@ -2250,7 +2247,7 @@ def _get_multiindex_indexer(
 
 
 def _get_single_indexer(
-    join_key, index: Index, sort: bool = False
+    join_key: ArrayLike, index: Index, sort: bool = False
 ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
     left_key, right_key, count = _factorize_keys(join_key, index._values, sort=sort)
 
@@ -2295,7 +2292,7 @@ def _get_no_sort_one_missing_indexer(
 
 
 def _left_join_on_index(
-    left_ax: Index, right_ax: Index, join_keys, sort: bool = False
+    left_ax: Index, right_ax: Index, join_keys: list[ArrayLike], sort: bool = False
 ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp]]:
     if isinstance(right_ax, MultiIndex):
         left_indexer, right_indexer = _get_multiindex_indexer(
@@ -2328,9 +2325,9 @@ def _factorize_keys(
 
     Parameters
     ----------
-    lk : array-like
+    lk : ndarray, ExtensionArray
         Left key.
-    rk : array-like
+    rk : ndarray, ExtensionArray
         Right key.
     sort : bool, defaults to True
         If True, the encoding is done such that the unique elements in the
@@ -2371,9 +2368,6 @@ def _factorize_keys(
     >>> pd.core.reshape.merge._factorize_keys(lk, rk, sort=False)
     (array([0, 1, 2]), array([0, 1]), 3)
     """
-    # Some pre-processing for non-ndarray lk / rk
-    lk = extract_array(lk, extract_numpy=True, extract_range=True)
-    rk = extract_array(rk, extract_numpy=True, extract_range=True)
     # TODO: if either is a RangeIndex, we can likely factorize more efficiently?
 
     if (
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 9c7110cc21082..40c19a57466fe 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -72,7 +72,10 @@
     pandas_dtype,
     validate_all_hashable,
 )
-from pandas.core.dtypes.dtypes import ExtensionDtype
+from pandas.core.dtypes.dtypes import (
+    ArrowDtype,
+    ExtensionDtype,
+)
 from pandas.core.dtypes.generic import ABCDataFrame
 from pandas.core.dtypes.inference import is_hashable
 from pandas.core.dtypes.missing import (
@@ -4267,12 +4270,14 @@ def explode(self, ignore_index: bool = False) -> Series:
         3      4
         dtype: object
         """
-        if not len(self) or not is_object_dtype(self.dtype):
+        if isinstance(self.dtype, ArrowDtype) and self.dtype.type == list:
+            values, counts = self._values._explode()
+        elif len(self) and is_object_dtype(self.dtype):
+            values, counts = reshape.explode(np.asarray(self._values))
+        else:
             result = self.copy()
             return result.reset_index(drop=True) if ignore_index else result
 
-        values, counts = reshape.explode(np.asarray(self._values))
-
         if ignore_index:
             index = default_index(len(values))
         else:
@@ -4494,7 +4499,8 @@ def transform(
     ) -> DataFrame | Series:
         # Validate axis argument
         self._get_axis_number(axis)
-        result = SeriesApply(self, func=func, args=args, kwargs=kwargs).transform()
+        ser = self.copy(deep=False) if using_copy_on_write() else self
+        result = SeriesApply(ser, func=func, args=args, kwargs=kwargs).transform()
         return result
 
     def apply(
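
The `explode` branch added above in this file dispatches list-typed ArrowDtype data to `ArrowExtensionArray._explode()`. A small sketch of the user-facing behavior this enables (assumes pyarrow is installed; the data is illustrative):

    import pandas as pd
    import pyarrow as pa

    ser = pd.Series(
        [[1, 2, 3], [4], None],
        dtype=pd.ArrowDtype(pa.list_(pa.int64())),
    )
    # Previously this dtype fell through to the "no-op copy" branch;
    # with the change, each list element becomes its own row
    print(ser.explode())
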
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 7bddaad780b8c..7579f816d0ace 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -562,6 +562,8 @@
     {inplace}
     limit : int, default None
         Maximum size gap to forward or backward fill.
+
+        .. deprecated:: 2.1.0
     regex : bool or same types as `to_replace`, default False
         Whether to interpret `to_replace` and/or `value` as regular
         expressions. If this is ``True`` then `to_replace` *must* be a
@@ -572,6 +574,8 @@
         The method to use when for replacement, when `to_replace` is a
         scalar, list or tuple and `value` is ``None``.
 
+        .. deprecated:: 2.1.0
+
     Returns
     -------
     {klass}
@@ -766,6 +770,9 @@
     4     b
     dtype: object
 
+        .. deprecated:: 2.1.0
+            The 'method' parameter and padding behavior are deprecated.
+
     On the other hand, if ``None`` is explicitly passed for ``value``, it will
     be respected:
 
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 0c0b312c11c48..b63f3f28b8f6c 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -424,7 +424,7 @@ def lexsort_indexer(
 
 def nargsort(
     items: ArrayLike | Index | Series,
-    kind: SortKind = "stable",
+    kind: SortKind = "quicksort",
     ascending: bool = True,
     na_position: str = "last",
     key: Callable | None = None,
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 127ad5e962b16..24fe7e6bfc0c1 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -20,6 +20,7 @@
     DtypeObj,
     F,
     Scalar,
+    npt,
 )
 from pandas.util._decorators import Appender
 from pandas.util._exceptions import find_stack_level
@@ -279,7 +280,7 @@ def _wrap_result(
 
                 from pandas.core.arrays.arrow.array import ArrowExtensionArray
 
-                value_lengths = result._pa_array.combine_chunks().value_lengths()
+                value_lengths = pa.compute.list_value_length(result._pa_array)
                 max_len = pa.compute.max(value_lengths).as_py()
                 min_len = pa.compute.min(value_lengths).as_py()
                 if result._hasna:
@@ -313,9 +314,14 @@ def _wrap_result(
                     labels = name
                 else:
                     labels = range(max_len)
+                result = (
+                    pa.compute.list_flatten(result._pa_array)
+                    .to_numpy()
+                    .reshape(len(result), max_len)
+                )
                 result = {
                     label: ArrowExtensionArray(pa.array(res))
-                    for label, res in zip(labels, (zip(*result.tolist())))
+                    for label, res in zip(labels, result.T)
                 }
             elif is_object_dtype(result):
 
@@ -3361,7 +3367,7 @@ def casefold(self):
     )
 
 
-def cat_safe(list_of_columns: list, sep: str):
+def cat_safe(list_of_columns: list[npt.NDArray[np.object_]], sep: str):
     """
     Auxiliary function for :meth:`str.cat`.
 
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index ad366e58c2f06..95946f0a159fd 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -7,6 +7,7 @@
     TYPE_CHECKING,
     overload,
 )
+import warnings
 
 import numpy as np
 
@@ -19,6 +20,7 @@
     Timedelta,
     parse_timedelta_unit,
 )
+from pandas.util._exceptions import find_stack_level
 
 from pandas.core.dtypes.common import is_list_like
 from pandas.core.dtypes.generic import (
@@ -118,6 +120,9 @@ def to_timedelta(
 
         Must not be specified when `arg` contains strings and ``errors="raise"``.
 
+        .. deprecated:: 2.1.0
+            Units 'T' and 'L' are deprecated and will be removed in a future version.
+
     errors : {'ignore', 'raise', 'coerce'}, default 'raise'
         - If 'raise', then invalid parsing will raise an exception.
         - If 'coerce', then invalid parsing will be set as NaT.
@@ -169,6 +174,13 @@ def to_timedelta(
     TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
                    dtype='timedelta64[ns]', freq=None)
     """
+    if unit in {"T", "t", "L", "l"}:
+        warnings.warn(
+            f"Unit '{unit}' is deprecated and will be removed in a future version.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+
     if unit is not None:
         unit = parse_timedelta_unit(unit)
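
The warning added above deprecates the frequency-alias spellings of minutes and milliseconds. A brief before/after sketch of the behavior this change introduces:

    import pandas as pd

    pd.to_timedelta(5, unit="T")    # now emits FutureWarning ('T' is deprecated)
    pd.to_timedelta(5, unit="min")  # preferred spelling, no warning
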
 
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 7220b44c7af9d..a08ffcc9f7200 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -624,7 +624,7 @@ def _numba_apply(
         self,
         func: Callable[..., Any],
         engine_kwargs: dict[str, bool] | None = None,
-        *func_args,
+        **func_kwargs,
     ):
         window_indexer = self._get_window_indexer()
         min_periods = (
@@ -646,10 +646,15 @@ def _numba_apply(
             step=self.step,
         )
         self._check_window_bounds(start, end, len(values))
+        # For now, map everything to float to match the Cython impl
+        # even though it is wrong
+        # TODO: Could preserve correct dtypes in future
+        # xref #53214
+        dtype_mapping = executor.float_dtype_mapping
         aggregator = executor.generate_shared_aggregator(
-            func, **get_jit_arguments(engine_kwargs)
+            func, dtype_mapping, **get_jit_arguments(engine_kwargs)
         )
-        result = aggregator(values, start, end, min_periods, *func_args)
+        result = aggregator(values.T, start, end, min_periods, **func_kwargs).T
         result = result.T if self.axis == 1 else result
         index = self._slice_axis_for_step(obj.index, result)
         if obj.ndim == 1:
@@ -1466,7 +1471,7 @@ def max(
             else:
                 from pandas.core._numba.kernels import sliding_min_max
 
-                return self._numba_apply(sliding_min_max, engine_kwargs, True)
+                return self._numba_apply(sliding_min_max, engine_kwargs, is_max=True)
         window_func = window_aggregations.roll_max
         return self._apply(window_func, name="max", numeric_only=numeric_only)
 
@@ -1488,7 +1493,7 @@ def min(
             else:
                 from pandas.core._numba.kernels import sliding_min_max
 
-                return self._numba_apply(sliding_min_max, engine_kwargs, False)
+                return self._numba_apply(sliding_min_max, engine_kwargs, is_max=False)
         window_func = window_aggregations.roll_min
         return self._apply(window_func, name="min", numeric_only=numeric_only)
 
@@ -1547,7 +1552,7 @@ def std(
                 raise NotImplementedError("std not supported with method='table'")
             from pandas.core._numba.kernels import sliding_var
 
-            return zsqrt(self._numba_apply(sliding_var, engine_kwargs, ddof))
+            return zsqrt(self._numba_apply(sliding_var, engine_kwargs, ddof=ddof))
         window_func = window_aggregations.roll_var
 
         def zsqrt_func(values, begin, end, min_periods):
@@ -1571,7 +1576,7 @@ def var(
                 raise NotImplementedError("var not supported with method='table'")
             from pandas.core._numba.kernels import sliding_var
 
-            return self._numba_apply(sliding_var, engine_kwargs, ddof)
+            return self._numba_apply(sliding_var, engine_kwargs, ddof=ddof)
         window_func = partial(window_aggregations.roll_var, ddof=ddof)
         return self._apply(
             window_func,
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 672f7c1f71b15..dafba136c141f 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -67,6 +67,7 @@ def __init__(
         doublequote: bool = True,
         escapechar: str | None = None,
         storage_options: StorageOptions = None,
+        comment: str | None = None,
     ) -> None:
         self.fmt = formatter
 
@@ -89,6 +90,7 @@ def __init__(
         self.date_format = date_format
         self.cols = self._initialize_columns(cols)
         self.chunksize = self._initialize_chunksize(chunksize)
+        self.comment = comment
 
     @property
     def na_rep(self) -> str:
@@ -260,6 +262,8 @@ def save(self) -> None:
             self._save()
 
     def _save(self) -> None:
+        if self.comment:
+            self._save_df_attrs()
         if self._need_to_save_header:
             self._save_header()
         self._save_body()
@@ -318,3 +322,10 @@ def _save_chunk(self, start_i: int, end_i: int) -> None:
             self.cols,
             self.writer,
         )
+
+    def _save_df_attrs(self) -> None:
+        for key, value in self.fmt.frame.attrs.items():
+            # remove the delimiter from the attr string values
+            key = str(key).replace(self.writer.dialect.delimiter, "")
+            value = value.replace(self.writer.dialect.delimiter, "")
+            self.writer.writerow([f"{self.comment}{key}:{value}"])
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index a425944647b5c..489df372ed5dd 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1117,6 +1117,7 @@ def to_csv(
         escapechar: str | None = None,
         errors: str = "strict",
         storage_options: StorageOptions = None,
+        comment: str | None = None,
     ) -> str | None:
         """
         Render dataframe as comma-separated file.
@@ -1147,6 +1148,7 @@ def to_csv(
             escapechar=escapechar,
             storage_options=storage_options,
             formatter=self.fmt,
+            comment=comment,
         )
         csv_formatter.save()
 
@@ -1506,14 +1508,16 @@ def format_values_with(float_format):
 
             # default formatter leaves a space to the left when formatting
             # floats, must be consistent for left-justifying NaNs (GH #25061)
-            if self.justify == "left":
-                na_rep = " " + self.na_rep
-            else:
-                na_rep = self.na_rep
+            na_rep = " " + self.na_rep if self.justify == "left" else self.na_rep
 
-            # separate the wheat from the chaff
+            # different formatting strategies for complex and non-complex data
+            # need to distinguish complex and float NaNs (GH #53762)
             values = self.values
             is_complex = is_complex_dtype(values)
+            if is_complex:
+                na_rep = f"{na_rep}+{0:.{self.digits}f}j"
+
+            # separate the wheat from the chaff
             values = format_with_na_rep(values, formatter, na_rep)
 
             if self.fixed_width:
@@ -1912,22 +1916,26 @@ def _trim_zeros_complex(str_complexes: np.ndarray, decimal: str = ".") -> list[s
     Separates the real and imaginary parts from the complex number, and
     executes the _trim_zeros_float method on each of those.
     """
-    trimmed = [
-        "".join(_trim_zeros_float(re.split(r"([j+-])", x), decimal))
-        for x in str_complexes
-    ]
-
-    # pad strings to the length of the longest trimmed string for alignment
-    lengths = [len(s) for s in trimmed]
-    max_length = max(lengths)
+    real_part, imag_part = [], []
+    for x in str_complexes:
+        # Complex numbers are represented as "(-)xxx(+/-)xxxj"
+        # The split will give [maybe "-", "xxx", "+/-", "xxx", "j", ""]
+        # Therefore, the imaginary part is the 4th and 3rd last elements,
+        # and the real part is everything before the imaginary part
+        trimmed = re.split(r"([j+-])", x)
+        real_part.append("".join(trimmed[:-4]))
+        imag_part.append("".join(trimmed[-4:-2]))
+
+    # We want to align the lengths of the real and imaginary parts of each complex
+    # number, as well as the lengths of the real (resp. imaginary) parts of all numbers
+    # in the array
+    n = len(str_complexes)
+    padded_parts = _trim_zeros_float(real_part + imag_part, decimal)
     padded = [
-        s[: -((k - 1) // 2 + 1)]  # real part
-        + (max_length - k) // 2 * "0"
-        + s[-((k - 1) // 2 + 1) : -((k - 1) // 2)]  # + / -
-        + s[-((k - 1) // 2) : -1]  # imaginary part
-        + (max_length - k) // 2 * "0"
-        + s[-1]
-        for s, k in zip(trimmed, lengths)
+        padded_parts[i]  # real part (including - or space, possibly "NaN")
+        + padded_parts[i + n]  # imaginary part (including + or -)
+        + "j"
+        for i in range(n)
     ]
     return padded
 
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index 55dacd0c268ff..260620e145105 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -14,8 +14,6 @@
     Sequence,
 )
 
-import numpy as np
-
 from pandas._config import get_option
 
 from pandas.io.formats import format as fmt
@@ -1099,4 +1097,4 @@ def _get_dataframe_dtype_counts(df: DataFrame) -> Mapping[str, int]:
     Create mapping between datatypes and their number of occurrences.
     """
     # groupby dtype.name to collect e.g. Categorical columns
-    return df.dtypes.value_counts().groupby(lambda x: x.name).sum().astype(np.intp)
+    return df.dtypes.value_counts().groupby(lambda x: x.name).sum()
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 5c2fba814375f..eaeaedfdddfcb 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -18,6 +18,7 @@
     TypeVar,
     overload,
 )
+import warnings
 
 import numpy as np
 
@@ -30,6 +31,7 @@
 from pandas.compat._optional import import_optional_dependency
 from pandas.errors import AbstractMethodError
 from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
 from pandas.util._validators import check_dtype_backend
 
 from pandas.core.dtypes.common import ensure_str
@@ -535,6 +537,10 @@ def read_json(
         By file-like object, we refer to objects with a ``read()`` method,
         such as a file handle (e.g. via builtin ``open`` function)
         or ``StringIO``.
+
+        .. deprecated:: 2.1.0
+            Passing json literal strings is deprecated.
+
     orient : str, optional
         Indication of expected JSON string format.
         Compatible JSON strings can be produced by ``to_json()`` with a
@@ -695,6 +701,7 @@ def read_json(
 
     Examples
     --------
+    >>> from io import StringIO
     >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
     ...                   index=['row 1', 'row 2'],
     ...                   columns=['col 1', 'col 2'])
@@ -709,7 +716,7 @@ def read_json(
 "data":[["a","b"],["c","d"]]\
 }}\
 '
-    >>> pd.read_json(_, orient='split')
+    >>> pd.read_json(StringIO(_), orient='split')
           col 1 col 2
     row 1     a     b
     row 2     c     d
@@ -719,7 +726,7 @@ def read_json(
     >>> df.to_json(orient='index')
     '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}'
 
-    >>> pd.read_json(_, orient='index')
+    >>> pd.read_json(StringIO(_), orient='index')
           col 1 col 2
     row 1     a     b
     row 2     c     d
@@ -729,7 +736,7 @@ def read_json(
 
     >>> df.to_json(orient='records')
     '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]'
-    >>> pd.read_json(_, orient='records')
+    >>> pd.read_json(StringIO(_), orient='records')
       col 1 col 2
     0     a     b
     1     c     d
@@ -860,6 +867,18 @@ def __init__(
             self.nrows = validate_integer("nrows", self.nrows, 0)
             if not self.lines:
                 raise ValueError("nrows can only be passed if lines=True")
+        if (
+            isinstance(filepath_or_buffer, str)
+            and not self.lines
+            and "\n" in filepath_or_buffer
+        ):
+            warnings.warn(
+                "Passing literal json to 'read_json' is deprecated and "
+                "will be removed in a future version. To read from a "
+                "literal string, wrap it in a 'StringIO' object.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
         if self.engine == "pyarrow":
             if not self.lines:
                 raise ValueError(
@@ -925,7 +944,14 @@ def _get_data_from_filepath(self, filepath_or_buffer):
             and not file_exists(filepath_or_buffer)
         ):
             raise FileNotFoundError(f"File {filepath_or_buffer} does not exist")
-
+        else:
+            warnings.warn(
+                "Passing literal json to 'read_json' is deprecated and "
+                "will be removed in a future version. To read from a "
+                "literal string, wrap it in a 'StringIO' object.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
         return filepath_or_buffer
 
     def _combine_lines(self, lines) -> str:
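
Both warnings added above steer callers toward wrapping literal JSON in a buffer. A minimal migration sketch (illustrative payload):

    from io import StringIO

    import pandas as pd

    payload = '{"a": [1, 2], "b": [3, 4]}'
    # Passing the raw string now emits a FutureWarning;
    # wrapping it in StringIO keeps the old behavior without the warning
    df = pd.read_json(StringIO(payload))
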
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index 65cc369416352..2aec361d46b99 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -5,6 +5,7 @@
 from __future__ import annotations
 
 import io
+from os import PathLike
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -326,10 +327,13 @@ def _iterparse_nodes(self, iterparse: Callable) -> list[dict[str, str | None]]:
             )
 
         if (not hasattr(self.path_or_buffer, "read")) and (
-            not isinstance(self.path_or_buffer, str)
+            not isinstance(self.path_or_buffer, (str, PathLike))
             or is_url(self.path_or_buffer)
             or is_fsspec_url(self.path_or_buffer)
-            or self.path_or_buffer.startswith(("<?xml", "<"))
+            or (
+                isinstance(self.path_or_buffer, str)
+                and self.path_or_buffer.startswith(("<?xml", "<"))
+            )
             or infer_compression(self.path_or_buffer, "infer") is not None
         ):
             raise ParserError(
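
The relaxed check above lets `iterparse` accept `os.PathLike` inputs as well as plain path strings. A hedged usage sketch ("books.xml" is a hypothetical local file containing `<book>` elements):

    from pathlib import Path

    import pandas as pd

    # Before this change, a Path object tripped the ParserError above;
    # now it is treated like a string path to a local file
    df = pd.read_xml(Path("books.xml"), iterparse={"book": ["title", "year"]})
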
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 0f9fd948b6fe5..24b8816109677 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -98,6 +98,16 @@ def hist_series(
     See Also
     --------
     matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
+
+    Examples
+    --------
+
+    .. plot::
+        :context: close-figs
+
+        >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+        >>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)
+        >>> hist = ser.groupby(level=0).hist()
     """
     plot_backend = _get_plot_backend(backend)
     return plot_backend.hist_series(
@@ -778,12 +788,23 @@ class PlotAccessor(PandasObject):
 
     Examples
     --------
+    For Series:
 
     .. plot::
         :context: close-figs
 
         >>> ser = pd.Series([1, 2, 3, 3])
         >>> plot = ser.plot(kind='hist', title="My plot")
+
+    For DataFrame:
+
+    .. plot::
+        :context: close-figs
+
+        >>> df = pd.DataFrame({'length': [1.5, 0.5, 1.2, 0.9, 3],
+        ...                   'width': [0.7, 0.2, 0.15, 0.2, 1.1]},
+        ...                   index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])
+        >>> plot = df.plot()
     """
 
     _common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box")
diff --git a/pandas/tests/apply/test_frame_apply_relabeling.py b/pandas/tests/apply/test_frame_apply_relabeling.py
index 2dcd228cdc19c..2652d43fd42ec 100644
--- a/pandas/tests/apply/test_frame_apply_relabeling.py
+++ b/pandas/tests/apply/test_frame_apply_relabeling.py
@@ -1,7 +1,7 @@
 import numpy as np
 import pytest
 
-from pandas.compat import is_numpy_dev
+from pandas.compat.numpy import np_version_gte1p25
 
 import pandas as pd
 import pandas._testing as tm
@@ -45,7 +45,7 @@ def test_agg_relabel_multi_columns_multi_methods():
     tm.assert_frame_equal(result, expected)
 
 
-@pytest.mark.xfail(is_numpy_dev, reason="name of min now equals name of np.min")
+@pytest.mark.xfail(np_version_gte1p25, reason="name of min now equals name of np.min")
 def test_agg_relabel_partial_functions():
     # GH 26513, test on partial, functools or more complex cases
     df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})
diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py
index 425d2fb42a711..3e0ff19ae4c1a 100644
--- a/pandas/tests/apply/test_series_apply.py
+++ b/pandas/tests/apply/test_series_apply.py
@@ -271,6 +271,19 @@ def test_apply_empty_integer_series_with_datetime_index(by_row):
     tm.assert_series_equal(result, s)
 
 
+def test_apply_dataframe_iloc():
+    uintDF = DataFrame(np.uint64([1, 2, 3, 4, 5]), columns=["Numbers"])
+    indexDF = DataFrame([2, 3, 2, 1, 2], columns=["Indices"])
+
+    def retrieve(targetRow, targetDF):
+        val = targetDF["Numbers"].iloc[targetRow]
+        return val
+
+    result = indexDF["Indices"].apply(retrieve, args=(uintDF,))
+    expected = Series([3, 4, 3, 2, 3], name="Indices", dtype="uint64")
+    tm.assert_series_equal(result, expected)
+
+
 def test_transform(string_series, by_row):
     # transforming functions
 
diff --git a/pandas/tests/arrays/sparse/test_indexing.py b/pandas/tests/arrays/sparse/test_indexing.py
index f639e9b18596c..d63d0fb07b404 100644
--- a/pandas/tests/arrays/sparse/test_indexing.py
+++ b/pandas/tests/arrays/sparse/test_indexing.py
@@ -6,18 +6,25 @@
 import pandas._testing as tm
 from pandas.core.arrays.sparse import SparseArray
 
-arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
-arr = SparseArray(arr_data)
+
+@pytest.fixture
+def arr_data():
+    return np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
+
+
+@pytest.fixture
+def arr(arr_data):
+    return SparseArray(arr_data)
 
 
 class TestGetitem:
-    def test_getitem(self):
+    def test_getitem(self, arr):
         dense = arr.to_dense()
         for i, value in enumerate(arr):
             tm.assert_almost_equal(value, dense[i])
             tm.assert_almost_equal(arr[-i], dense[-i])
 
-    def test_getitem_arraylike_mask(self):
+    def test_getitem_arraylike_mask(self, arr):
         arr = SparseArray([0, 1, 2])
         result = arr[[True, False, True]]
         expected = SparseArray([0, 2])
@@ -81,7 +88,7 @@ def test_boolean_slice_empty(self):
         res = arr[[False, False, False]]
         assert res.dtype == arr.dtype
 
-    def test_getitem_bool_sparse_array(self):
+    def test_getitem_bool_sparse_array(self, arr):
         # GH 23122
         spar_bool = SparseArray([False, True] * 5, dtype=np.bool_, fill_value=True)
         exp = SparseArray([np.nan, 2, np.nan, 5, 6])
@@ -106,7 +113,7 @@ def test_getitem_bool_sparse_array_as_comparison(self):
         exp = SparseArray([3.0, 4.0], fill_value=np.nan)
         tm.assert_sp_array_equal(res, exp)
 
-    def test_get_item(self):
+    def test_get_item(self, arr):
         zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
 
         assert np.isnan(arr[1])
@@ -129,7 +136,7 @@ def test_get_item(self):
 
 
 class TestSetitem:
-    def test_set_item(self):
+    def test_set_item(self, arr_data):
         arr = SparseArray(arr_data).copy()
 
         def setitem():
@@ -146,12 +153,12 @@ def setslice():
 
 
 class TestTake:
-    def test_take_scalar_raises(self):
+    def test_take_scalar_raises(self, arr):
         msg = "'indices' must be an array, not a scalar '2'."
         with pytest.raises(ValueError, match=msg):
             arr.take(2)
 
-    def test_take(self):
+    def test_take(self, arr_data, arr):
         exp = SparseArray(np.take(arr_data, [2, 3]))
         tm.assert_sp_array_equal(arr.take([2, 3]), exp)
 
@@ -173,14 +180,14 @@ def test_take_fill_value(self):
         exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
         tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
 
-    def test_take_negative(self):
+    def test_take_negative(self, arr_data, arr):
         exp = SparseArray(np.take(arr_data, [-1]))
         tm.assert_sp_array_equal(arr.take([-1]), exp)
 
         exp = SparseArray(np.take(arr_data, [-4, -3, -2]))
         tm.assert_sp_array_equal(arr.take([-4, -3, -2]), exp)
 
-    def test_bad_take(self):
+    def test_bad_take(self, arr):
         with pytest.raises(IndexError, match="bounds"):
             arr.take([11])
 
diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py
index b7517b1b16445..7a77a2064e7e0 100644
--- a/pandas/tests/arrays/sparse/test_libsparse.py
+++ b/pandas/tests/arrays/sparse/test_libsparse.py
@@ -14,77 +14,74 @@
     make_sparse_index,
 )
 
-TEST_LENGTH = 20
-
-plain_case = [
-    [0, 7, 15],
-    [3, 5, 5],
-    [2, 9, 14],
-    [2, 3, 5],
-    [2, 9, 15],
-    [1, 3, 4],
-]
-delete_blocks = [
-    [0, 5],
-    [4, 4],
-    [1],
-    [4],
-    [1],
-    [3],
-]
-split_blocks = [
-    [0],
-    [10],
-    [0, 5],
-    [3, 7],
-    [0, 5],
-    [3, 5],
-]
-skip_block = [
-    [10],
-    [5],
-    [0, 12],
-    [5, 3],
-    [12],
-    [3],
-]
-
-no_intersect = [
-    [0, 10],
-    [4, 6],
-    [5, 17],
-    [4, 2],
-    [],
-    [],
-]
-
-one_empty = [
-    [0],
-    [5],
-    [],
-    [],
-    [],
-    [],
-]
-
-both_empty = [  # type: ignore[var-annotated]
-    [],
-    [],
-    [],
-    [],
-    [],
-    [],
-]
-
-CASES = [plain_case, delete_blocks, split_blocks, skip_block, no_intersect, one_empty]
-IDS = [
-    "plain_case",
-    "delete_blocks",
-    "split_blocks",
-    "skip_block",
-    "no_intersect",
-    "one_empty",
-]
+
+@pytest.fixture
+def test_length():
+    return 20
+
+
+@pytest.fixture(
+    params=[
+        [
+            [0, 7, 15],
+            [3, 5, 5],
+            [2, 9, 14],
+            [2, 3, 5],
+            [2, 9, 15],
+            [1, 3, 4],
+        ],
+        [
+            [0, 5],
+            [4, 4],
+            [1],
+            [4],
+            [1],
+            [3],
+        ],
+        [
+            [0],
+            [10],
+            [0, 5],
+            [3, 7],
+            [0, 5],
+            [3, 5],
+        ],
+        [
+            [10],
+            [5],
+            [0, 12],
+            [5, 3],
+            [12],
+            [3],
+        ],
+        [
+            [0, 10],
+            [4, 6],
+            [5, 17],
+            [4, 2],
+            [],
+            [],
+        ],
+        [
+            [0],
+            [5],
+            [],
+            [],
+            [],
+            [],
+        ],
+    ],
+    ids=[
+        "plain_case",
+        "delete_blocks",
+        "split_blocks",
+        "skip_block",
+        "no_intersect",
+        "one_empty",
+    ],
+)
+def cases(request):
+    return request.param
 
 
 class TestSparseIndexUnion:
@@ -101,7 +98,7 @@ class TestSparseIndexUnion:
             [[0, 10], [3, 3], [5, 15], [2, 2], [0, 5, 10, 15], [3, 2, 3, 2]],
         ],
     )
-    def test_index_make_union(self, xloc, xlen, yloc, ylen, eloc, elen):
+    def test_index_make_union(self, xloc, xlen, yloc, ylen, eloc, elen, test_length):
         # Case 1
         # x: ----
         # y:     ----
@@ -132,8 +129,8 @@ def test_index_make_union(self, xloc, xlen, yloc, ylen, eloc, elen):
         # Case 8
         # x: ----       ---
         # y:       ---       ---
-        xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
-        yindex = BlockIndex(TEST_LENGTH, yloc, ylen)
+        xindex = BlockIndex(test_length, xloc, xlen)
+        yindex = BlockIndex(test_length, yloc, ylen)
         bresult = xindex.make_union(yindex)
         assert isinstance(bresult, BlockIndex)
         tm.assert_numpy_array_equal(bresult.blocs, np.array(eloc, dtype=np.int32))
@@ -180,12 +177,12 @@ def test_int_index_make_union(self):
 
 class TestSparseIndexIntersect:
     @td.skip_if_windows
-    @pytest.mark.parametrize("xloc, xlen, yloc, ylen, eloc, elen", CASES, ids=IDS)
-    def test_intersect(self, xloc, xlen, yloc, ylen, eloc, elen):
-        xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
-        yindex = BlockIndex(TEST_LENGTH, yloc, ylen)
-        expected = BlockIndex(TEST_LENGTH, eloc, elen)
-        longer_index = BlockIndex(TEST_LENGTH + 1, yloc, ylen)
+    def test_intersect(self, cases, test_length):
+        xloc, xlen, yloc, ylen, eloc, elen = cases
+        xindex = BlockIndex(test_length, xloc, xlen)
+        yindex = BlockIndex(test_length, yloc, ylen)
+        expected = BlockIndex(test_length, eloc, elen)
+        longer_index = BlockIndex(test_length + 1, yloc, ylen)
 
         result = xindex.intersect(yindex)
         assert result.equals(expected)
@@ -493,10 +490,10 @@ def test_equals(self):
         assert index.equals(index)
         assert not index.equals(IntIndex(10, [0, 1, 2, 3]))
 
-    @pytest.mark.parametrize("xloc, xlen, yloc, ylen, eloc, elen", CASES, ids=IDS)
-    def test_to_block_index(self, xloc, xlen, yloc, ylen, eloc, elen):
-        xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
-        yindex = BlockIndex(TEST_LENGTH, yloc, ylen)
+    def test_to_block_index(self, cases, test_length):
+        xloc, xlen, yloc, ylen, _, _ = cases
+        xindex = BlockIndex(test_length, xloc, xlen)
+        yindex = BlockIndex(test_length, yloc, ylen)
 
         # see if survive the round trip
         xbindex = xindex.to_int_index().to_block_index()
@@ -512,13 +509,13 @@ def test_to_int_index(self):
 
 class TestSparseOperators:
     @pytest.mark.parametrize("opname", ["add", "sub", "mul", "truediv", "floordiv"])
-    @pytest.mark.parametrize("xloc, xlen, yloc, ylen, eloc, elen", CASES, ids=IDS)
-    def test_op(self, opname, xloc, xlen, yloc, ylen, eloc, elen):
+    def test_op(self, opname, cases, test_length):
+        xloc, xlen, yloc, ylen, _, _ = cases
         sparse_op = getattr(splib, f"sparse_{opname}_float64")
         python_op = getattr(operator, opname)
 
-        xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
-        yindex = BlockIndex(TEST_LENGTH, yloc, ylen)
+        xindex = BlockIndex(test_length, xloc, xlen)
+        yindex = BlockIndex(test_length, yloc, ylen)
 
         xdindex = xindex.to_int_index()
         ydindex = yindex.to_int_index()
@@ -542,10 +539,10 @@ def test_op(self, opname, xloc, xlen, yloc, ylen, eloc, elen):
 
         # check versus Series...
         xseries = Series(x, xdindex.indices)
-        xseries = xseries.reindex(np.arange(TEST_LENGTH)).fillna(xfill)
+        xseries = xseries.reindex(np.arange(test_length)).fillna(xfill)
 
         yseries = Series(y, ydindex.indices)
-        yseries = yseries.reindex(np.arange(TEST_LENGTH)).fillna(yfill)
+        yseries = yseries.reindex(np.arange(test_length)).fillna(yfill)
 
         series_result = python_op(xseries, yseries)
         series_result = series_result.reindex(ri_index.indices)
diff --git a/pandas/tests/copy_view/index/test_datetimeindex.py b/pandas/tests/copy_view/index/test_datetimeindex.py
index f691d5589f48c..f54beca4cc414 100644
--- a/pandas/tests/copy_view/index/test_datetimeindex.py
+++ b/pandas/tests/copy_view/index/test_datetimeindex.py
@@ -54,3 +54,12 @@ def test_datetimeindex_isocalendar(using_copy_on_write):
     ser.iloc[0] = Timestamp("2020-12-31")
     if using_copy_on_write:
         tm.assert_index_equal(df.index, expected)
+
+
+def test_index_values(using_copy_on_write):
+    idx = date_range("2019-12-31", periods=3, freq="D")
+    result = idx.values
+    if using_copy_on_write:
+        assert result.flags.writeable is False
+    else:
+        assert result.flags.writeable is True
diff --git a/pandas/tests/copy_view/index/test_index.py b/pandas/tests/copy_view/index/test_index.py
index 5e9c04c0adfc3..6411e20a972e7 100644
--- a/pandas/tests/copy_view/index/test_index.py
+++ b/pandas/tests/copy_view/index/test_index.py
@@ -153,3 +153,26 @@ def test_infer_objects(using_copy_on_write):
     view_.iloc[0, 0] = "aaaa"
     if using_copy_on_write:
         tm.assert_index_equal(idx, expected, check_names=False)
+
+
+def test_index_to_frame(using_copy_on_write):
+    idx = Index([1, 2, 3], name="a")
+    expected = idx.copy(deep=True)
+    df = idx.to_frame()
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(df, "a"), idx._values)
+        assert not df._mgr._has_no_reference(0)
+    else:
+        assert not np.shares_memory(get_array(df, "a"), idx._values)
+
+    df.iloc[0, 0] = 100
+    tm.assert_index_equal(idx, expected)
+
+
+def test_index_values(using_copy_on_write):
+    idx = Index([1, 2, 3])
+    result = idx.values
+    if using_copy_on_write:
+        assert result.flags.writeable is False
+    else:
+        assert result.flags.writeable is True
diff --git a/pandas/tests/copy_view/test_constructors.py b/pandas/tests/copy_view/test_constructors.py
index ad7812778afd8..af7e759902f9f 100644
--- a/pandas/tests/copy_view/test_constructors.py
+++ b/pandas/tests/copy_view/test_constructors.py
@@ -340,3 +340,15 @@ def test_dataframe_from_records_with_dataframe(using_copy_on_write):
         tm.assert_frame_equal(df, df_orig)
     else:
         tm.assert_frame_equal(df, df2)
+
+
+def test_frame_from_dict_of_index(using_copy_on_write):
+    idx = Index([1, 2, 3])
+    expected = idx.copy(deep=True)
+    df = DataFrame({"a": idx}, copy=False)
+    assert np.shares_memory(get_array(df, "a"), idx._values)
+    if using_copy_on_write:
+        assert not df._mgr._has_no_reference(0)
+
+        df.iloc[0, 0] = 100
+        tm.assert_index_equal(idx, expected)
diff --git a/pandas/tests/copy_view/test_interp_fillna.py b/pandas/tests/copy_view/test_interp_fillna.py
index 576d3a9cdedde..71023a538a2c3 100644
--- a/pandas/tests/copy_view/test_interp_fillna.py
+++ b/pandas/tests/copy_view/test_interp_fillna.py
@@ -20,7 +20,12 @@ def test_interpolate_no_op(using_copy_on_write, method):
     df = DataFrame({"a": [1, 2]})
     df_orig = df.copy()
 
-    result = df.interpolate(method=method)
+    warn = None
+    if method == "pad":
+        warn = FutureWarning
+    msg = "DataFrame.interpolate with method=pad is deprecated"
+    with tm.assert_produces_warning(warn, match=msg):
+        result = df.interpolate(method=method)
 
     if using_copy_on_write:
         assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
@@ -108,7 +113,9 @@ def test_interpolate_cleaned_fill_method(using_copy_on_write):
     df = DataFrame({"a": ["a", np.nan, "c"], "b": 1})
     df_orig = df.copy()
 
-    result = df.interpolate(method="asfreq")
+    msg = "DataFrame.interpolate with object dtype"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        result = df.interpolate(method="linear")
 
     if using_copy_on_write:
         assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
@@ -125,7 +132,9 @@ def test_interpolate_cleaned_fill_method(using_copy_on_write):
 def test_interpolate_object_convert_no_op(using_copy_on_write):
     df = DataFrame({"a": ["a", "b", "c"], "b": 1})
     arr_a = get_array(df, "a")
-    df.interpolate(method="pad", inplace=True)
+    msg = "DataFrame.interpolate with method=pad is deprecated"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        df.interpolate(method="pad", inplace=True)
 
     # Now CoW makes a copy, it should not!
     if using_copy_on_write:
@@ -136,7 +145,9 @@ def test_interpolate_object_convert_no_op(using_copy_on_write):
 def test_interpolate_object_convert_copies(using_copy_on_write):
     df = DataFrame({"a": Series([1, 2], dtype=object), "b": 1})
     arr_a = get_array(df, "a")
-    df.interpolate(method="pad", inplace=True)
+    msg = "DataFrame.interpolate with method=pad is deprecated"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        df.interpolate(method="pad", inplace=True)
 
     if using_copy_on_write:
         assert df._mgr._has_no_reference(0)
@@ -146,7 +157,9 @@ def test_interpolate_object_convert_copies(using_copy_on_write):
 def test_interpolate_downcast(using_copy_on_write):
     df = DataFrame({"a": [1, np.nan, 2.5], "b": 1})
     arr_a = get_array(df, "a")
-    df.interpolate(method="pad", inplace=True, downcast="infer")
+    msg = "DataFrame.interpolate with method=pad is deprecated"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        df.interpolate(method="pad", inplace=True, downcast="infer")
 
     if using_copy_on_write:
         assert df._mgr._has_no_reference(0)
@@ -158,7 +171,9 @@ def test_interpolate_downcast_reference_triggers_copy(using_copy_on_write):
     df_orig = df.copy()
     arr_a = get_array(df, "a")
     view = df[:]
-    df.interpolate(method="pad", inplace=True, downcast="infer")
+    msg = "DataFrame.interpolate with method=pad is deprecated"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        df.interpolate(method="pad", inplace=True, downcast="infer")
 
     if using_copy_on_write:
         assert df._mgr._has_no_reference(0)
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index 67fc91e0567ef..294fd5636b7b5 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -367,6 +367,36 @@ def test_reindex_columns(using_copy_on_write):
     tm.assert_frame_equal(df, df_orig)
 
 
+@pytest.mark.parametrize(
+    "index",
+    [
+        lambda idx: idx,
+        lambda idx: idx.view(),
+        lambda idx: idx.copy(),
+        lambda idx: list(idx),
+    ],
+    ids=["identical", "view", "copy", "values"],
+)
+def test_reindex_rows(index, using_copy_on_write):
+    # Case: reindexing the rows with an index that matches the current index
+    # can use a shallow copy
+    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+    df_orig = df.copy()
+    df2 = df.reindex(index=index(df.index))
+
+    if using_copy_on_write:
+        # still shares memory (df2 is a shallow copy)
+        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+    else:
+        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+    # mutating df2 triggers a copy-on-write for that column
+    df2.iloc[0, 0] = 0
+    assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+    tm.assert_frame_equal(df, df_orig)
+
+
 def test_drop_on_column(using_copy_on_write):
     df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
     df_orig = df.copy()
@@ -1734,6 +1764,32 @@ def test_transpose_ea_single_column(using_copy_on_write):
     assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))
 
 
+def test_transform_frame(using_copy_on_write):
+    df = DataFrame({"a": [1, 2, 3], "b": 1})
+    df_orig = df.copy()
+
+    def func(ser):
+        ser.iloc[0] = 100
+        return ser
+
+    df.transform(func)
+    if using_copy_on_write:
+        tm.assert_frame_equal(df, df_orig)
+
+
+def test_transform_series(using_copy_on_write):
+    ser = Series([1, 2, 3])
+    ser_orig = ser.copy()
+
+    def func(ser):
+        ser.iloc[0] = 100
+        return ser
+
+    ser.transform(func)
+    if using_copy_on_write:
+        tm.assert_series_equal(ser, ser_orig)
+
+
 def test_count_read_only_array():
     df = DataFrame({"a": [1, 2], "b": 3})
     result = df.count()
diff --git a/pandas/tests/copy_view/test_setitem.py b/pandas/tests/copy_view/test_setitem.py
index 2a99a00e249fa..5016b57bdd0b7 100644
--- a/pandas/tests/copy_view/test_setitem.py
+++ b/pandas/tests/copy_view/test_setitem.py
@@ -58,18 +58,12 @@ def test_set_column_with_index(using_copy_on_write):
     # the index data is copied
     assert not np.shares_memory(get_array(df, "c"), idx.values)
 
-    # and thus modifying the index does not modify the DataFrame
-    idx.values[0] = 0
-    tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))
-
     idx = RangeIndex(1, 4)
     arr = idx.values
 
     df["d"] = idx
 
     assert not np.shares_memory(get_array(df, "d"), arr)
-    arr[0] = 0
-    tm.assert_series_equal(df["d"], Series([1, 2, 3], name="d"))
 
 
 def test_set_columns_with_dataframe(using_copy_on_write):
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index bd8dffd2abe1f..1c275098adeb9 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -9,7 +9,7 @@
 
 from pandas._libs import missing as libmissing
 from pandas._libs.tslibs import iNaT
-from pandas.compat import is_numpy_dev
+from pandas.compat.numpy import np_version_gte1p25
 
 from pandas.core.dtypes.common import (
     is_float,
@@ -468,7 +468,7 @@ def test_array_equivalent_series(val):
     cm = (
         # stacklevel is chosen to make sense when called from .equals
         tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False)
-        if isinstance(val, str) and not is_numpy_dev
+        if isinstance(val, str) and not np_version_gte1p25
         else nullcontext()
     )
     with cm:
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py
index 9b8f3a43fbe64..bc8781eacfe06 100644
--- a/pandas/tests/extension/base/groupby.py
+++ b/pandas/tests/extension/base/groupby.py
@@ -1,3 +1,5 @@
+import re
+
 import pytest
 
 from pandas.core.dtypes.common import (
@@ -141,7 +143,16 @@ def test_in_numeric_groupby(self, data_for_grouping):
             result = df.groupby("A").sum().columns
         else:
             expected = pd.Index(["C"])
-            with pytest.raises(TypeError, match="does not support"):
-                df.groupby("A").sum().columns
+
+            msg = "|".join(
+                [
+                    # period/datetime
+                    "does not support sum operations",
+                    # all others
+                    re.escape(f"agg function failed [how->sum,dtype->{dtype}"),
+                ]
+            )
+            with pytest.raises(TypeError, match=msg):
+                df.groupby("A").sum()
             result = df.groupby("A").sum(numeric_only=True).columns
         tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 3e495e9ac6814..393c01488c234 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -25,6 +25,7 @@
     is_scalar,
 )
 from pandas.core import arraylike
+from pandas.core.algorithms import value_counts_internal as value_counts
 from pandas.core.arraylike import OpsMixin
 from pandas.core.arrays import (
     ExtensionArray,
@@ -273,8 +274,6 @@ def convert_values(param):
         return np.asarray(res, dtype=bool)
 
     def value_counts(self, dropna: bool = True):
-        from pandas.core.algorithms import value_counts
-
         return value_counts(self.to_numpy(), dropna=dropna)
 
 
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 7fda870e6f721..0920e70142446 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -240,10 +240,6 @@ class TestReduce(base.BaseNoReduceTests):
 
 
 class TestMethods(BaseJSON, base.BaseMethodsTests):
-    @pytest.mark.xfail(reason="ValueError: setting an array element with a sequence")
-    def test_hash_pandas_object(self, data):
-        super().test_hash_pandas_object(data)
-
     @unhashable
     def test_value_counts(self, all_data, dropna):
         super().test_value_counts(all_data, dropna)
@@ -286,10 +282,6 @@ def test_combine_add(self, data_repeated):
     def test_combine_first(self, data):
         super().test_combine_first(data)
 
-    @unhashable
-    def test_hash_pandas_object_works(self, data, kind):
-        super().test_hash_pandas_object_works(data, kind)
-
     @pytest.mark.xfail(reason="broadcasting error")
     def test_where_series(self, data, na_value):
         # Fails with
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 8625500a83e79..abcca16340365 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -585,7 +585,8 @@ def test_groupby_extension_agg(self, as_index, data_for_grouping, request):
         super().test_groupby_extension_agg(as_index, data_for_grouping)
 
     def test_in_numeric_groupby(self, data_for_grouping):
-        if is_string_dtype(data_for_grouping.dtype):
+        dtype = data_for_grouping.dtype
+        if is_string_dtype(dtype):
             df = pd.DataFrame(
                 {
                     "A": [1, 1, 2, 2, 3, 3, 1, 4],
@@ -595,8 +596,9 @@ def test_in_numeric_groupby(self, data_for_grouping):
             )
 
             expected = pd.Index(["C"])
-            with pytest.raises(TypeError, match="does not support"):
-                df.groupby("A").sum().columns
+            msg = re.escape(f"agg function failed [how->sum,dtype->{dtype}")
+            with pytest.raises(TypeError, match=msg):
+                df.groupby("A").sum()
             result = df.groupby("A").sum(numeric_only=True).columns
             tm.assert_index_equal(result, expected)
         else:
@@ -2026,6 +2028,13 @@ def test_str_join():
     tm.assert_series_equal(result, expected)
 
 
+def test_str_join_string_type():
+    ser = pd.Series(ArrowExtensionArray(pa.array(["abc", "123", None])))
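+    # each element is itself a string, so the separator is inserted between its characters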
+    result = ser.str.join("=")
+    expected = pd.Series(["a=b=c", "1=2=3", None], dtype=ArrowDtype(pa.string()))
+    tm.assert_series_equal(result, expected)
+
+
 @pytest.mark.parametrize(
     "start, stop, step, exp",
     [
@@ -2865,6 +2874,15 @@ def test_conversion_large_dtypes_from_numpy_array(data, arrow_dtype):
     tm.assert_extension_array_equal(result, expected)
 
 
+def test_concat_null_array():
+    df = pd.DataFrame({"a": [None, None]}, dtype=ArrowDtype(pa.null()))
+    df2 = pd.DataFrame({"a": [0, 1]}, dtype="int64[pyarrow]")
+
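+    # the all-null pa.null() column should take the int64[pyarrow] dtype on concat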
+    result = pd.concat([df, df2], ignore_index=True)
+    expected = pd.DataFrame({"a": [None, None, 0, 1]}, dtype="int64[pyarrow]")
+    tm.assert_frame_equal(result, expected)
+
+
 @pytest.mark.parametrize("pa_type", tm.ALL_INT_PYARROW_DTYPES + tm.FLOAT_PYARROW_DTYPES)
 def test_describe_numeric_data(pa_type):
     # GH 52470
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index f331449489bcc..91ca358ca0709 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -319,3 +319,12 @@ def test_repr_2d(self, data):
 
         res = repr(data.reshape(-1, 1))
         assert res.count("\nCategories") == 1
+
+
+def test_astype_category_readonly_mask_values():
+    # GH 53658
+    df = pd.DataFrame([0, 1, 2], dtype="Int64")
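+    # make the Int64 array's boolean mask read-only to reproduce the reported failure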
+    df._mgr.arrays[0]._mask.flags["WRITEABLE"] = False
+    result = df.astype("category")
+    expected = pd.DataFrame([0, 1, 2], dtype="Int64").astype("category")
+    tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py
index 97cf75acbd629..054ea842b3e6f 100644
--- a/pandas/tests/frame/conftest.py
+++ b/pandas/tests/frame/conftest.py
@@ -1,3 +1,5 @@
+from io import StringIO
+
 import numpy as np
 import pytest
 
@@ -5,6 +7,7 @@
     DataFrame,
     NaT,
     date_range,
+    read_csv,
 )
 import pandas._testing as tm
 
@@ -259,3 +262,26 @@ def frame_of_index_cols():
         }
     )
     return df
+
+
+@pytest.fixture
+def comments_attrs():
+    return {
+        "one": "Hello",
+        "two": "Hello World",
+        "three": "Hello, World!",
+        "four,": "comma in key",
+    }
+
+
+@pytest.fixture
+def data_for_comments_raw():
+    data = "col1,col2,col3\n0,0,0\n1,1,1\n2,2,2\n"
+    return data
+
+
+@pytest.fixture
+def frame_for_comments(data_for_comments_raw, comments_attrs):
+    df = read_csv(StringIO(data_for_comments_raw))
+    df.attrs = comments_attrs
+    return df
diff --git a/pandas/tests/frame/methods/test_align.py b/pandas/tests/frame/methods/test_align.py
index 1dabc95a0f6f4..e56d542972e63 100644
--- a/pandas/tests/frame/methods/test_align.py
+++ b/pandas/tests/frame/methods/test_align.py
@@ -14,6 +14,14 @@
 
 
 class TestDataFrameAlign:
+    def test_align_asfreq_method_raises(self):
+        df = DataFrame({"A": [1, np.nan, 2]})
+        msg = "Invalid fill method"
+        msg2 = "The 'method', 'limit', and 'fill_axis' keywords"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(FutureWarning, match=msg2):
+                df.align(df.iloc[::-1], method="asfreq")
+
     def test_frame_align_aware(self):
         idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern")
         idx2 = date_range("2001", periods=5, freq="2H", tz="US/Eastern")
diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
index 7983aace587c6..156e50d50a9ef 100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -510,7 +510,7 @@ def test_combine_first_duplicates_rows_for_nan_index_values():
             "y": [12.0, 13.0, np.nan, 14.0],
         },
         index=MultiIndex.from_arrays(
-            [[1, 2, 3, 4], [np.nan, 5.0, 6.0, 7.0]], names=["a", "b"]
+            [[1, 2, 3, 4], [np.nan, 5, 6, 7]], names=["a", "b"]
         ),
     )
     combined = df1.combine_first(df2)
@@ -538,3 +538,11 @@ def test_midx_losing_dtype():
     )
     expected = DataFrame({"a": [np.nan, 4, 3, 3]}, index=expected_midx)
     tm.assert_frame_equal(result, expected)
+
+
+def test_combine_first_empty_columns():
+    left = DataFrame(columns=["a", "b"])
+    right = DataFrame(columns=["a", "c"])
+    result = left.combine_first(right)
+    expected = DataFrame(columns=["a", "b", "c"])
+    tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_compare.py b/pandas/tests/frame/methods/test_compare.py
index 6369a624bce71..a4d0a7068a3a6 100644
--- a/pandas/tests/frame/methods/test_compare.py
+++ b/pandas/tests/frame/methods/test_compare.py
@@ -1,7 +1,7 @@
 import numpy as np
 import pytest
 
-from pandas.compat import is_numpy_dev
+from pandas.compat.numpy import np_version_gte1p25
 
 import pandas as pd
 import pandas._testing as tm
@@ -269,7 +269,7 @@ def test_compare_ea_and_np_dtype(val1, val2):
         # GH#18463 TODO: is this really the desired behavior?
         expected.loc[1, ("a", "self")] = np.nan
 
-    if val1 is pd.NA and is_numpy_dev:
+    if val1 is pd.NA and np_version_gte1p25:
         # can't compare with numpy array if it contains pd.NA
         with pytest.raises(TypeError, match="boolean value of NA is ambiguous"):
             result = df1.compare(df2, keep_shape=True)
diff --git a/pandas/tests/frame/methods/test_convert_dtypes.py b/pandas/tests/frame/methods/test_convert_dtypes.py
index 2adee158379bb..082ef025992dd 100644
--- a/pandas/tests/frame/methods/test_convert_dtypes.py
+++ b/pandas/tests/frame/methods/test_convert_dtypes.py
@@ -146,7 +146,7 @@ def test_pyarrow_engine_lines_false(self):
         with pytest.raises(ValueError, match=msg):
             df.convert_dtypes(dtype_backend="numpy")
 
-    def test_pyarrow_backend_no_convesion(self):
+    def test_pyarrow_backend_no_conversion(self):
         # GH#52872
         pytest.importorskip("pyarrow")
         df = pd.DataFrame({"a": [1, 2], "b": 1.5, "c": True, "d": "x"})
@@ -159,3 +159,11 @@ def test_pyarrow_backend_no_convesion(self):
             dtype_backend="pyarrow",
         )
         tm.assert_frame_equal(result, expected)
+
+    def test_convert_dtypes_pyarrow_to_np_nullable(self):
+        # GH 53648
+        pytest.importorskip("pyarrow")
+        ser = pd.DataFrame(range(2), dtype="int32[pyarrow]")
+        result = ser.convert_dtypes(dtype_backend="numpy_nullable")
+        expected = pd.DataFrame(range(2), dtype="Int32")
+        tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_duplicated.py b/pandas/tests/frame/methods/test_duplicated.py
index 9d46a8abb9b46..5b07572ca9ad1 100644
--- a/pandas/tests/frame/methods/test_duplicated.py
+++ b/pandas/tests/frame/methods/test_duplicated.py
@@ -1,4 +1,5 @@
 import re
+import sys
 
 import numpy as np
 import pytest
@@ -21,14 +22,17 @@ def test_duplicated_with_misspelled_column_name(subset):
         df.duplicated(subset)
 
 
-@pytest.mark.slow
-def test_duplicated_do_not_fail_on_wide_dataframes():
+def test_duplicated_implemented_no_recursion():
     # gh-21524
-    # Given the wide dataframe with a lot of columns
-    # with different (important!) values
-    data = {f"col_{i:02d}": np.random.randint(0, 1000, 30000) for i in range(100)}
-    df = DataFrame(data).T
-    result = df.duplicated()
+    # Ensure duplicated isn't implemented using recursion that
+    # can fail on wide frames
+    df = DataFrame(np.random.randint(0, 1000, (10, 1000)))
+    rec_limit = sys.getrecursionlimit()
+    try:
+        sys.setrecursionlimit(100)
+        result = df.duplicated()
+    finally:
+        sys.setrecursionlimit(rec_limit)
 
     # Then duplicates produce the bool Series as a result and don't fail during
     # calculation. Actual values doesn't matter here, though usually it's all
diff --git a/pandas/tests/frame/methods/test_first_and_last.py b/pandas/tests/frame/methods/test_first_and_last.py
index 18173f7c66198..2e85edc7ed0ea 100644
--- a/pandas/tests/frame/methods/test_first_and_last.py
+++ b/pandas/tests/frame/methods/test_first_and_last.py
@@ -11,6 +11,7 @@
 import pandas._testing as tm
 
 deprecated_msg = "first is deprecated"
+last_deprecated_msg = "last is deprecated"
 
 
 class TestFirst:
@@ -55,29 +56,38 @@ def test_first_last_raises(self, frame_or_series):
             obj.first("1D")
 
         msg = "'last' only supports a DatetimeIndex index"
-        with pytest.raises(TypeError, match=msg):  # index is not a DatetimeIndex
+        with tm.assert_produces_warning(
+            FutureWarning, match=last_deprecated_msg
+        ), pytest.raises(
+            TypeError, match=msg
+        ):  # index is not a DatetimeIndex
             obj.last("1D")
 
     def test_last_subset(self, frame_or_series):
         ts = tm.makeTimeDataFrame(freq="12h")
         ts = tm.get_obj(ts, frame_or_series)
-        result = ts.last("10d")
+        with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg):
+            result = ts.last("10d")
         assert len(result) == 20
 
         ts = tm.makeTimeDataFrame(nper=30, freq="D")
         ts = tm.get_obj(ts, frame_or_series)
-        result = ts.last("10d")
+        with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg):
+            result = ts.last("10d")
         assert len(result) == 10
 
-        result = ts.last("21D")
+        with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg):
+            result = ts.last("21D")
         expected = ts["2000-01-10":]
         tm.assert_equal(result, expected)
 
-        result = ts.last("21D")
+        with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg):
+            result = ts.last("21D")
         expected = ts[-21:]
         tm.assert_equal(result, expected)
 
-        result = ts[:0].last("3M")
+        with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg):
+            result = ts[:0].last("3M")
         tm.assert_equal(result, ts[:0])
 
     @pytest.mark.parametrize("start, periods", [("2010-03-31", 1), ("2010-03-30", 2)])
@@ -104,7 +114,8 @@ def test_first_with_first_day_end_of_frq_n_greater_one(self, frame_or_series):
     def test_empty_not_input(self):
         # GH#51032
         df = DataFrame(index=pd.DatetimeIndex([]))
-        result = df.last(offset=1)
+        with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg):
+            result = df.last(offset=1)
 
         with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
             result = df.first(offset=1)
diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py
index 9eb1073f4c69c..429f3678c34f9 100644
--- a/pandas/tests/frame/methods/test_interpolate.py
+++ b/pandas/tests/frame/methods/test_interpolate.py
@@ -13,6 +13,20 @@
 
 
 class TestDataFrameInterpolate:
+    def test_interpolate_complex(self):
+        # GH#53635
+        ser = Series([complex("1+1j"), float("nan"), complex("2+2j")])
+        assert ser.dtype.kind == "c"
+
+        res = ser.interpolate()
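+        # the midpoint of (1+1j) and (2+2j) is 1.5+1.5j, i.e. ser[0] * 1.5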
+        expected = Series([ser[0], ser[0] * 1.5, ser[2]])
+        tm.assert_series_equal(res, expected)
+
+        df = ser.to_frame()
+        res = df.interpolate()
+        expected = expected.to_frame()
+        tm.assert_frame_equal(res, expected)
+
     def test_interpolate_datetimelike_values(self, frame_or_series):
         # GH#11312, GH#51005
         orig = Series(date_range("2012-01-01", periods=5))
@@ -69,7 +83,9 @@ def test_interp_basic(self, using_copy_on_write):
                 "D": list("abcd"),
             }
         )
-        result = df.interpolate()
+        msg = "DataFrame.interpolate with object dtype"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.interpolate()
         tm.assert_frame_equal(result, expected)
 
         # check we didn't operate inplace GH#45791
@@ -82,7 +98,8 @@ def test_interp_basic(self, using_copy_on_write):
             assert not np.shares_memory(cvalues, result["C"]._values)
             assert not np.shares_memory(dvalues, result["D"]._values)
 
-        res = df.interpolate(inplace=True)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            res = df.interpolate(inplace=True)
         assert res is None
         tm.assert_frame_equal(df, expected)
 
@@ -100,7 +117,9 @@ def test_interp_basic_with_non_range_index(self):
             }
         )
 
-        result = df.set_index("C").interpolate()
+        msg = "DataFrame.interpolate with object dtype"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.set_index("C").interpolate()
         expected = df.set_index("C")
         expected.loc[3, "A"] = 3
         expected.loc[5, "B"] = 9
@@ -120,7 +139,6 @@ def test_interp_bad_method(self):
                 "A": [1, 2, np.nan, 4],
                 "B": [1, 4, 9, np.nan],
                 "C": [1, 2, 3, 5],
-                "D": list("abcd"),
             }
         )
         msg = (
@@ -436,7 +454,9 @@ def test_interp_fillna_methods(self, request, axis, method, using_array_manager)
         )
         method2 = method if method != "pad" else "ffill"
         expected = getattr(df, method2)(axis=axis)
-        result = df.interpolate(method=method, axis=axis)
+        msg = f"DataFrame.interpolate with method={method} is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.interpolate(method=method, axis=axis)
         tm.assert_frame_equal(result, expected)
 
     def test_interpolate_empty_df(self):
diff --git a/pandas/tests/frame/methods/test_nlargest.py b/pandas/tests/frame/methods/test_nlargest.py
index b5c33a41dd780..17dea51263222 100644
--- a/pandas/tests/frame/methods/test_nlargest.py
+++ b/pandas/tests/frame/methods/test_nlargest.py
@@ -9,6 +9,7 @@
 
 import pandas as pd
 import pandas._testing as tm
+from pandas.util.version import Version
 
 
 @pytest.fixture
@@ -155,7 +156,7 @@ def test_nlargest_n_identical_values(self):
         [["a", "b", "c"], ["c", "b", "a"], ["a"], ["b"], ["a", "b"], ["c", "b"]],
     )
     @pytest.mark.parametrize("n", range(1, 6))
-    def test_nlargest_n_duplicate_index(self, df_duplicates, n, order):
+    def test_nlargest_n_duplicate_index(self, df_duplicates, n, order, request):
         # GH#13412
 
         df = df_duplicates
@@ -165,6 +166,18 @@ def test_nlargest_n_duplicate_index(self, df_duplicates, n, order):
 
         result = df.nlargest(n, order)
         expected = df.sort_values(order, ascending=False).head(n)
+        if Version(np.__version__) >= Version("1.25") and (
+            (order == ["a"] and n in (1, 2, 3, 4)) or (order == ["a", "b"] and n == 5)
+        ):
+            request.node.add_marker(
+                pytest.mark.xfail(
+                    reason=(
+                        "pandas default unstable sorting of duplicates "
+                        "issue with numpy>=1.25 with AVX instructions"
+                    ),
+                    strict=False,
+                )
+            )
         tm.assert_frame_equal(result, expected)
 
     def test_nlargest_duplicate_keep_all_ties(self):
diff --git a/pandas/tests/frame/methods/test_pct_change.py b/pandas/tests/frame/methods/test_pct_change.py
index 37d6361dec935..d0153da038a75 100644
--- a/pandas/tests/frame/methods/test_pct_change.py
+++ b/pandas/tests/frame/methods/test_pct_change.py
@@ -10,7 +10,7 @@
 
 class TestDataFramePctChange:
     @pytest.mark.parametrize(
-        "periods,fill_method,limit,exp",
+        "periods, fill_method, limit, exp",
         [
             (1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
             (1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
@@ -28,7 +28,12 @@ def test_pct_change_with_nas(
         vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
         obj = frame_or_series(vals)
 
-        res = obj.pct_change(periods=periods, fill_method=fill_method, limit=limit)
+        msg = (
+            "The 'fill_method' and 'limit' keywords in "
+            f"{type(obj).__name__}.pct_change are deprecated"
+        )
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            res = obj.pct_change(periods=periods, fill_method=fill_method, limit=limit)
         tm.assert_equal(res, frame_or_series(exp))
 
     def test_pct_change_numeric(self):
@@ -40,21 +45,34 @@ def test_pct_change_numeric(self):
         pnl.iat[1, 1] = np.nan
         pnl.iat[2, 3] = 60
 
+        msg = (
+            "The 'fill_method' and 'limit' keywords in "
+            "DataFrame.pct_change are deprecated"
+        )
+
         for axis in range(2):
             expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(axis=axis) - 1
-            result = pnl.pct_change(axis=axis, fill_method="pad")
 
+            with tm.assert_produces_warning(FutureWarning, match=msg):
+                result = pnl.pct_change(axis=axis, fill_method="pad")
             tm.assert_frame_equal(result, expected)
 
     def test_pct_change(self, datetime_frame):
-        rs = datetime_frame.pct_change(fill_method=None)
+        msg = (
+            "The 'fill_method' and 'limit' keywords in "
+            "DataFrame.pct_change are deprecated"
+        )
+
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs = datetime_frame.pct_change(fill_method=None)
         tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
 
         rs = datetime_frame.pct_change(2)
         filled = datetime_frame.ffill()
         tm.assert_frame_equal(rs, filled / filled.shift(2) - 1)
 
-        rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
         filled = datetime_frame.bfill(limit=1)
         tm.assert_frame_equal(rs, filled / filled.shift(1) - 1)
 
@@ -69,7 +87,10 @@ def test_pct_change_shift_over_nas(self):
 
         df = DataFrame({"a": s, "b": s})
 
-        chg = df.pct_change()
+        msg = "The default fill_method='pad' in DataFrame.pct_change is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            chg = df.pct_change()
+
         expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
         edf = DataFrame({"a": expected, "b": expected})
         tm.assert_frame_equal(chg, edf)
@@ -88,18 +109,31 @@ def test_pct_change_shift_over_nas(self):
     def test_pct_change_periods_freq(
         self, datetime_frame, freq, periods, fill_method, limit
     ):
-        # GH#7292
-        rs_freq = datetime_frame.pct_change(
-            freq=freq, fill_method=fill_method, limit=limit
-        )
-        rs_periods = datetime_frame.pct_change(
-            periods, fill_method=fill_method, limit=limit
+        msg = (
+            "The 'fill_method' and 'limit' keywords in "
+            "DataFrame.pct_change are deprecated"
         )
+
+        # GH#7292
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs_freq = datetime_frame.pct_change(
+                freq=freq, fill_method=fill_method, limit=limit
+            )
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs_periods = datetime_frame.pct_change(
+                periods, fill_method=fill_method, limit=limit
+            )
         tm.assert_frame_equal(rs_freq, rs_periods)
 
         empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns)
-        rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
-        rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs_freq = empty_ts.pct_change(
+                freq=freq, fill_method=fill_method, limit=limit
+            )
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs_periods = empty_ts.pct_change(
+                periods, fill_method=fill_method, limit=limit
+            )
         tm.assert_frame_equal(rs_freq, rs_periods)
 
 
@@ -109,7 +143,14 @@ def test_pct_change_with_duplicated_indices(fill_method):
     data = DataFrame(
         {0: [np.nan, 1, 2, 3, 9, 18], 1: [0, 1, np.nan, 3, 9, 18]}, index=["a", "b"] * 3
     )
-    result = data.pct_change(fill_method=fill_method)
+
+    msg = (
+        "The 'fill_method' and 'limit' keywords in "
+        "DataFrame.pct_change are deprecated"
+    )
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        result = data.pct_change(fill_method=fill_method)
+
     if fill_method is None:
         second_column = [np.nan, np.inf, np.nan, np.nan, 2.0, 1.0]
     else:
diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py
index e8cebd5964236..63e2eb790a4ea 100644
--- a/pandas/tests/frame/methods/test_reindex.py
+++ b/pandas/tests/frame/methods/test_reindex.py
@@ -1291,3 +1291,10 @@ def test_reindex_not_category(self, index_df, index_res, index_exp):
         result = df.reindex(index=index_res)
         expected = DataFrame(index=index_exp)
         tm.assert_frame_equal(result, expected)
+
+    def test_invalid_method(self):
+        df = DataFrame({"A": [1, np.nan, 2]})
+
+        msg = "Invalid fill method"
+        with pytest.raises(ValueError, match=msg):
+            df.reindex([1, 0, 2], method="asfreq")
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index d5668020bab5d..9256df72cdf7b 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1236,7 +1236,9 @@ def test_replace_method(self, to_replace, method, expected):
         # GH 19632
         df = DataFrame({"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]})
 
-        result = df.replace(to_replace=to_replace, value=None, method=method)
+        msg = "The 'method' keyword in DataFrame.replace is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.replace(to_replace=to_replace, value=None, method=method)
         expected = DataFrame(expected)
         tm.assert_frame_equal(result, expected)
 
@@ -1327,8 +1329,13 @@ def test_replace_invalid_to_replace(self):
             r"Expecting 'to_replace' to be either a scalar, array-like, "
             r"dict or None, got invalid type.*"
         )
+        msg2 = (
+            "DataFrame.replace without 'value' and with non-dict-like "
+            "'to_replace' is deprecated"
+        )
         with pytest.raises(TypeError, match=msg):
-            df.replace(lambda x: x.strip())
+            with tm.assert_produces_warning(FutureWarning, match=msg2):
+                df.replace(lambda x: x.strip())
 
     @pytest.mark.parametrize("dtype", ["float", "float64", "int64", "Int64", "boolean"])
     @pytest.mark.parametrize("value", [np.nan, pd.NA])
diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py
index e2877acbdd040..4c41632040dbe 100644
--- a/pandas/tests/frame/methods/test_sort_values.py
+++ b/pandas/tests/frame/methods/test_sort_values.py
@@ -12,6 +12,7 @@
     date_range,
 )
 import pandas._testing as tm
+from pandas.util.version import Version
 
 
 class TestDataFrameSortValues:
@@ -849,9 +850,22 @@ def ascending(request):
 
 class TestSortValuesLevelAsStr:
     def test_sort_index_level_and_column_label(
-        self, df_none, df_idx, sort_names, ascending
+        self, df_none, df_idx, sort_names, ascending, request
     ):
         # GH#14353
+        if (
+            Version(np.__version__) >= Version("1.25")
+            and request.node.callspec.id == "df_idx0-inner-True"
+        ):
+            request.node.add_marker(
+                pytest.mark.xfail(
+                    reason=(
+                        "pandas default unstable sorting of duplicates "
+                        "issue with numpy>=1.25 with AVX instructions"
+                    ),
+                    strict=False,
+                )
+            )
 
         # Get index levels from df_idx
         levels = df_idx.index.names
@@ -867,7 +881,7 @@ def test_sort_index_level_and_column_label(
         tm.assert_frame_equal(result, expected)
 
     def test_sort_column_level_and_index_label(
-        self, df_none, df_idx, sort_names, ascending
+        self, df_none, df_idx, sort_names, ascending, request
     ):
         # GH#14353
 
@@ -886,6 +900,17 @@ def test_sort_column_level_and_index_label(
         # Compute result by transposing and sorting on axis=1.
         result = df_idx.T.sort_values(by=sort_names, ascending=ascending, axis=1)
 
+        if Version(np.__version__) >= Version("1.25"):
+            request.node.add_marker(
+                pytest.mark.xfail(
+                    reason=(
+                        "pandas default unstable sorting of duplicates "
+                        "issue with numpy>=1.25 with AVX instructions"
+                    ),
+                    strict=False,
+                )
+            )
+
         tm.assert_frame_equal(result, expected)
 
     def test_sort_values_validate_ascending_for_value_error(self):
diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py
index 5671a569c8ac8..93aaacca5d41d 100644
--- a/pandas/tests/frame/methods/test_to_csv.py
+++ b/pandas/tests/frame/methods/test_to_csv.py
@@ -1307,3 +1307,65 @@ def test_to_csv_categorical_and_interval(self):
         expected_rows = [",a", '0,"[2020-01-01, 2020-01-02]"']
         expected = tm.convert_rows_list_to_csv_str(expected_rows)
         assert result == expected
+
+    def prepare_string_rep_of_comment_output(
+        self, delim: str, comments_attrs, data_for_comments_raw, frame_for_comments
+    ) -> str:
+        comment = "#"
+
+        data_for_comments_raw = data_for_comments_raw.replace(",", delim)
+        # Create string representation of data with attrs written at start
+        output_data_rows = []
+        for k, v in comments_attrs.items():
+            # Make sure delims being used are sanitized from comment lines
+            k = k.replace(delim, "")
+            v = v.replace(delim, "")
+            output_data_rows.append(f"{comment}{k}:{v}\n")
+        output_data = "".join(output_data_rows)
+        output_data = output_data + data_for_comments_raw
+        return output_data
+
+    def test_comment_writer_csv(
+        self, comments_attrs, data_for_comments_raw, frame_for_comments
+    ):
+        comment = "#"
+        delim = ","
+        output_data = self.prepare_string_rep_of_comment_output(
+            delim, comments_attrs, data_for_comments_raw, frame_for_comments
+        )
+        read_output = read_csv(StringIO(output_data), comment=comment)
+
+        # Check output data can be read correctly
+        tm.assert_frame_equal(
+            read_output, frame_for_comments, obj="Frame read from test data"
+        )
+
+        # Check saved output is as expected
+        with tm.ensure_clean() as path:
+            frame_for_comments.to_csv(path, comment=comment, index=False)
+            with open(path, encoding="utf-8") as fp:
+                lines = fp.read()
+                assert (
+                    lines == output_data
+                ), "csv output with comment lines not as expected"
+
+    def test_comment_writer_tabs(
+        self, comments_attrs, data_for_comments_raw, frame_for_comments
+    ):
+        comment = "#"
+        delim = "\t"
+        output_data = self.prepare_string_rep_of_comment_output(
+            delim, comments_attrs, data_for_comments_raw, frame_for_comments
+        )
+        read_output = read_csv(StringIO(output_data), comment=comment, sep="\t")
+
+        tm.assert_frame_equal(
+            read_output, frame_for_comments, obj="Frame read from tab-separated data"
+        )
+        with tm.ensure_clean() as path:
+            frame_for_comments.to_csv(path, comment=comment, index=False, sep="\t")
+            with open(path, encoding="utf-8") as fp:
+                lines = fp.read()
+                assert (
+                    lines == output_data
+                ), "tsv output with comment lines not as expected"
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 47e307f561cf4..06e244b93016c 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2714,6 +2714,24 @@ def test_frame_from_dict_with_mixed_tzaware_indexes(self):
         with pytest.raises(TypeError, match=msg):
             DataFrame({"D": ser1, "A": ser2, "B": ser3})
 
+    @pytest.mark.parametrize(
+        "key_val, col_vals, col_type",
+        [
+            ["3", ["3", "4"], "utf8"],
+            [3, [3, 4], "int8"],
+        ],
+    )
+    def test_dict_data_arrow_column_expansion(self, key_val, col_vals, col_type):
+        # GH 53617
+        pa = pytest.importorskip("pyarrow")
+        cols = pd.arrays.ArrowExtensionArray(
+            pa.array(col_vals, type=pa.dictionary(pa.int8(), getattr(pa, col_type)()))
+        )
+        result = DataFrame({key_val: [1, 2]}, columns=cols)
+        expected = DataFrame([[1, np.nan], [2, np.nan]], columns=cols)
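+        # the column missing from the dict is all-NaN; cast expected to match its dtype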
+        expected.iloc[:, 1] = expected.iloc[:, 1].astype(object)
+        tm.assert_frame_equal(result, expected)
+
 
 class TestDataFrameConstructorWithDtypeCoercion:
     def test_floating_values_integer_dtype(self):
diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py
index 2818df721db34..6cc6534da0b87 100644
--- a/pandas/tests/frame/test_stack_unstack.py
+++ b/pandas/tests/frame/test_stack_unstack.py
@@ -1,6 +1,7 @@
 from datetime import datetime
 from io import StringIO
 import itertools
+import re
 
 import numpy as np
 import pytest
@@ -1897,7 +1898,8 @@ def test_stack_multiple_bug(self):
         multi = df.set_index(["DATE", "ID"])
         multi.columns.name = "Params"
         unst = multi.unstack("ID")
-        with pytest.raises(TypeError, match="Could not convert"):
+        msg = re.escape("agg function failed [how->mean,dtype->object]")
+        with pytest.raises(TypeError, match=msg):
             unst.resample("W-THU").mean()
         down = unst.resample("W-THU").mean(numeric_only=True)
         rs = down.stack("ID")
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index 5c44a957b9373..3d1e9d26c1ea6 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -732,7 +732,9 @@ def test_equals_subclass(self):
     def test_replace_list_method(self):
         # https://github.com/pandas-dev/pandas/pull/46018
         df = tm.SubclassedDataFrame({"A": [0, 1, 2]})
-        result = df.replace([1, 2], method="ffill")
+        msg = "The 'method' keyword in SubclassedDataFrame.replace is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.replace([1, 2], method="ffill")
         expected = tm.SubclassedDataFrame({"A": [0, 0, 0]})
         assert isinstance(result, tm.SubclassedDataFrame)
         tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_unary.py b/pandas/tests/frame/test_unary.py
index 07bcb2ccc121a..5e29d3c868983 100644
--- a/pandas/tests/frame/test_unary.py
+++ b/pandas/tests/frame/test_unary.py
@@ -3,7 +3,7 @@
 import numpy as np
 import pytest
 
-from pandas.compat import is_numpy_dev
+from pandas.compat.numpy import np_version_gte1p25
 
 import pandas as pd
 import pandas._testing as tm
@@ -130,7 +130,7 @@ def test_pos_object(self, df):
     )
     def test_pos_object_raises(self, df):
         # GH#21380
-        if is_numpy_dev:
+        if np_version_gte1p25:
             with pytest.raises(
                 TypeError, match=r"^bad operand type for unary \+: \'str\'$"
             ):
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index 22460b1ea9dfe..f827eaf63a342 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -395,7 +395,8 @@ def ndframe_method(request):
 
 
 @pytest.mark.filterwarnings(
-    "ignore:DataFrame.fillna with 'method' is deprecated:FutureWarning"
+    "ignore:DataFrame.fillna with 'method' is deprecated:FutureWarning",
+    "ignore:last is deprecated:FutureWarning",
 )
 def test_finalize_called(ndframe_method):
     cls, init_args, method = ndframe_method
@@ -423,6 +424,23 @@ def test_finalize_first(data):
         assert result.attrs == {"a": 1}
 
 
+@pytest.mark.parametrize(
+    "data",
+    [
+        pd.Series(1, pd.date_range("2000", periods=4)),
+        pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
+    ],
+)
+def test_finalize_last(data):
+    # GH 53710
+    deprecated_msg = "last is deprecated"
+
+    data.attrs = {"a": 1}
+    with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
+        result = data.last("3D")
+        assert result.attrs == {"a": 1}
+
+
 @not_implemented_mark
 def test_finalize_called_eval_numexpr():
     pytest.importorskip("numexpr")
diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py
index d585cc1648c5a..2514e988e4e80 100644
--- a/pandas/tests/groupby/aggregate/test_numba.py
+++ b/pandas/tests/groupby/aggregate/test_numba.py
@@ -13,6 +13,8 @@
 )
 import pandas._testing as tm
 
+pytestmark = pytest.mark.single_cpu
+
 
 @td.skip_if_no("numba")
 def test_correct_function_signature():
@@ -155,8 +157,7 @@ def test_multifunc_numba_vs_cython_frame(agg_kwargs):
     grouped = data.groupby(0)
     result = grouped.agg(**agg_kwargs, engine="numba")
     expected = grouped.agg(**agg_kwargs, engine="cython")
-    # check_dtype can be removed if GH 44952 is addressed
-    tm.assert_frame_equal(result, expected, check_dtype=False)
+    tm.assert_frame_equal(result, expected)
 
 
 @td.skip_if_no("numba")
@@ -192,6 +193,7 @@ def test_multifunc_numba_udf_frame(agg_kwargs, expected_func):
     result = grouped.agg(**agg_kwargs, engine="numba")
     expected = grouped.agg(expected_func, engine="cython")
     # check_dtype can be removed if GH 44952 is addressed
+    # Currently, UDFs still always return float64 while reductions can preserve dtype
     tm.assert_frame_equal(result, expected, check_dtype=False)
 
 
diff --git a/pandas/tests/groupby/conftest.py b/pandas/tests/groupby/conftest.py
index 7e7b97d9273dc..c5e30513f69de 100644
--- a/pandas/tests/groupby/conftest.py
+++ b/pandas/tests/groupby/conftest.py
@@ -196,8 +196,23 @@ def nopython(request):
         ("sum", {}),
         ("min", {}),
         ("max", {}),
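+        # min_count=2: groups with fewer than 2 valid values produce NA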
+        ("sum", {"min_count": 2}),
+        ("min", {"min_count": 2}),
+        ("max", {"min_count": 2}),
+    ],
+    ids=[
+        "mean",
+        "var_1",
+        "var_0",
+        "std_1",
+        "std_0",
+        "sum",
+        "min",
+        "max",
+        "sum-min_count",
+        "min-min_count",
+        "max-min_count",
     ],
-    ids=["mean", "var_1", "var_0", "std_1", "std_0", "sum", "min", "max"],
 )
 def numba_supported_reductions(request):
     """reductions supported with engine='numba'"""
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 98fce9d668e44..0535bafc2a907 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1,5 +1,6 @@
 import builtins
 from io import StringIO
+import re
 
 import numpy as np
 import pytest
@@ -249,8 +250,10 @@ def _check(self, df, method, expected_columns, expected_columns_numeric):
             msg = "|".join(
                 [
                     "Categorical is not ordered",
-                    "function is not implemented for this dtype",
                     f"Cannot perform {method} with non-ordered Categorical",
+                    re.escape(f"agg function failed [how->{method},dtype->object]"),
+                    # cumsum/cummin/cummax/cumprod
+                    "function is not implemented for this dtype",
                 ]
             )
             with pytest.raises(exception, match=msg):
@@ -259,12 +262,9 @@ def _check(self, df, method, expected_columns, expected_columns_numeric):
             msg = "|".join(
                 [
                     "category type does not support sum operations",
-                    "[Cc]ould not convert",
-                    "can't multiply sequence by non-int of type 'str'",
+                    re.escape(f"agg function failed [how->{method},dtype->object]"),
                 ]
             )
-            if method == "median":
-                msg = r"Cannot convert \['a' 'b'\] to numeric"
             with pytest.raises(exception, match=msg):
                 getattr(gb, method)()
         else:
@@ -274,16 +274,13 @@ def _check(self, df, method, expected_columns, expected_columns_numeric):
         if method not in ("first", "last"):
             msg = "|".join(
                 [
-                    "[Cc]ould not convert",
                     "Categorical is not ordered",
                     "category type does not support",
-                    "can't multiply sequence",
                     "function is not implemented for this dtype",
                     f"Cannot perform {method} with non-ordered Categorical",
+                    re.escape(f"agg function failed [how->{method},dtype->object]"),
                 ]
             )
-            if method == "median":
-                msg = r"Cannot convert \['a' 'b'\] to numeric"
             with pytest.raises(exception, match=msg):
                 getattr(gb, method)(numeric_only=False)
         else:
@@ -1464,16 +1461,14 @@ def test_numeric_only(kernel, has_arg, numeric_only, keys):
         msg = "|".join(
             [
                 "not allowed for this dtype",
-                "must be a string or a number",
                 "cannot be performed against 'object' dtypes",
-                "must be a string or a real number",
+                # On PY39 the message is "a number"; on PY310+ it is "a real number"
+                "must be a string or a.* number",
                 "unsupported operand type",
-                "not supported between instances of",
                 "function is not implemented for this dtype",
+                re.escape(f"agg function failed [how->{kernel},dtype->object]"),
             ]
         )
-        if kernel == "median":
-            msg = r"Cannot convert \[<class 'object'> <class 'object'>\] to numeric"
         with pytest.raises(exception, match=msg):
             method(*args, **kwargs)
     elif not has_arg and numeric_only is not lib.no_default:
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index bf0b646847ed6..79fc631fff87c 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1,5 +1,6 @@
 from datetime import datetime
 from decimal import Decimal
+import re
 
 import numpy as np
 import pytest
@@ -641,7 +642,7 @@ def test_frame_multi_key_function_list_partial_failure():
 
     grouped = data.groupby(["A", "B"])
     funcs = [np.mean, np.std]
-    msg = "Could not convert string 'dullshinyshiny' to numeric"
+    msg = re.escape("agg function failed [how->mean,dtype->object]")
     with pytest.raises(TypeError, match=msg):
         grouped.agg(funcs)
 
@@ -695,6 +696,16 @@ def test_as_index_select_column():
     tm.assert_series_equal(result, expected)
 
 
+def test_obj_arg_get_group_deprecated():
+    depr_msg = "obj is deprecated"
+
+    df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
+    expected = df.iloc[df.groupby("b").indices.get(4)]
+    with tm.assert_produces_warning(FutureWarning, match=depr_msg):
+        result = df.groupby("b").get_group(4, obj=df)
+        tm.assert_frame_equal(result, expected)
+
+
 def test_groupby_as_index_select_column_sum_empty_df():
     # GH 35246
     df = DataFrame(columns=Index(["A", "B", "C"], name="alpha"))
@@ -915,9 +926,10 @@ def test_groupby_multi_corner(df):
 
 def test_raises_on_nuisance(df):
     grouped = df.groupby("A")
-    with pytest.raises(TypeError, match="Could not convert"):
+    msg = re.escape("agg function failed [how->mean,dtype->object]")
+    with pytest.raises(TypeError, match=msg):
         grouped.agg(np.mean)
-    with pytest.raises(TypeError, match="Could not convert"):
+    with pytest.raises(TypeError, match=msg):
         grouped.mean()
 
     df = df.loc[:, ["A", "C", "D"]]
@@ -965,10 +977,12 @@ def test_omit_nuisance_agg(df, agg_function, numeric_only):
     if agg_function in no_drop_nuisance and not numeric_only:
         # Added numeric_only as part of GH#46560; these do not drop nuisance
         # columns when numeric_only is False
-        klass = ValueError if agg_function in ("std", "sem") else TypeError
-        msg = "|".join(["[C|c]ould not convert", "can't multiply sequence"])
-        if agg_function == "median":
-            msg = r"Cannot convert \['one' 'three' 'two'\] to numeric"
+        if agg_function in ("std", "sem"):
+            klass = ValueError
+            msg = "could not convert string to float: 'one'"
+        else:
+            klass = TypeError
+            msg = re.escape(f"agg function failed [how->{agg_function},dtype->object]")
         with pytest.raises(klass, match=msg):
             getattr(grouped, agg_function)(numeric_only=numeric_only)
     else:
@@ -993,9 +1007,10 @@ def test_raise_on_nuisance_python_single(df):
 
 def test_raise_on_nuisance_python_multiple(three_group):
     grouped = three_group.groupby(["A", "B"])
-    with pytest.raises(TypeError, match="Could not convert"):
+    msg = re.escape("agg function failed [how->mean,dtype->object]")
+    with pytest.raises(TypeError, match=msg):
         grouped.agg(np.mean)
-    with pytest.raises(TypeError, match="Could not convert"):
+    with pytest.raises(TypeError, match=msg):
         grouped.mean()
 
 
@@ -1035,7 +1050,8 @@ def test_wrap_aggregated_output_multindex(mframe):
     df["baz", "two"] = "peekaboo"
 
     keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
-    with pytest.raises(TypeError, match="Could not convert"):
+    msg = re.escape("agg function failed [how->mean,dtype->object]")
+    with pytest.raises(TypeError, match=msg):
         df.groupby(keys).agg(np.mean)
     agged = df.drop(columns=("baz", "two")).groupby(keys).agg(np.mean)
     assert isinstance(agged.columns, MultiIndex)
diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py
index 269fda8fbf361..ab268a1d94b96 100644
--- a/pandas/tests/groupby/test_groupby_dropna.py
+++ b/pandas/tests/groupby/test_groupby_dropna.py
@@ -622,8 +622,15 @@ def test_categorical_transformers(
         "x", dropna=False, observed=observed, sort=sort, as_index=as_index
     )
     gb_dropna = df.groupby("x", dropna=True, observed=observed, sort=sort)
-    result = getattr(gb_keepna, transformation_func)(*args)
+
+    msg = "The default fill_method='ffill' in DataFrameGroupBy.pct_change is deprecated"
+    if transformation_func == "pct_change":
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = getattr(gb_keepna, "pct_change")(*args)
+    else:
+        result = getattr(gb_keepna, transformation_func)(*args)
     expected = getattr(gb_dropna, transformation_func)(*args)
+
     for iloc, value in zip(
         df[df["x"].isnull()].index.tolist(), null_group_result.values.ravel()
     ):
diff --git a/pandas/tests/groupby/test_libgroupby.py b/pandas/tests/groupby/test_libgroupby.py
index d10bcf9053d1a..92c3b68d87fad 100644
--- a/pandas/tests/groupby/test_libgroupby.py
+++ b/pandas/tests/groupby/test_libgroupby.py
@@ -6,6 +6,7 @@
     group_cumprod,
     group_cumsum,
     group_mean,
+    group_sum,
     group_var,
 )
 
@@ -302,3 +303,29 @@ def test_cython_group_mean_Inf_at_begining_and_end():
         actual,
         expected,
     )
+
+
+@pytest.mark.parametrize(
+    "values, out",
+    [
+        ([[np.inf], [np.inf], [np.inf]], [[np.inf], [np.inf]]),
+        ([[np.inf], [np.inf], [-np.inf]], [[np.inf], [np.nan]]),
+        ([[np.inf], [-np.inf], [np.inf]], [[np.inf], [np.nan]]),
+        ([[np.inf], [-np.inf], [-np.inf]], [[np.inf], [-np.inf]]),
+    ],
+)
+def test_cython_group_sum_Inf_at_begining_and_end(values, out):
+    # GH #53606
+    actual = np.array([[np.nan], [np.nan]], dtype="float64")
+    counts = np.array([0, 0], dtype="int64")
+    data = np.array(values, dtype="float64")
+    labels = np.array([0, 1, 1], dtype=np.intp)
+
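+    # actual and counts are updated in place with per-group sums and value counts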
+    group_sum(actual, counts, data, labels, None, is_datetimelike=False)
+
+    expected = np.array(out, dtype="float64")
+
+    tm.assert_numpy_array_equal(
+        actual,
+        expected,
+    )
diff --git a/pandas/tests/groupby/test_numba.py b/pandas/tests/groupby/test_numba.py
index 4eb7b6a7b5bea..7d4440b595dff 100644
--- a/pandas/tests/groupby/test_numba.py
+++ b/pandas/tests/groupby/test_numba.py
@@ -8,6 +8,8 @@
 )
 import pandas._testing as tm
 
+pytestmark = pytest.mark.single_cpu
+
 
 @td.skip_if_no("numba")
 @pytest.mark.filterwarnings("ignore")
@@ -24,9 +26,7 @@ def test_cython_vs_numba_frame(
             engine="numba", engine_kwargs=engine_kwargs, **kwargs
         )
         expected = getattr(gb, func)(**kwargs)
-        # check_dtype can be removed if GH 44952 is addressed
-        check_dtype = func not in ("sum", "min", "max")
-        tm.assert_frame_equal(result, expected, check_dtype=check_dtype)
+        tm.assert_frame_equal(result, expected)
 
     def test_cython_vs_numba_getitem(
         self, sort, nogil, parallel, nopython, numba_supported_reductions
@@ -39,9 +39,7 @@ def test_cython_vs_numba_getitem(
             engine="numba", engine_kwargs=engine_kwargs, **kwargs
         )
         expected = getattr(gb, func)(**kwargs)
-        # check_dtype can be removed if GH 44952 is addressed
-        check_dtype = func not in ("sum", "min", "max")
-        tm.assert_series_equal(result, expected, check_dtype=check_dtype)
+        tm.assert_series_equal(result, expected)
 
     def test_cython_vs_numba_series(
         self, sort, nogil, parallel, nopython, numba_supported_reductions
@@ -54,9 +52,7 @@ def test_cython_vs_numba_series(
             engine="numba", engine_kwargs=engine_kwargs, **kwargs
         )
         expected = getattr(gb, func)(**kwargs)
-        # check_dtype can be removed if GH 44952 is addressed
-        check_dtype = func not in ("sum", "min", "max")
-        tm.assert_series_equal(result, expected, check_dtype=check_dtype)
+        tm.assert_series_equal(result, expected)
 
     def test_as_index_false_unsupported(self, numba_supported_reductions):
         func, kwargs = numba_supported_reductions
diff --git a/pandas/tests/groupby/test_nunique.py b/pandas/tests/groupby/test_nunique.py
index 661003d081bda..f4ebd54a7a1a9 100644
--- a/pandas/tests/groupby/test_nunique.py
+++ b/pandas/tests/groupby/test_nunique.py
@@ -17,51 +17,43 @@
 
 
 @pytest.mark.slow
-@pytest.mark.parametrize("n", 10 ** np.arange(2, 6))
-@pytest.mark.parametrize("m", [10, 100, 1000])
 @pytest.mark.parametrize("sort", [False, True])
 @pytest.mark.parametrize("dropna", [False, True])
-def test_series_groupby_nunique(n, m, sort, dropna):
-    def check_nunique(df, keys, as_index=True):
-        original_df = df.copy()
-        gr = df.groupby(keys, as_index=as_index, sort=sort)
-        left = gr["julie"].nunique(dropna=dropna)
-
-        gr = df.groupby(keys, as_index=as_index, sort=sort)
-        right = gr["julie"].apply(Series.nunique, dropna=dropna)
-        if not as_index:
-            right = right.reset_index(drop=True)
-
-        if as_index:
-            tm.assert_series_equal(left, right, check_names=False)
-        else:
-            tm.assert_frame_equal(left, right, check_names=False)
-        tm.assert_frame_equal(df, original_df)
-
+@pytest.mark.parametrize("as_index", [True, False])
+@pytest.mark.parametrize("with_nan", [True, False])
+@pytest.mark.parametrize("keys", [["joe"], ["joe", "jim"]])
+def test_series_groupby_nunique(sort, dropna, as_index, with_nan, keys):
+    n = 100
+    m = 10
     days = date_range("2015-08-23", periods=10)
-
-    frame = DataFrame(
+    df = DataFrame(
         {
             "jim": np.random.choice(list(ascii_lowercase), n),
             "joe": np.random.choice(days, n),
             "julie": np.random.randint(0, m, n),
         }
     )
-
-    check_nunique(frame, ["jim"])
-    check_nunique(frame, ["jim", "joe"])
-
-    frame = frame.astype({"julie": float})  # Explicit cast to avoid implicit cast below
-    frame.loc[1::17, "jim"] = None
-    frame.loc[3::37, "joe"] = None
-    frame.loc[7::19, "julie"] = None
-    frame.loc[8::19, "julie"] = None
-    frame.loc[9::19, "julie"] = None
-
-    check_nunique(frame, ["jim"])
-    check_nunique(frame, ["jim", "joe"])
-    check_nunique(frame, ["jim"], as_index=False)
-    check_nunique(frame, ["jim", "joe"], as_index=False)
+    if with_nan:
+        df = df.astype({"julie": float})  # Explicit cast to avoid implicit cast below
+        df.loc[1::17, "jim"] = None
+        df.loc[3::37, "joe"] = None
+        df.loc[7::19, "julie"] = None
+        df.loc[8::19, "julie"] = None
+        df.loc[9::19, "julie"] = None
+    original_df = df.copy()
+    gr = df.groupby(keys, as_index=as_index, sort=sort)
+    left = gr["julie"].nunique(dropna=dropna)
+
+    gr = df.groupby(keys, as_index=as_index, sort=sort)
+    right = gr["julie"].apply(Series.nunique, dropna=dropna)
+    if not as_index:
+        right = right.reset_index(drop=True)
+
+    if as_index:
+        tm.assert_series_equal(left, right, check_names=False)
+    else:
+        tm.assert_frame_equal(left, right, check_names=False)
+    tm.assert_frame_equal(df, original_df)
 
 
 def test_nunique():
diff --git a/pandas/tests/groupby/test_raises.py b/pandas/tests/groupby/test_raises.py
index 6fb903b02b62f..180755c1dca12 100644
--- a/pandas/tests/groupby/test_raises.py
+++ b/pandas/tests/groupby/test_raises.py
@@ -3,6 +3,7 @@
 # test file.
 
 import datetime
+import re
 
 import numpy as np
 import pytest
@@ -162,24 +163,20 @@ def test_groupby_raises_string(
         "max": (None, ""),
         "mean": (
             TypeError,
-            "Could not convert string '(xy|xyzwt|xyz|xztuo)' to numeric",
+            re.escape("agg function failed [how->mean,dtype->object]"),
         ),
         "median": (
             TypeError,
-            "|".join(
-                [
-                    r"Cannot convert \['x' 'y' 'z'\] to numeric",
-                    r"Cannot convert \['x' 'y'\] to numeric",
-                    r"Cannot convert \['x' 'y' 'z' 'w' 't'\] to numeric",
-                    r"Cannot convert \['x' 'z' 't' 'u' 'o'\] to numeric",
-                ]
-            ),
+            re.escape("agg function failed [how->median,dtype->object]"),
         ),
         "min": (None, ""),
         "ngroup": (None, ""),
         "nunique": (None, ""),
         "pct_change": (TypeError, "unsupported operand type"),
-        "prod": (TypeError, "can't multiply sequence by non-int of type 'str'"),
+        "prod": (
+            TypeError,
+            re.escape("agg function failed [how->prod,dtype->object]"),
+        ),
         "quantile": (TypeError, "cannot be performed against 'object' dtypes!"),
         "rank": (None, ""),
         "sem": (ValueError, "could not convert string to float"),
@@ -188,7 +185,10 @@ def test_groupby_raises_string(
         "skew": (ValueError, "could not convert string to float"),
         "std": (ValueError, "could not convert string to float"),
         "sum": (None, ""),
-        "var": (TypeError, "could not convert string to float"),
+        "var": (
+            TypeError,
+            re.escape("agg function failed [how->var,dtype->object]"),
+        ),
     }[groupby_func]
 
     _call_and_check(klass, msg, how, gb, groupby_func, args)
@@ -225,7 +225,7 @@ def test_groupby_raises_string_np(
         np.sum: (None, ""),
         np.mean: (
             TypeError,
-            "Could not convert string '(xyzwt|xy|xyz|xztuo)' to numeric",
+            re.escape("agg function failed [how->mean,dtype->object]"),
         ),
     }[groupby_func_np]
 
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index 5477ad75a56f7..78c8b6b236b65 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -21,6 +21,7 @@
     to_datetime,
 )
 import pandas._testing as tm
+from pandas.util.version import Version
 
 
 def tests_value_counts_index_names_category_column():
@@ -246,8 +247,18 @@ def test_bad_subset(education_df):
         gp.value_counts(subset=["country"])
 
 
-def test_basic(education_df):
+def test_basic(education_df, request):
     # gh43564
+    if Version(np.__version__) >= Version("1.25"):
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason=(
+                    "pandas default unstable sorting of duplicates"
+                    "issue with numpy>=1.25 with AVX instructions"
+                ),
+                strict=False,
+            )
+        )
     result = education_df.groupby("country")[["gender", "education"]].value_counts(
         normalize=True
     )
@@ -285,7 +296,7 @@ def _frame_value_counts(df, keys, normalize, sort, ascending):
 @pytest.mark.parametrize("as_index", [True, False])
 @pytest.mark.parametrize("frame", [True, False])
 def test_against_frame_and_seriesgroupby(
-    education_df, groupby, normalize, name, sort, ascending, as_index, frame
+    education_df, groupby, normalize, name, sort, ascending, as_index, frame, request
 ):
     # test all parameters:
     # - Use column, array or function as by= parameter
@@ -295,6 +306,16 @@ def test_against_frame_and_seriesgroupby(
     # - 3-way compare against:
     #   - apply with :meth:`~DataFrame.value_counts`
     #   - `~SeriesGroupBy.value_counts`
+    if Version(np.__version__) >= Version("1.25") and frame and sort and normalize:
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason=(
+                    "pandas default unstable sorting of duplicates"
+                    "issue with numpy>=1.25 with AVX instructions"
+                ),
+                strict=False,
+            )
+        )
     by = {
         "column": "country",
         "array": education_df["country"].values,
@@ -456,8 +477,18 @@ def nulls_df():
     ],
 )
 def test_dropna_combinations(
-    nulls_df, group_dropna, count_dropna, expected_rows, expected_values
+    nulls_df, group_dropna, count_dropna, expected_rows, expected_values, request
 ):
+    if Version(np.__version__) >= Version("1.25") and not group_dropna:
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason=(
+                    "pandas default unstable sorting of duplicates"
+                    "issue with numpy>=1.25 with AVX instructions"
+                ),
+                strict=False,
+            )
+        )
     gp = nulls_df.groupby(["A", "B"], dropna=group_dropna)
     result = gp.value_counts(normalize=True, sort=True, dropna=count_dropna)
     columns = DataFrame()
@@ -548,10 +579,20 @@ def test_data_frame_value_counts_dropna(
     ],
 )
 def test_categorical_single_grouper_with_only_observed_categories(
-    education_df, as_index, observed, normalize, name, expected_data
+    education_df, as_index, observed, normalize, name, expected_data, request
 ):
     # Test single categorical grouper with only observed grouping categories
     # when non-groupers are also categorical
+    if Version(np.__version__) >= Version("1.25"):
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason=(
+                    "pandas default unstable sorting of duplicates"
+                    "issue with numpy>=1.25 with AVX instructions"
+                ),
+                strict=False,
+            )
+        )
 
     gp = education_df.astype("category").groupby(
         "country", as_index=as_index, observed=observed
@@ -647,10 +688,21 @@ def assert_categorical_single_grouper(
     ],
 )
 def test_categorical_single_grouper_observed_true(
-    education_df, as_index, normalize, name, expected_data
+    education_df, as_index, normalize, name, expected_data, request
 ):
     # GH#46357
 
+    if Version(np.__version__) >= Version("1.25"):
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason=(
+                    "pandas default unstable sorting of duplicates"
+                    "issue with numpy>=1.25 with AVX instructions"
+                ),
+                strict=False,
+            )
+        )
+
     expected_index = [
         ("FR", "male", "low"),
         ("FR", "female", "high"),
@@ -717,10 +769,21 @@ def test_categorical_single_grouper_observed_true(
     ],
 )
 def test_categorical_single_grouper_observed_false(
-    education_df, as_index, normalize, name, expected_data
+    education_df, as_index, normalize, name, expected_data, request
 ):
     # GH#46357
 
+    if Version(np.__version__) >= Version("1.25"):
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason=(
+                    "pandas default unstable sorting of duplicates"
+                    "issue with numpy>=1.25 with AVX instructions"
+                ),
+                strict=False,
+            )
+        )
+
     expected_index = [
         ("FR", "male", "low"),
         ("FR", "female", "high"),
@@ -858,10 +921,22 @@ def test_categorical_multiple_groupers(
     ],
 )
 def test_categorical_non_groupers(
-    education_df, as_index, observed, normalize, name, expected_data
+    education_df, as_index, observed, normalize, name, expected_data, request
 ):
     # GH#46357 Test non-observed categories are included in the result,
     # regardless of `observed`
+
+    if Version(np.__version__) >= Version("1.25"):
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason=(
+                    "pandas default unstable sorting of duplicates"
+                    "issue with numpy>=1.25 with AVX instructions"
+                ),
+                strict=False,
+            )
+        )
+
     education_df = education_df.copy()
     education_df["gender"] = education_df["gender"].astype("category")
     education_df["education"] = education_df["education"].astype("category")
diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py
index 00ff391199652..ddc3fc7e2de3a 100644
--- a/pandas/tests/groupby/transform/test_numba.py
+++ b/pandas/tests/groupby/transform/test_numba.py
@@ -11,6 +11,8 @@
 )
 import pandas._testing as tm
 
+pytestmark = pytest.mark.single_cpu
+
 
 @td.skip_if_no("numba")
 def test_correct_function_signature():
@@ -130,20 +132,25 @@ def func_1(values, index):
     tm.assert_frame_equal(expected, result)
 
 
+# TODO: Test more than just reductions (e.g. actually test transformations once numba supports them)
 @td.skip_if_no("numba")
 @pytest.mark.parametrize(
     "agg_func", [["min", "max"], "min", {"B": ["min", "max"], "C": "sum"}]
 )
-def test_multifunc_notimplimented(agg_func):
+def test_string_cython_vs_numba(agg_func, numba_supported_reductions):
+    agg_func, kwargs = numba_supported_reductions
     data = DataFrame(
         {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
     )
     grouped = data.groupby(0)
-    with pytest.raises(NotImplementedError, match="Numba engine can"):
-        grouped.transform(agg_func, engine="numba")
 
-    with pytest.raises(NotImplementedError, match="Numba engine can"):
-        grouped[1].transform(agg_func, engine="numba")
+    result = grouped.transform(agg_func, engine="numba", **kwargs)
+    expected = grouped.transform(agg_func, engine="cython", **kwargs)
+    tm.assert_frame_equal(result, expected)
+
+    result = grouped[1].transform(agg_func, engine="numba", **kwargs)
+    expected = grouped[1].transform(agg_func, engine="cython", **kwargs)
+    tm.assert_series_equal(result, expected)
 
 
 @td.skip_if_no("numba")
@@ -232,9 +239,6 @@ def numba_func(values, index):
 
 
 @td.skip_if_no("numba")
-@pytest.mark.xfail(
-    reason="Groupby transform doesn't support strings as function inputs yet with numba"
-)
 def test_multilabel_numba_vs_cython(numba_supported_reductions):
     reduction, kwargs = numba_supported_reductions
     df = DataFrame(
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index eb7e6c154afc9..397500f64787f 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -408,11 +408,24 @@ def mock_op(x):
         test_op = lambda x: x.transform(transformation_func)
         mock_op = lambda x: getattr(x, transformation_func)()
 
-    result = test_op(df.groupby("A"))
+    msg = "The default fill_method='pad' in DataFrame.pct_change is deprecated"
+    groupby_msg = (
+        "The default fill_method='ffill' in DataFrameGroupBy.pct_change is deprecated"
+    )
+    if transformation_func == "pct_change":
+        with tm.assert_produces_warning(FutureWarning, match=groupby_msg):
+            result = test_op(df.groupby("A"))
+    else:
+        result = test_op(df.groupby("A"))
+
     # pass the group in same order as iterating `for ... in df.groupby(...)`
     # but reorder to match df's index since this is a transform
     groups = [df[["B"]].iloc[4:6], df[["B"]].iloc[6:], df[["B"]].iloc[:4]]
-    expected = concat([mock_op(g) for g in groups]).sort_index()
+    if transformation_func == "pct_change":
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            expected = concat([mock_op(g) for g in groups]).sort_index()
+    else:
+        expected = concat([mock_op(g) for g in groups]).sort_index()
     # sort_index does not preserve the freq
     expected = expected.set_axis(df.index)
 
@@ -973,9 +986,14 @@ def test_pct_change(frame_or_series, freq, periods, fill_method, limit):
     else:
         expected = expected.to_frame("vals")
 
-    result = gb.pct_change(
-        periods=periods, fill_method=fill_method, limit=limit, freq=freq
+    msg = (
+        "The 'fill_method' and 'limit' keywords in "
+        f"{type(gb).__name__}.pct_change are deprecated"
     )
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        result = gb.pct_change(
+            periods=periods, fill_method=fill_method, limit=limit, freq=freq
+        )
     tm.assert_equal(result, expected)
 
 
@@ -1412,7 +1430,12 @@ def test_null_group_str_transformer(request, dropna, transformation_func):
         # ngroup/cumcount always returns a Series as it counts the groups, not values
         expected = expected["B"].rename(None)
 
-    result = gb.transform(transformation_func, *args)
+    msg = "The default fill_method='ffill' in DataFrameGroupBy.pct_change is deprecated"
+    if transformation_func == "pct_change" and not dropna:
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = gb.transform("pct_change", *args)
+    else:
+        result = gb.transform(transformation_func, *args)
 
     tm.assert_equal(result, expected)
 
diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py
index e7db8076efa2b..e65ae52e348c6 100644
--- a/pandas/tests/indexes/interval/test_indexing.py
+++ b/pandas/tests/indexes/interval/test_indexing.py
@@ -424,6 +424,17 @@ def test_get_indexer_interval_index(self, box):
         expected = np.array([-1, -1, -1], dtype=np.intp)
         tm.assert_numpy_array_equal(actual, expected)
 
+    def test_get_indexer_read_only(self):
+        idx = interval_range(start=0, end=5)
+        arr = np.array([1, 2])
+        arr.flags.writeable = False
+        result = idx.get_indexer(arr)
+        expected = np.array([0, 1])
+        tm.assert_numpy_array_equal(result, expected, check_dtype=False)
+
+        result = idx.get_indexer_non_unique(arr)[0]
+        tm.assert_numpy_array_equal(result, expected, check_dtype=False)
+
 
 class TestSliceLocs:
     def test_slice_locs_with_interval(self):
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index 147247d23c2c6..fd0928b82ecbf 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -558,8 +558,6 @@ def test_union_with_missing_values_on_both_sides(nulls_fixture):
     mi2 = MultiIndex.from_arrays([[1, nulls_fixture, 3]])
     result = mi1.union(mi2)
     expected = MultiIndex.from_arrays([[1, 3, nulls_fixture]])
-    # We don't particularly care about having levels[0] be float64, but it is
-    expected = expected.set_levels([expected.levels[0].astype(np.float64)])
     tm.assert_index_equal(result, expected)
 
 
diff --git a/pandas/tests/indexes/numeric/test_indexing.py b/pandas/tests/indexes/numeric/test_indexing.py
index c99e912ce4c0f..cd28d519313ed 100644
--- a/pandas/tests/indexes/numeric/test_indexing.py
+++ b/pandas/tests/indexes/numeric/test_indexing.py
@@ -11,7 +11,10 @@
     Timestamp,
 )
 import pandas._testing as tm
-from pandas.core.arrays import FloatingArray
+from pandas.core.arrays import (
+    ArrowExtensionArray,
+    FloatingArray,
+)
 
 
 @pytest.fixture
@@ -389,6 +392,26 @@ def test_get_indexer_masked_na_boolean(self, dtype):
         result = idx.get_loc(NA)
         assert result == 2
 
+    def test_get_indexer_arrow_dictionary_target(self):
+        pa = pytest.importorskip("pyarrow")
+        target = Index(
+            ArrowExtensionArray(
+                pa.array([1, 2], type=pa.dictionary(pa.int8(), pa.int8()))
+            )
+        )
+        idx = Index([1])
+
+        result = idx.get_indexer(target)
+        expected = np.array([0, -1], dtype=np.int64)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result_1, result_2 = idx.get_indexer_non_unique(target)
+        expected_1, expected_2 = np.array([0, -1], dtype=np.int64), np.array(
+            [1], dtype=np.int64
+        )
+        tm.assert_numpy_array_equal(result_1, expected_1)
+        tm.assert_numpy_array_equal(result_2, expected_2)
+
 
 class TestWhere:
     @pytest.mark.parametrize(
diff --git a/pandas/tests/indexes/numeric/test_numeric.py b/pandas/tests/indexes/numeric/test_numeric.py
index 4080dc7081771..977c7da7d866f 100644
--- a/pandas/tests/indexes/numeric/test_numeric.py
+++ b/pandas/tests/indexes/numeric/test_numeric.py
@@ -341,7 +341,7 @@ def test_constructor(self, dtype):
         # copy
         # pass list, coerce fine
         index = index_cls([-5, 0, 1, 2], dtype=dtype)
-        arr = index.values
+        arr = index.values.copy()
         new_index = index_cls(arr, copy=True)
         tm.assert_index_equal(new_index, index, exact=True)
         val = arr[0] + 3000
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 83b32bb1230c2..b73bd7c78f009 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -31,7 +31,7 @@
 
 class TestCommon:
     @pytest.mark.parametrize("name", [None, "new_name"])
-    def test_to_frame(self, name, index_flat):
+    def test_to_frame(self, name, index_flat, using_copy_on_write):
         # see GH#15230, GH#22580
         idx = index_flat
 
@@ -45,7 +45,8 @@ def test_to_frame(self, name, index_flat):
         assert df.index is idx
         assert len(df.columns) == 1
         assert df.columns[0] == idx_name
-        assert df[idx_name].values is not idx.values
+        if not using_copy_on_write:
+            assert df[idx_name].values is not idx.values
 
         df = idx.to_frame(index=False, name=idx_name)
         assert df.index is not idx
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index 80fa78ccdc97e..8d75c1bf73778 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -577,6 +577,43 @@ def test_union_nan_in_both(dup):
     tm.assert_index_equal(result, expected)
 
 
+def test_union_rangeindex_sort_true():
+    # GH 53490
+    idx1 = RangeIndex(1, 100, 6)
+    idx2 = RangeIndex(1, 50, 3)
+    result = idx1.union(idx2, sort=True)
+    expected = Index(
+        [
+            1,
+            4,
+            7,
+            10,
+            13,
+            16,
+            19,
+            22,
+            25,
+            28,
+            31,
+            34,
+            37,
+            40,
+            43,
+            46,
+            49,
+            55,
+            61,
+            67,
+            73,
+            79,
+            85,
+            91,
+            97,
+        ]
+    )
+    tm.assert_index_equal(result, expected)
+
+
 def test_union_with_duplicate_index_not_subset_and_non_monotonic(
     any_dtype_for_small_pos_integer_indexes,
 ):
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
index 05fdddd7a4f4f..72bdc6da47d94 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -38,10 +38,27 @@ def test_timedelta_range(self):
         result = timedelta_range("1 days, 00:00:02", periods=5, freq="2D")
         tm.assert_index_equal(result, expected)
 
-        expected = to_timedelta(np.arange(50), unit="T") * 30
-        result = timedelta_range("0 days", freq="30T", periods=50)
+        expected = to_timedelta(np.arange(50), unit="min") * 30
+        result = timedelta_range("0 days", freq="30min", periods=50)
         tm.assert_index_equal(result, expected)
 
+    @pytest.mark.parametrize(
+        "depr_unit, unit",
+        [
+            ("T", "minute"),
+            ("t", "minute"),
+            ("L", "millisecond"),
+            ("l", "millisecond"),
+        ],
+    )
+    def test_timedelta_units_T_L_deprecated(self, depr_unit, unit):
+        depr_msg = f"Unit '{depr_unit}' is deprecated."
+
+        expected = to_timedelta(np.arange(5), unit=unit)
+        with tm.assert_produces_warning(FutureWarning, match=depr_msg):
+            result = to_timedelta(np.arange(5), unit=depr_unit)
+            tm.assert_index_equal(result, expected)
+
     @pytest.mark.parametrize(
         "periods, freq", [(3, "2D"), (5, "D"), (6, "19H12T"), (7, "16H"), (9, "12H")]
     )
diff --git a/pandas/tests/indexing/multiindex/test_indexing_slow.py b/pandas/tests/indexing/multiindex/test_indexing_slow.py
index 36b7dcfe4db12..de36d52921622 100644
--- a/pandas/tests/indexing/multiindex/test_indexing_slow.py
+++ b/pandas/tests/indexing/multiindex/test_indexing_slow.py
@@ -1,8 +1,3 @@
-from typing import (
-    Any,
-    List,
-)
-
 import numpy as np
 import pytest
 
@@ -13,78 +8,72 @@
 )
 import pandas._testing as tm
 
-m = 50
-n = 1000
-cols = ["jim", "joe", "jolie", "joline", "jolia"]
-
-vals: List[Any] = [
-    np.random.randint(0, 10, n),
-    np.random.choice(list("abcdefghij"), n),
-    np.random.choice(pd.date_range("20141009", periods=10).tolist(), n),
-    np.random.choice(list("ZYXWVUTSRQ"), n),
-    np.random.randn(n),
-]
-vals = list(map(tuple, zip(*vals)))
-
-# bunch of keys for testing
-keys: List[Any] = [
-    np.random.randint(0, 11, m),
-    np.random.choice(list("abcdefghijk"), m),
-    np.random.choice(pd.date_range("20141009", periods=11).tolist(), m),
-    np.random.choice(list("ZYXWVUTSRQP"), m),
-]
-keys = list(map(tuple, zip(*keys)))
-keys += [t[:-1] for t in vals[:: n // m]]
+
+@pytest.fixture
+def m():
+    return 50
+
+
+@pytest.fixture
+def n():
+    return 1000
+
+
+@pytest.fixture
+def cols():
+    return ["jim", "joe", "jolie", "joline", "jolia"]
+
+
+@pytest.fixture
+def vals(n):
+    vals = [
+        np.random.randint(0, 10, n),
+        np.random.choice(list("abcdefghij"), n),
+        np.random.choice(pd.date_range("20141009", periods=10).tolist(), n),
+        np.random.choice(list("ZYXWVUTSRQ"), n),
+        np.random.randn(n),
+    ]
+    vals = list(map(tuple, zip(*vals)))
+    return vals
+
+
+@pytest.fixture
+def keys(n, m, vals):
+    # bunch of keys for testing
+    keys = [
+        np.random.randint(0, 11, m),
+        np.random.choice(list("abcdefghijk"), m),
+        np.random.choice(pd.date_range("20141009", periods=11).tolist(), m),
+        np.random.choice(list("ZYXWVUTSRQP"), m),
+    ]
+    keys = list(map(tuple, zip(*keys)))
+    keys += [t[:-1] for t in vals[:: n // m]]
+    return keys
 
 
 # covers both unique index and non-unique index
-df = DataFrame(vals, columns=cols)
-a = pd.concat([df, df])
-b = df.drop_duplicates(subset=cols[:-1])
-
-
-def validate(mi, df, key):
-    # check indexing into a multi-index before & past the lexsort depth
-
-    mask = np.ones(len(df), dtype=bool)
-
-    # test for all partials of this key
-    for i, k in enumerate(key):
-        mask &= df.iloc[:, i] == k
-
-        if not mask.any():
-            assert key[: i + 1] not in mi.index
-            continue
-
-        assert key[: i + 1] in mi.index
-        right = df[mask].copy(deep=False)
-
-        if i + 1 != len(key):  # partial key
-            return_value = right.drop(cols[: i + 1], axis=1, inplace=True)
-            assert return_value is None
-            return_value = right.set_index(cols[i + 1 : -1], inplace=True)
-            assert return_value is None
-            tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
-
-        else:  # full key
-            return_value = right.set_index(cols[:-1], inplace=True)
-            assert return_value is None
-            if len(right) == 1:  # single hit
-                right = Series(
-                    right["jolia"].values, name=right.index[0], index=["jolia"]
-                )
-                tm.assert_series_equal(mi.loc[key[: i + 1]], right)
-            else:  # multi hit
-                tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
+@pytest.fixture
+def df(vals, cols):
+    return DataFrame(vals, columns=cols)
+
+
+@pytest.fixture
+def a(df):
+    return pd.concat([df, df])
+
+
+@pytest.fixture
+def b(df, cols):
+    return df.drop_duplicates(subset=cols[:-1])
 
 
 @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
 @pytest.mark.parametrize("lexsort_depth", list(range(5)))
-@pytest.mark.parametrize("key", keys)
-@pytest.mark.parametrize("frame", [a, b])
-def test_multiindex_get_loc(lexsort_depth, key, frame):
+@pytest.mark.parametrize("frame_fixture", ["a", "b"])
+def test_multiindex_get_loc(request, lexsort_depth, keys, frame_fixture, cols):
     # GH7724, GH2646
 
+    frame = request.getfixturevalue(frame_fixture)
     if lexsort_depth == 0:
         df = frame.copy(deep=False)
     else:
@@ -92,4 +81,34 @@ def test_multiindex_get_loc(lexsort_depth, key, frame):
 
     mi = df.set_index(cols[:-1])
     assert not mi.index._lexsort_depth < lexsort_depth
-    validate(mi, df, key)
+    for key in keys:
+        mask = np.ones(len(df), dtype=bool)
+
+        # test for all partials of this key
+        for i, k in enumerate(key):
+            mask &= df.iloc[:, i] == k
+
+            if not mask.any():
+                assert key[: i + 1] not in mi.index
+                continue
+
+            assert key[: i + 1] in mi.index
+            right = df[mask].copy(deep=False)
+
+            if i + 1 != len(key):  # partial key
+                return_value = right.drop(cols[: i + 1], axis=1, inplace=True)
+                assert return_value is None
+                return_value = right.set_index(cols[i + 1 : -1], inplace=True)
+                assert return_value is None
+                tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
+
+            else:  # full key
+                return_value = right.set_index(cols[:-1], inplace=True)
+                assert return_value is None
+                if len(right) == 1:  # single hit
+                    right = Series(
+                        right["jolia"].values, name=right.index[0], index=["jolia"]
+                    )
+                    tm.assert_series_equal(mi.loc[key[: i + 1]], right)
+                else:  # multi hit
+                    tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 15e1fae77d65b..6510612ba6f87 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -168,3 +168,21 @@ def test_getitem_str_slice_millisecond_resolution(self, frame_or_series):
             ],
         )
         tm.assert_equal(result, expected)
+
+    def test_getitem_pyarrow_index(self, frame_or_series):
+        # GH 53644
+        pytest.importorskip("pyarrow")
+        obj = frame_or_series(
+            range(5),
+            index=date_range("2020", freq="D", periods=5).astype(
+                "timestamp[us][pyarrow]"
+            ),
+        )
+        result = obj.loc[obj.index[:-3]]
+        expected = frame_or_series(
+            range(2),
+            index=date_range("2020", freq="D", periods=2).astype(
+                "timestamp[us][pyarrow]"
+            ),
+        )
+        tm.assert_equal(result, expected)
diff --git a/pandas/tests/interchange/conftest.py b/pandas/tests/interchange/conftest.py
deleted file mode 100644
index f552ba442a916..0000000000000
--- a/pandas/tests/interchange/conftest.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import pytest
-
-import pandas as pd
-
-
-@pytest.fixture
-def df_from_dict():
-    def maker(dct, is_categorical=False):
-        df = pd.DataFrame(dct)
-        return df.astype("category") if is_categorical else df
-
-    return maker
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 49873768ca952..5fce4f162d71f 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -16,47 +16,31 @@
 )
 from pandas.core.interchange.from_dataframe import from_dataframe
 
-test_data_categorical = {
-    "ordered": pd.Categorical(list("testdata") * 30, ordered=True),
-    "unordered": pd.Categorical(list("testdata") * 30, ordered=False),
-}
 
-NCOLS, NROWS = 100, 200
-
-
-def _make_data(make_one):
+@pytest.fixture
+def data_categorical():
     return {
-        f"col{int((i - NCOLS / 2) % NCOLS + 1)}": [make_one() for _ in range(NROWS)]
-        for i in range(NCOLS)
+        "ordered": pd.Categorical(list("testdata") * 30, ordered=True),
+        "unordered": pd.Categorical(list("testdata") * 30, ordered=False),
     }
 
 
-int_data = _make_data(lambda: random.randint(-100, 100))
-uint_data = _make_data(lambda: random.randint(1, 100))
-bool_data = _make_data(lambda: random.choice([True, False]))
-float_data = _make_data(lambda: random.random())
-datetime_data = _make_data(
-    lambda: datetime(
-        year=random.randint(1900, 2100),
-        month=random.randint(1, 12),
-        day=random.randint(1, 20),
-    )
-)
-
-string_data = {
-    "separator data": [
-        "abC|DeF,Hik",
-        "234,3245.67",
-        "gSaf,qWer|Gre",
-        "asd3,4sad|",
-        np.NaN,
-    ]
-}
+@pytest.fixture
+def string_data():
+    return {
+        "separator data": [
+            "abC|DeF,Hik",
+            "234,3245.67",
+            "gSaf,qWer|Gre",
+            "asd3,4sad|",
+            np.NaN,
+        ]
+    }
 
 
 @pytest.mark.parametrize("data", [("ordered", True), ("unordered", False)])
-def test_categorical_dtype(data):
-    df = pd.DataFrame({"A": (test_data_categorical[data[0]])})
+def test_categorical_dtype(data, data_categorical):
+    df = pd.DataFrame({"A": (data_categorical[data[0]])})
 
     col = df.__dataframe__().get_column_by_name("A")
     assert col.dtype[0] == DtypeKind.CATEGORICAL
@@ -143,9 +127,25 @@ def test_bitmasks_pyarrow(offset, length, expected_values):
 
 
 @pytest.mark.parametrize(
-    "data", [int_data, uint_data, float_data, bool_data, datetime_data]
+    "data",
+    [
+        lambda: random.randint(-100, 100),
+        lambda: random.randint(1, 100),
+        lambda: random.random(),
+        lambda: random.choice([True, False]),
+        lambda: datetime(
+            year=random.randint(1900, 2100),
+            month=random.randint(1, 12),
+            day=random.randint(1, 20),
+        ),
+    ],
 )
 def test_dataframe(data):
+    NCOLS, NROWS = 10, 20
+    data = {
+        f"col{int((i - NCOLS / 2) % NCOLS + 1)}": [data() for _ in range(NROWS)]
+        for i in range(NCOLS)
+    }
     df = pd.DataFrame(data)
 
     df2 = df.__dataframe__()
@@ -227,7 +227,7 @@ def test_mixed_missing():
         assert df2.get_column_by_name(col_name).null_count == 2
 
 
-def test_string():
+def test_string(string_data):
     test_str_data = string_data["separator data"] + [""]
     df = pd.DataFrame({"A": test_str_data})
     col = df.__dataframe__().get_column_by_name("A")
diff --git a/pandas/tests/interchange/test_spec_conformance.py b/pandas/tests/interchange/test_spec_conformance.py
index 965938b111e86..7c02379c11853 100644
--- a/pandas/tests/interchange/test_spec_conformance.py
+++ b/pandas/tests/interchange/test_spec_conformance.py
@@ -7,6 +7,17 @@
 
 import pytest
 
+import pandas as pd
+
+
+@pytest.fixture
+def df_from_dict():
+    def maker(dct, is_categorical=False):
+        df = pd.DataFrame(dct)
+        return df.astype("category") if is_categorical else df
+
+    return maker
+
 
 @pytest.mark.parametrize(
     "test_data",
diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py
index b863e85cae457..1fc867f95de53 100644
--- a/pandas/tests/io/conftest.py
+++ b/pandas/tests/io/conftest.py
@@ -1,4 +1,3 @@
-import os
 import shlex
 import subprocess
 import time
@@ -13,11 +12,15 @@
 )
 import pandas.util._test_decorators as td
 
-import pandas._testing as tm
-
+import pandas.io.common as icom
 from pandas.io.parsers import read_csv
 
 
+@pytest.fixture
+def compression_to_extension():
+    return {value: key for key, value in icom.extension_to_compression.items()}
+
+
 @pytest.fixture
 def tips_file(datapath):
     """Path to the tips dataset"""
@@ -52,7 +55,13 @@ def s3so(worker_id):
 
 
 @pytest.fixture(scope="session")
-def s3_base(worker_id):
+def monkeysession():
+    with pytest.MonkeyPatch.context() as mp:
+        yield mp
+
+
+@pytest.fixture(scope="session")
+def s3_base(worker_id, monkeysession):
     """
     Fixture for mocking S3 interaction.
 
@@ -62,56 +71,55 @@ def s3_base(worker_id):
     pytest.importorskip("s3fs")
     pytest.importorskip("boto3")
 
-    with tm.ensure_safe_environment_variables():
-        # temporary workaround as moto fails for botocore >= 1.11 otherwise,
-        # see https://github.com/spulec/moto/issues/1924 & 1952
-        os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key")
-        os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret")
-        if is_ci_environment():
-            if is_platform_arm() or is_platform_mac() or is_platform_windows():
-                # NOT RUN on Windows/macOS/ARM, only Ubuntu
-                # - subprocess in CI can cause timeouts
-                # - GitHub Actions do not support
-                #   container services for the above OSs
-                # - CircleCI will probably hit the Docker rate pull limit
-                pytest.skip(
-                    "S3 tests do not have a corresponding service in "
-                    "Windows, macOS or ARM platforms"
-                )
-            else:
-                yield "http://localhost:5000"
+    # temporary workaround as moto fails for botocore >= 1.11 otherwise,
+    # see https://github.com/spulec/moto/issues/1924 & 1952
+    monkeysession.setenv("AWS_ACCESS_KEY_ID", "foobar_key")
+    monkeysession.setenv("AWS_SECRET_ACCESS_KEY", "foobar_secret")
+    if is_ci_environment():
+        if is_platform_arm() or is_platform_mac() or is_platform_windows():
+            # NOT RUN on Windows/macOS/ARM, only Ubuntu
+            # - subprocess in CI can cause timeouts
+            # - GitHub Actions do not support
+            #   container services for the above OSs
+            # - CircleCI will probably hit the Docker rate pull limit
+            pytest.skip(
+                "S3 tests do not have a corresponding service in "
+                "Windows, macOS or ARM platforms"
+            )
         else:
-            requests = pytest.importorskip("requests")
-            pytest.importorskip("moto", minversion="1.3.14")
-            pytest.importorskip("flask")  # server mode needs flask too
-
-            # Launching moto in server mode, i.e., as a separate process
-            # with an S3 endpoint on localhost
-
-            worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw")
-            endpoint_port = f"555{worker_id}"
-            endpoint_uri = f"http://127.0.0.1:{endpoint_port}/"
-
-            # pipe to null to avoid logging in terminal
-            with subprocess.Popen(
-                shlex.split(f"moto_server s3 -p {endpoint_port}"),
-                stdout=subprocess.DEVNULL,
-                stderr=subprocess.DEVNULL,
-            ) as proc:
-                timeout = 5
-                while timeout > 0:
-                    try:
-                        # OK to go once server is accepting connections
-                        r = requests.get(endpoint_uri)
-                        if r.ok:
-                            break
-                    except Exception:
-                        pass
-                    timeout -= 0.1
-                    time.sleep(0.1)
-                yield endpoint_uri
-
-                proc.terminate()
+            yield "http://localhost:5000"
+    else:
+        requests = pytest.importorskip("requests")
+        pytest.importorskip("moto", minversion="1.3.14")
+        pytest.importorskip("flask")  # server mode needs flask too
+
+        # Launching moto in server mode, i.e., as a separate process
+        # with an S3 endpoint on localhost
+
+        worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw")
+        endpoint_port = f"555{worker_id}"
+        endpoint_uri = f"http://127.0.0.1:{endpoint_port}/"
+
+        # pipe to null to avoid logging in terminal
+        with subprocess.Popen(
+            shlex.split(f"moto_server s3 -p {endpoint_port}"),
+            stdout=subprocess.DEVNULL,
+            stderr=subprocess.DEVNULL,
+        ) as proc:
+            timeout = 5
+            while timeout > 0:
+                try:
+                    # OK to go once server is accepting connections
+                    r = requests.get(endpoint_uri)
+                    if r.ok:
+                        break
+                except Exception:
+                    pass
+                timeout -= 0.1
+                time.sleep(0.1)
+            yield endpoint_uri
+
+            proc.terminate()
 
 
 @pytest.fixture
diff --git a/pandas/tests/io/excel/test_xlrd.py b/pandas/tests/io/excel/test_xlrd.py
index 1f8fb4b801356..509029861715e 100644
--- a/pandas/tests/io/excel/test_xlrd.py
+++ b/pandas/tests/io/excel/test_xlrd.py
@@ -10,10 +10,8 @@
 
 xlrd = pytest.importorskip("xlrd")
 
-exts = [".xls"]
 
-
-@pytest.fixture(params=exts)
+@pytest.fixture(params=[".xls"])
 def read_ext_xlrd(request):
     """
     Valid extensions for reading Excel files with xlrd.
diff --git a/pandas/tests/io/formats/style/test_html.py b/pandas/tests/io/formats/style/test_html.py
index 53062c52a29db..1e345eb82ed3c 100644
--- a/pandas/tests/io/formats/style/test_html.py
+++ b/pandas/tests/io/formats/style/test_html.py
@@ -15,8 +15,12 @@
 jinja2 = pytest.importorskip("jinja2")
 from pandas.io.formats.style import Styler
 
-loader = jinja2.PackageLoader("pandas", "io/formats/templates")
-env = jinja2.Environment(loader=loader, trim_blocks=True)
+
+@pytest.fixture
+def env():
+    loader = jinja2.PackageLoader("pandas", "io/formats/templates")
+    env = jinja2.Environment(loader=loader, trim_blocks=True)
+    return env
 
 
 @pytest.fixture
@@ -31,12 +35,12 @@ def styler_mi():
 
 
 @pytest.fixture
-def tpl_style():
+def tpl_style(env):
     return env.get_template("html_style.tpl")
 
 
 @pytest.fixture
-def tpl_table():
+def tpl_table(env):
     return env.get_template("html_table.tpl")
 
 
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 6acef0f564ef4..7436644b8636a 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -24,11 +24,6 @@
 
 from pandas._config import config
 
-from pandas.compat import (
-    IS64,
-    is_platform_windows,
-)
-
 import pandas as pd
 from pandas import (
     DataFrame,
@@ -48,8 +43,6 @@
 from pandas.io.formats import printing
 import pandas.io.formats.format as fmt
 
-use_32bit_repr = is_platform_windows() or not IS64
-
 
 def get_local_am_pm():
     """Return the AM and PM strings returned by strftime in current locale."""
diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py
index e79e135208995..cd906ca27fa79 100644
--- a/pandas/tests/io/formats/test_info.py
+++ b/pandas/tests/io/formats/test_info.py
@@ -11,7 +11,6 @@
     IS64,
     PYPY,
 )
-import pandas.util._test_decorators as td
 
 from pandas import (
     CategoricalIndex,
@@ -504,9 +503,10 @@ def test_memory_usage_empty_no_warning():
     tm.assert_series_equal(result, expected)
 
 
-@td.skip_if_no("numba")
+@pytest.mark.single_cpu
 def test_info_compute_numba():
     # GH#51922
+    pytest.importorskip("numba")
     df = DataFrame([[1, 2], [3, 4]])
 
     with option_context("compute.use_numba", True):
diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py
index 6f578b45bf71d..dc106c9bebd45 100644
--- a/pandas/tests/io/formats/test_printing.py
+++ b/pandas/tests/io/formats/test_printing.py
@@ -1,6 +1,7 @@
 import string
 
 import numpy as np
+import pytest
 
 import pandas._config.config as cf
 
@@ -207,3 +208,27 @@ def test_multiindex_long_element():
         "cccccccccccccccccccccc',)],\n           )"
     )
     assert str(data) == expected
+
+
+@pytest.mark.parametrize(
+    "data,output",
+    [
+        ([2, complex("nan"), 1], [" 2.0+0.0j", " NaN+0.0j", " 1.0+0.0j"]),
+        ([2, complex("nan"), -1], [" 2.0+0.0j", " NaN+0.0j", "-1.0+0.0j"]),
+        ([-2, complex("nan"), -1], ["-2.0+0.0j", " NaN+0.0j", "-1.0+0.0j"]),
+        ([-1.23j, complex("nan"), -1], ["-0.00-1.23j", "  NaN+0.00j", "-1.00+0.00j"]),
+        ([1.23j, complex("nan"), 1.23], [" 0.00+1.23j", "  NaN+0.00j", " 1.23+0.00j"]),
+    ],
+)
+@pytest.mark.parametrize("as_frame", [True, False])
+def test_ser_df_with_complex_nans(data, output, as_frame):
+    # GH#53762
+    obj = pd.Series(data)
+    if as_frame:
+        obj = obj.to_frame(name="val")
+        reprs = [f"{i} {val}" for i, val in enumerate(output)]
+        expected = f"{'val': >{len(reprs[0])}}\n" + "\n".join(reprs)
+    else:
+        reprs = [f"{i}   {val}" for i, val in enumerate(output)]
+        expected = "\n".join(reprs) + "\ndtype: complex128"
+    assert str(obj) == expected
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index a208daaf9f77b..32509a799fa69 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -13,7 +13,6 @@
     compat,
 )
 import pandas._testing as tm
-from pandas.tests.io.test_compression import _compression_to_extension
 
 
 class TestToCSV:
@@ -543,13 +542,15 @@ def test_to_csv_write_to_open_file_with_newline_py3(self):
 
     @pytest.mark.parametrize("to_infer", [True, False])
     @pytest.mark.parametrize("read_infer", [True, False])
-    def test_to_csv_compression(self, compression_only, read_infer, to_infer):
+    def test_to_csv_compression(
+        self, compression_only, read_infer, to_infer, compression_to_extension
+    ):
         # see gh-15008
         compression = compression_only
 
         # We'll complete file extension subsequently.
         filename = "test."
-        filename += _compression_to_extension[compression]
+        filename += compression_to_extension[compression]
 
         df = DataFrame({"A": [1]})
 
diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py
index 143d2431d4147..bbb34c0cdab3c 100644
--- a/pandas/tests/io/json/test_compression.py
+++ b/pandas/tests/io/json/test_compression.py
@@ -1,4 +1,7 @@
-from io import BytesIO
+from io import (
+    BytesIO,
+    StringIO,
+)
 
 import pytest
 
@@ -6,7 +9,6 @@
 
 import pandas as pd
 import pandas._testing as tm
-from pandas.tests.io.test_compression import _compression_to_extension
 
 
 def test_compression_roundtrip(compression):
@@ -23,7 +25,8 @@ def test_compression_roundtrip(compression):
         # explicitly ensure file was compressed.
         with tm.decompress_file(path, compression) as fh:
             result = fh.read().decode("utf8")
-        tm.assert_frame_equal(df, pd.read_json(result))
+            data = StringIO(result)
+        tm.assert_frame_equal(df, pd.read_json(data))
 
 
 def test_read_zipped_json(datapath):
@@ -40,8 +43,7 @@ def test_read_zipped_json(datapath):
 @pytest.mark.single_cpu
 def test_with_s3_url(compression, s3_resource, s3so):
     # Bucket "pandas-test" created in tests/io/conftest.py
-
-    df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
+    df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
 
     with tm.ensure_clean() as path:
         df.to_json(path, compression=compression)
@@ -56,7 +58,7 @@ def test_with_s3_url(compression, s3_resource, s3so):
 
 def test_lines_with_compression(compression):
     with tm.ensure_clean() as path:
-        df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
+        df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
         df.to_json(path, orient="records", lines=True, compression=compression)
         roundtripped_df = pd.read_json(path, lines=True, compression=compression)
         tm.assert_frame_equal(df, roundtripped_df)
@@ -64,7 +66,7 @@ def test_lines_with_compression(compression):
 
 def test_chunksize_with_compression(compression):
     with tm.ensure_clean() as path:
-        df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
+        df = pd.read_json(StringIO('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}'))
         df.to_json(path, orient="records", lines=True, compression=compression)
 
         with pd.read_json(
@@ -75,7 +77,7 @@ def test_chunksize_with_compression(compression):
 
 
 def test_write_unsupported_compression_type():
-    df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
+    df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
     with tm.ensure_clean() as path:
         msg = "Unrecognized compression type: unsupported"
         with pytest.raises(ValueError, match=msg):
@@ -91,13 +93,15 @@ def test_read_unsupported_compression_type():
 
 @pytest.mark.parametrize("to_infer", [True, False])
 @pytest.mark.parametrize("read_infer", [True, False])
-def test_to_json_compression(compression_only, read_infer, to_infer):
+def test_to_json_compression(
+    compression_only, read_infer, to_infer, compression_to_extension
+):
     # see gh-15008
     compression = compression_only
 
     # We'll complete file extension subsequently.
     filename = "test."
-    filename += _compression_to_extension[compression]
+    filename += compression_to_extension[compression]
 
     df = pd.DataFrame({"A": [1]})
 
diff --git a/pandas/tests/io/json/test_deprecated_kwargs.py b/pandas/tests/io/json/test_deprecated_kwargs.py
index 7e3296db75323..cc88fc3ba1826 100644
--- a/pandas/tests/io/json/test_deprecated_kwargs.py
+++ b/pandas/tests/io/json/test_deprecated_kwargs.py
@@ -1,6 +1,7 @@
 """
 Tests for the deprecated keyword arguments for `read_json`.
 """
+from io import StringIO
 
 import pandas as pd
 import pandas._testing as tm
@@ -10,9 +11,11 @@
 
 def test_good_kwargs():
     df = pd.DataFrame({"A": [2, 4, 6], "B": [3, 6, 9]}, index=[0, 1, 2])
+
     with tm.assert_produces_warning(None):
-        tm.assert_frame_equal(df, read_json(df.to_json(orient="split"), orient="split"))
-        tm.assert_frame_equal(
-            df, read_json(df.to_json(orient="columns"), orient="columns")
-        )
-        tm.assert_frame_equal(df, read_json(df.to_json(orient="index"), orient="index"))
+        data1 = StringIO(df.to_json(orient="split"))
+        tm.assert_frame_equal(df, read_json(data1, orient="split"))
+        data2 = StringIO(df.to_json(orient="columns"))
+        tm.assert_frame_equal(df, read_json(data2, orient="columns"))
+        data3 = StringIO(df.to_json(orient="index"))
+        tm.assert_frame_equal(df, read_json(data3, orient="index"))
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 48ab0f1be8c4a..25b0e4a9f1de9 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -1,5 +1,6 @@
 """Tests for Table Schema integration."""
 from collections import OrderedDict
+from io import StringIO
 import json
 
 import numpy as np
@@ -254,7 +255,8 @@ def test_read_json_from_to_json_results(self):
                 "name_en": {"row_0": "Hakata Dolls Matsuo"},
             }
         )
-        result1 = pd.read_json(df.to_json())
+
+        result1 = pd.read_json(StringIO(df.to_json()))
         result2 = DataFrame.from_dict(json.loads(df.to_json()))
         tm.assert_frame_equal(result1, df)
         tm.assert_frame_equal(result2, df)
@@ -794,7 +796,7 @@ def test_comprehensive(self):
             index=pd.Index(range(4), name="idx"),
         )
 
-        out = df.to_json(orient="table")
+        out = StringIO(df.to_json(orient="table"))
         result = pd.read_json(out, orient="table")
         tm.assert_frame_equal(df, result)
 
@@ -810,7 +812,7 @@ def test_multiindex(self, index_names):
             columns=["Aussprache", "Griechisch", "Args"],
         )
         df.index.names = index_names
-        out = df.to_json(orient="table")
+        out = StringIO(df.to_json(orient="table"))
         result = pd.read_json(out, orient="table")
         tm.assert_frame_equal(df, result)
 
@@ -818,7 +820,7 @@ def test_empty_frame_roundtrip(self):
         # GH 21287
         df = DataFrame(columns=["a", "b", "c"])
         expected = df.copy()
-        out = df.to_json(orient="table")
+        out = StringIO(df.to_json(orient="table"))
         result = pd.read_json(out, orient="table")
         tm.assert_frame_equal(expected, result)
 
@@ -841,5 +843,5 @@ def test_read_json_orient_table_old_schema_version(self):
         }
         """
         expected = DataFrame({"a": [1, 2.0, "s"]})
-        result = pd.read_json(df_json, orient="table")
+        result = pd.read_json(StringIO(df_json), orient="table")
         tm.assert_frame_equal(expected, result)
diff --git a/pandas/tests/io/json/test_json_table_schema_ext_dtype.py b/pandas/tests/io/json/test_json_table_schema_ext_dtype.py
index 75845148f6581..b7bb057bc538e 100644
--- a/pandas/tests/io/json/test_json_table_schema_ext_dtype.py
+++ b/pandas/tests/io/json/test_json_table_schema_ext_dtype.py
@@ -3,6 +3,7 @@
 from collections import OrderedDict
 import datetime as dt
 import decimal
+from io import StringIO
 import json
 
 import pytest
@@ -287,7 +288,7 @@ def test_json_ext_dtype_reading_roundtrip(self):
         )
         expected = df.copy()
         data_json = df.to_json(orient="table", indent=4)
-        result = read_json(data_json, orient="table")
+        result = read_json(StringIO(data_json), orient="table")
         tm.assert_frame_equal(result, expected)
 
     def test_json_ext_dtype_reading(self):
@@ -311,6 +312,6 @@ def test_json_ext_dtype_reading(self):
                 }
             ]
         }"""
-        result = read_json(data_json, orient="table")
+        result = read_json(StringIO(data_json), orient="table")
         expected = DataFrame({"a": Series([2, NA], dtype="Int64")})
         tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index a966ad1dabcaa..ea996e82ae3a6 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -29,6 +29,51 @@
 )
 
 
+def test_literal_json_deprecation():
+    # PR 53409
+    expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+
+    jsonl = """{"a": 1, "b": 2}
+        {"a": 3, "b": 4}
+        {"a": 5, "b": 6}
+        {"a": 7, "b": 8}"""
+
+    msg = (
+        "Passing literal json to 'read_json' is deprecated and "
+        "will be removed in a future version. To read from a "
+        "literal string, wrap it in a 'StringIO' object."
+    )
+
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        try:
+            read_json(jsonl, lines=False)
+        except ValueError:
+            pass
+
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        read_json(expected.to_json(), lines=False)
+
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
+        tm.assert_frame_equal(result, expected)
+
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        try:
+            result = read_json(
+                '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n',
+                lines=False,
+            )
+        except ValueError:
+            pass
+
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        try:
+            result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=False)
+        except ValueError:
+            pass
+        tm.assert_frame_equal(result, expected)
+
+
 def assert_json_roundtrip_equal(result, expected, orient):
     if orient in ("records", "values"):
         expected = expected.reset_index(drop=True)
@@ -74,15 +119,16 @@ def test_frame_double_encoded_labels(self, orient):
             columns=["a \\ b", "y / z"],
         )
 
-        result = read_json(df.to_json(orient=orient), orient=orient)
+        data = StringIO(df.to_json(orient=orient))
+        result = read_json(data, orient=orient)
         expected = df.copy()
-
         assert_json_roundtrip_equal(result, expected, orient)
 
     @pytest.mark.parametrize("orient", ["split", "records", "values"])
     def test_frame_non_unique_index(self, orient):
         df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
-        result = read_json(df.to_json(orient=orient), orient=orient)
+        data = StringIO(df.to_json(orient=orient))
+        result = read_json(data, orient=orient)
         expected = df.copy()
 
         assert_json_roundtrip_equal(result, expected, orient)
@@ -108,7 +154,7 @@ def test_frame_non_unique_columns(self, orient, data):
         df = DataFrame(data, index=[1, 2], columns=["x", "x"])
 
         result = read_json(
-            df.to_json(orient=orient), orient=orient, convert_dates=["x"]
+            StringIO(df.to_json(orient=orient)), orient=orient, convert_dates=["x"]
         )
         if orient == "values":
             expected = DataFrame(data)
@@ -138,7 +184,7 @@ def test_frame_default_orient(self, float_frame):
     @pytest.mark.parametrize("dtype", [False, float])
     @pytest.mark.parametrize("convert_axes", [True, False])
     def test_roundtrip_simple(self, orient, convert_axes, dtype, float_frame):
-        data = float_frame.to_json(orient=orient)
+        data = StringIO(float_frame.to_json(orient=orient))
         result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)
 
         expected = float_frame
@@ -148,7 +194,7 @@ def test_roundtrip_simple(self, orient, convert_axes, dtype, float_frame):
     @pytest.mark.parametrize("dtype", [False, np.int64])
     @pytest.mark.parametrize("convert_axes", [True, False])
     def test_roundtrip_intframe(self, orient, convert_axes, dtype, int_frame):
-        data = int_frame.to_json(orient=orient)
+        data = StringIO(int_frame.to_json(orient=orient))
         result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)
         expected = int_frame
         assert_json_roundtrip_equal(result, expected, orient)
@@ -163,7 +209,7 @@ def test_roundtrip_str_axes(self, orient, convert_axes, dtype):
             dtype=dtype,
         )
 
-        data = df.to_json(orient=orient)
+        data = StringIO(df.to_json(orient=orient))
         result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)
 
         expected = df.copy()
@@ -197,8 +243,7 @@ def test_roundtrip_categorical(
                 )
             )
 
-        data = categorical_frame.to_json(orient=orient)
-
+        data = StringIO(categorical_frame.to_json(orient=orient))
         result = read_json(data, orient=orient, convert_axes=convert_axes)
 
         expected = categorical_frame.copy()
@@ -209,7 +254,7 @@ def test_roundtrip_categorical(
     @pytest.mark.parametrize("convert_axes", [True, False])
     def test_roundtrip_empty(self, orient, convert_axes):
         empty_frame = DataFrame()
-        data = empty_frame.to_json(orient=orient)
+        data = StringIO(empty_frame.to_json(orient=orient))
         result = read_json(data, orient=orient, convert_axes=convert_axes)
         if orient == "split":
             idx = pd.Index([], dtype=(float if convert_axes else object))
@@ -224,7 +269,7 @@ def test_roundtrip_empty(self, orient, convert_axes):
     @pytest.mark.parametrize("convert_axes", [True, False])
     def test_roundtrip_timestamp(self, orient, convert_axes, datetime_frame):
         # TODO: improve coverage with date_format parameter
-        data = datetime_frame.to_json(orient=orient)
+        data = StringIO(datetime_frame.to_json(orient=orient))
         result = read_json(data, orient=orient, convert_axes=convert_axes)
         expected = datetime_frame.copy()
 
@@ -250,7 +295,7 @@ def test_roundtrip_mixed(self, orient, convert_axes):
 
         df = DataFrame(data=values, index=index)
 
-        data = df.to_json(orient=orient)
+        data = StringIO(df.to_json(orient=orient))
         result = read_json(data, orient=orient, convert_axes=convert_axes)
 
         expected = df.copy()
@@ -275,9 +320,8 @@ def test_roundtrip_multiindex(self, columns):
             [[1, 2], [3, 4]],
             columns=pd.MultiIndex.from_arrays(columns),
         )
-
-        result = read_json(df.to_json(orient="split"), orient="split")
-
+        data = StringIO(df.to_json(orient="split"))
+        result = read_json(data, orient="split")
         tm.assert_frame_equal(result, df)
 
     @pytest.mark.parametrize(
@@ -322,8 +366,9 @@ def test_frame_from_json_bad_data_raises(self, data, msg, orient):
     @pytest.mark.parametrize("convert_axes", [True, False])
     def test_frame_from_json_missing_data(self, orient, convert_axes, dtype):
         num_df = DataFrame([[1, 2], [4, 5, 6]])
+
         result = read_json(
-            num_df.to_json(orient=orient),
+            StringIO(num_df.to_json(orient=orient)),
             orient=orient,
             convert_axes=convert_axes,
             dtype=dtype,
@@ -332,7 +377,7 @@ def test_frame_from_json_missing_data(self, orient, convert_axes, dtype):
 
         obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
         result = read_json(
-            obj_df.to_json(orient=orient),
+            StringIO(obj_df.to_json(orient=orient)),
             orient=orient,
             convert_axes=convert_axes,
             dtype=dtype,
@@ -343,7 +388,7 @@ def test_frame_from_json_missing_data(self, orient, convert_axes, dtype):
     def test_frame_read_json_dtype_missing_value(self, dtype):
         # GH28501 Parse missing values using read_json with dtype=False
         # to NaN instead of None
-        result = read_json("[null]", dtype=dtype)
+        result = read_json(StringIO("[null]"), dtype=dtype)
         expected = DataFrame([np.nan])
 
         tm.assert_frame_equal(result, expected)
@@ -355,7 +400,9 @@ def test_frame_infinity(self, inf, dtype):
         # deserialisation
         df = DataFrame([[1, 2], [4, 5, 6]])
         df.loc[0, 2] = inf
-        result = read_json(df.to_json(), dtype=dtype)
+
+        data = StringIO(df.to_json())
+        result = read_json(data, dtype=dtype)
         assert np.isnan(result.iloc[0, 2])
 
     @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865")
@@ -384,11 +431,15 @@ def test_frame_to_json_except(self):
     def test_frame_empty(self):
         df = DataFrame(columns=["jim", "joe"])
         assert not df._is_mixed_type
-        tm.assert_frame_equal(
-            read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
-        )
+
+        data = StringIO(df.to_json())
+        result = read_json(data, dtype=dict(df.dtypes))
+        tm.assert_frame_equal(result, df, check_index_type=False)
+
+    def test_frame_empty_to_json(self):
         # GH 7445
-        result = DataFrame({"test": []}, index=[]).to_json(orient="columns")
+        df = DataFrame({"test": []}, index=[])
+        result = df.to_json(orient="columns")
         expected = '{"test":{}}'
         assert result == expected
 
@@ -397,8 +448,11 @@ def test_frame_empty_mixedtype(self):
         df = DataFrame(columns=["jim", "joe"])
         df["joe"] = df["joe"].astype("i8")
         assert df._is_mixed_type
+        data = df.to_json()
         tm.assert_frame_equal(
-            read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
+            read_json(StringIO(data), dtype=dict(df.dtypes)),
+            df,
+            check_index_type=False,
         )
 
     def test_frame_mixedtype_orient(self):  # GH10289
@@ -417,17 +471,17 @@ def test_frame_mixedtype_orient(self):  # GH10289
         right = df.copy()
 
         for orient in ["split", "index", "columns"]:
-            inp = df.to_json(orient=orient)
+            inp = StringIO(df.to_json(orient=orient))
             left = read_json(inp, orient=orient, convert_axes=False)
             tm.assert_frame_equal(left, right)
 
         right.index = pd.RangeIndex(len(df))
-        inp = df.to_json(orient="records")
+        inp = StringIO(df.to_json(orient="records"))
         left = read_json(inp, orient="records", convert_axes=False)
         tm.assert_frame_equal(left, right)
 
         right.columns = pd.RangeIndex(df.shape[1])
-        inp = df.to_json(orient="values")
+        inp = StringIO(df.to_json(orient="values"))
         left = read_json(inp, orient="values", convert_axes=False)
         tm.assert_frame_equal(left, right)
 
@@ -546,8 +600,8 @@ def test_blocks_compat_GH9037(self):
 
         # JSON deserialisation always creates unicode strings
         df_mixed.columns = df_mixed.columns.astype("unicode")
-
-        df_roundtrip = read_json(df_mixed.to_json(orient="split"), orient="split")
+        data = StringIO(df_mixed.to_json(orient="split"))
+        df_roundtrip = read_json(data, orient="split")
         tm.assert_frame_equal(
             df_mixed,
             df_roundtrip,
@@ -609,10 +663,13 @@ def test_series_non_unique_index(self):
             s.to_json(orient="index")
 
         tm.assert_series_equal(
-            s, read_json(s.to_json(orient="split"), orient="split", typ="series")
+            s,
+            read_json(
+                StringIO(s.to_json(orient="split")), orient="split", typ="series"
+            ),
         )
         unserialized = read_json(
-            s.to_json(orient="records"), orient="records", typ="series"
+            StringIO(s.to_json(orient="records")), orient="records", typ="series"
         )
         tm.assert_numpy_array_equal(s.values, unserialized.values)
 
@@ -620,7 +677,7 @@ def test_series_default_orient(self, string_series):
         assert string_series.to_json() == string_series.to_json(orient="index")
 
     def test_series_roundtrip_simple(self, orient, string_series):
-        data = string_series.to_json(orient=orient)
+        data = StringIO(string_series.to_json(orient=orient))
         result = read_json(data, typ="series", orient=orient)
 
         expected = string_series
@@ -633,7 +690,7 @@ def test_series_roundtrip_simple(self, orient, string_series):
 
     @pytest.mark.parametrize("dtype", [False, None])
     def test_series_roundtrip_object(self, orient, dtype, object_series):
-        data = object_series.to_json(orient=orient)
+        data = StringIO(object_series.to_json(orient=orient))
         result = read_json(data, typ="series", orient=orient, dtype=dtype)
 
         expected = object_series
@@ -646,7 +703,7 @@ def test_series_roundtrip_object(self, orient, dtype, object_series):
 
     def test_series_roundtrip_empty(self, orient):
         empty_series = Series([], index=[], dtype=np.float64)
-        data = empty_series.to_json(orient=orient)
+        data = StringIO(empty_series.to_json(orient=orient))
         result = read_json(data, typ="series", orient=orient)
 
         expected = empty_series.reset_index(drop=True)
@@ -656,7 +713,7 @@ def test_series_roundtrip_empty(self, orient):
         tm.assert_series_equal(result, expected)
 
     def test_series_roundtrip_timeseries(self, orient, datetime_series):
-        data = datetime_series.to_json(orient=orient)
+        data = StringIO(datetime_series.to_json(orient=orient))
         result = read_json(data, typ="series", orient=orient)
 
         expected = datetime_series
@@ -670,7 +727,7 @@ def test_series_roundtrip_timeseries(self, orient, datetime_series):
     @pytest.mark.parametrize("dtype", [np.float64, int])
     def test_series_roundtrip_numeric(self, orient, dtype):
         s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
-        data = s.to_json(orient=orient)
+        data = StringIO(s.to_json(orient=orient))
         result = read_json(data, typ="series", orient=orient)
 
         expected = s.copy()
@@ -687,13 +744,13 @@ def test_series_to_json_except(self):
 
     def test_series_from_json_precise_float(self):
         s = Series([4.56, 4.56, 4.56])
-        result = read_json(s.to_json(), typ="series", precise_float=True)
+        result = read_json(StringIO(s.to_json()), typ="series", precise_float=True)
         tm.assert_series_equal(result, s, check_index_type=False)
 
     def test_series_with_dtype(self):
         # GH 21986
         s = Series([4.56, 4.56, 4.56])
-        result = read_json(s.to_json(), typ="series", dtype=np.int64)
+        result = read_json(StringIO(s.to_json()), typ="series", dtype=np.int64)
         expected = Series([4] * 3)
         tm.assert_series_equal(result, expected)
 
@@ -706,28 +763,27 @@ def test_series_with_dtype(self):
     )
     def test_series_with_dtype_datetime(self, dtype, expected):
         s = Series(["2000-01-01"], dtype="datetime64[ns]")
-        data = s.to_json()
+        data = StringIO(s.to_json())
         result = read_json(data, typ="series", dtype=dtype)
         tm.assert_series_equal(result, expected)
 
     def test_frame_from_json_precise_float(self):
         df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
-        result = read_json(df.to_json(), precise_float=True)
+        result = read_json(StringIO(df.to_json()), precise_float=True)
         tm.assert_frame_equal(result, df)
 
     def test_typ(self):
         s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
-        result = read_json(s.to_json(), typ=None)
+        result = read_json(StringIO(s.to_json()), typ=None)
         tm.assert_series_equal(result, s)
 
     def test_reconstruction_index(self):
         df = DataFrame([[1, 2, 3], [4, 5, 6]])
-        result = read_json(df.to_json())
-
+        result = read_json(StringIO(df.to_json()))
         tm.assert_frame_equal(result, df)
 
         df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
-        result = read_json(df.to_json())
+        result = read_json(StringIO(df.to_json()))
         tm.assert_frame_equal(result, df)
 
     def test_path(self, float_frame, int_frame, datetime_frame):
@@ -738,12 +794,12 @@ def test_path(self, float_frame, int_frame, datetime_frame):
 
     def test_axis_dates(self, datetime_series, datetime_frame):
         # frame
-        json = datetime_frame.to_json()
+        json = StringIO(datetime_frame.to_json())
         result = read_json(json)
         tm.assert_frame_equal(result, datetime_frame)
 
         # series
-        json = datetime_series.to_json()
+        json = StringIO(datetime_series.to_json())
         result = read_json(json, typ="series")
         tm.assert_series_equal(result, datetime_series, check_names=False)
         assert result.name is None
@@ -753,12 +809,12 @@ def test_convert_dates(self, datetime_series, datetime_frame):
         df = datetime_frame
         df["date"] = Timestamp("20130101").as_unit("ns")
 
-        json = df.to_json()
+        json = StringIO(df.to_json())
         result = read_json(json)
         tm.assert_frame_equal(result, df)
 
         df["foo"] = 1.0
-        json = df.to_json(date_unit="ns")
+        json = StringIO(df.to_json(date_unit="ns"))
 
         result = read_json(json, convert_dates=False)
         expected = df.copy()
@@ -768,7 +824,7 @@ def test_convert_dates(self, datetime_series, datetime_frame):
 
         # series
         ts = Series(Timestamp("20130101").as_unit("ns"), index=datetime_series.index)
-        json = ts.to_json()
+        json = StringIO(ts.to_json())
         result = read_json(json, typ="series")
         tm.assert_series_equal(result, ts)
 
@@ -815,7 +871,8 @@ def test_convert_dates_infer(self, infer_word):
         expected = DataFrame(
             [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
         )
-        result = read_json(dumps(data))[["id", infer_word]]
+
+        result = read_json(StringIO(dumps(data)))[["id", infer_word]]
         tm.assert_frame_equal(result, expected)
 
     @pytest.mark.parametrize(
@@ -838,7 +895,8 @@ def test_date_format_frame(self, date, date_unit, datetime_frame):
             json = df.to_json(date_format="iso", date_unit=date_unit)
         else:
             json = df.to_json(date_format="iso")
-        result = read_json(json)
+
+        result = read_json(StringIO(json))
         expected = df.copy()
         tm.assert_frame_equal(result, expected)
 
@@ -866,7 +924,8 @@ def test_date_format_series(self, date, date_unit, datetime_series):
             json = ts.to_json(date_format="iso", date_unit=date_unit)
         else:
             json = ts.to_json(date_format="iso")
-        result = read_json(json, typ="series")
+
+        result = read_json(StringIO(json), typ="series")
         expected = ts.copy()
         tm.assert_series_equal(result, expected)
 
@@ -888,11 +947,11 @@ def test_date_unit(self, unit, datetime_frame):
         json = df.to_json(date_format="epoch", date_unit=unit)
 
         # force date unit
-        result = read_json(json, date_unit=unit)
+        result = read_json(StringIO(json), date_unit=unit)
         tm.assert_frame_equal(result, df)
 
         # detect date unit
-        result = read_json(json, date_unit=None)
+        result = read_json(StringIO(json), date_unit=None)
         tm.assert_frame_equal(result, df)
 
     def test_weird_nested_json(self):
@@ -914,8 +973,7 @@ def test_weird_nested_json(self):
            ]
           }
         }"""
-
-        read_json(s)
+        read_json(StringIO(s))
 
     def test_doc_example(self):
         dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
@@ -924,7 +982,7 @@ def test_doc_example(self):
         dfj2["bools"] = True
         dfj2.index = pd.date_range("20130101", periods=5)
 
-        json = dfj2.to_json()
+        json = StringIO(dfj2.to_json())
         result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
         tm.assert_frame_equal(result, result)
 
@@ -933,7 +991,8 @@ def test_round_trip_exception(self, datapath):
         path = datapath("io", "json", "data", "teams.csv")
         df = pd.read_csv(path)
         s = df.to_json()
-        result = read_json(s)
+
+        result = read_json(StringIO(s))
         res = result.reindex(index=df.index, columns=df.columns)
         res = res.fillna(np.nan, downcast=False)
         tm.assert_frame_equal(res, df)
@@ -962,17 +1021,19 @@ def test_timedelta(self):
         ser = Series([timedelta(23), timedelta(seconds=5)])
         assert ser.dtype == "timedelta64[ns]"
 
-        result = read_json(ser.to_json(), typ="series").apply(converter)
+        result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)
         tm.assert_series_equal(result, ser)
 
         ser = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
         assert ser.dtype == "timedelta64[ns]"
-        result = read_json(ser.to_json(), typ="series").apply(converter)
+        result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)
         tm.assert_series_equal(result, ser)
 
         frame = DataFrame([timedelta(23), timedelta(seconds=5)])
         assert frame[0].dtype == "timedelta64[ns]"
-        tm.assert_frame_equal(frame, read_json(frame.to_json()).apply(converter))
+        tm.assert_frame_equal(
+            frame, read_json(StringIO(frame.to_json())).apply(converter)
+        )
 
     def test_timedelta2(self):
         frame = DataFrame(
@@ -982,8 +1043,8 @@ def test_timedelta2(self):
                 "c": pd.date_range(start="20130101", periods=2),
             }
         )
-
-        result = read_json(frame.to_json(date_unit="ns"))
+        data = StringIO(frame.to_json(date_unit="ns"))
+        result = read_json(data)
         result["a"] = pd.to_timedelta(result.a, unit="ns")
         result["c"] = pd.to_datetime(result.c)
         tm.assert_frame_equal(frame, result)
@@ -996,7 +1057,8 @@ def test_mixed_timedelta_datetime(self):
         expected = DataFrame(
             {"a": [pd.Timedelta(td).as_unit("ns")._value, ts.as_unit("ns")._value]}
         )
-        result = read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
+        data = StringIO(frame.to_json(date_unit="ns"))
+        result = read_json(data, dtype={"a": "int64"})
         tm.assert_frame_equal(result, expected, check_index_type=False)
 
     @pytest.mark.parametrize("as_object", [True, False])
@@ -1026,7 +1088,7 @@ def test_default_handler(self):
         value = object()
         frame = DataFrame({"a": [7, value]})
         expected = DataFrame({"a": [7, str(value)]})
-        result = read_json(frame.to_json(default_handler=str))
+        result = read_json(StringIO(frame.to_json(default_handler=str)))
         tm.assert_frame_equal(expected, result, check_index_type=False)
 
     def test_default_handler_indirect(self):
@@ -1198,7 +1260,8 @@ def test_tz_range_is_naive(self):
 
     def test_read_inline_jsonl(self):
         # GH9180
-        result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
+
+        result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True)
         expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
         tm.assert_frame_equal(result, expected)
 
@@ -1234,7 +1297,7 @@ def test_read_jsonl_unicode_chars(self):
         tm.assert_frame_equal(result, expected)
 
         # simulate string
-        json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
+        json = StringIO('{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n')
         result = read_json(json, lines=True)
         expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
         tm.assert_frame_equal(result, expected)
@@ -1289,14 +1352,15 @@ def test_to_jsonl(self):
         result = df.to_json(orient="records", lines=True)
         expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'
         assert result == expected
-        tm.assert_frame_equal(read_json(result, lines=True), df)
+        tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
 
         # GH15096: escaped characters in columns and data
         df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
         result = df.to_json(orient="records", lines=True)
         expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'
         assert result == expected
-        tm.assert_frame_equal(read_json(result, lines=True), df)
+
+        tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
 
     # TODO: there is a near-identical test for pytables; can we share?
     @pytest.mark.xfail(reason="GH#13774 encoding kwarg not supported", raises=TypeError)
@@ -1327,7 +1391,7 @@ def test_latin_encoding(self):
         def roundtrip(s, encoding="latin-1"):
             with tm.ensure_clean("test.json") as path:
                 s.to_json(path, encoding=encoding)
                 retr = read_json(path, encoding=encoding)
                 tm.assert_series_equal(s, retr, check_categorical=False)
 
         for s in examples:
@@ -1351,14 +1415,15 @@ def test_from_json_to_json_table_index_and_columns(self, index, columns):
         # GH25433 GH25435
         expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
         dfjson = expected.to_json(orient="table")
-        result = read_json(dfjson, orient="table")
+
+        result = read_json(StringIO(dfjson), orient="table")
         tm.assert_frame_equal(result, expected)
 
     def test_from_json_to_json_table_dtypes(self):
         # GH21345
         expected = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
         dfjson = expected.to_json(orient="table")
-        result = read_json(dfjson, orient="table")
+        result = read_json(StringIO(dfjson), orient="table")
         tm.assert_frame_equal(result, expected)
 
     @pytest.mark.parametrize("orient", ["split", "records", "index", "columns"])
@@ -1377,8 +1442,9 @@ def test_to_json_from_json_columns_dtypes(self, orient):
             }
         )
         dfjson = expected.to_json(orient=orient)
+
         result = read_json(
-            dfjson,
+            StringIO(dfjson),
             orient=orient,
             dtype={
                 "Integer": "int64",
@@ -1405,8 +1471,7 @@ def test_read_json_table_empty_axes_dtype(self, orient):
         # GH28558
 
         expected = DataFrame()
-        result = read_json("{}", orient=orient, convert_axes=True)
-
+        result = read_json(StringIO("{}"), orient=orient, convert_axes=True)
         tm.assert_index_equal(result.index, expected.index)
         tm.assert_index_equal(result.columns, expected.columns)
 
@@ -1519,13 +1584,13 @@ def test_index_false_from_json_to_json(self, orient, index):
         # Test index=False in from_json to_json
         expected = DataFrame({"a": [1, 2], "b": [3, 4]})
         dfjson = expected.to_json(orient=orient, index=index)
-        result = read_json(dfjson, orient=orient)
+        result = read_json(StringIO(dfjson), orient=orient)
         tm.assert_frame_equal(result, expected)
 
     def test_read_timezone_information(self):
         # GH 25546
         result = read_json(
-            '{"2019-01-01T11:00:00.000Z":88}', typ="series", orient="index"
+            StringIO('{"2019-01-01T11:00:00.000Z":88}'), typ="series", orient="index"
         )
         expected = Series([88], index=DatetimeIndex(["2019-01-01 11:00:00"], tz="UTC"))
         tm.assert_series_equal(result, expected)
@@ -1541,7 +1606,7 @@ def test_read_timezone_information(self):
     )
     def test_read_json_with_url_value(self, url):
         # GH 36271
-        result = read_json(f'{{"url":{{"0":"{url}"}}}}')
+        result = read_json(StringIO(f'{{"url":{{"0":"{url}"}}}}'))
         expected = DataFrame({"url": [url]})
         tm.assert_frame_equal(result, expected)
 
@@ -1746,7 +1811,9 @@ def test_json_negative_indent_raises(self):
 
     def test_emca_262_nan_inf_support(self):
         # GH 12213
-        data = '["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]'
+        data = StringIO(
+            '["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]'
+        )
         result = read_json(data)
         expected = DataFrame(
             ["a", None, "NaN", np.inf, "Infinity", -np.inf, "-Infinity"]
@@ -1757,7 +1824,7 @@ def test_frame_int_overflow(self):
         # GH 30320
         encoded_json = json.dumps([{"col": "31900441201190696999"}, {"col": "Text"}])
         expected = DataFrame({"col": ["31900441201190696999", "Text"]})
-        result = read_json(encoded_json)
+        result = read_json(StringIO(encoded_json))
         tm.assert_frame_equal(result, expected)
 
     @pytest.mark.parametrize(
@@ -1802,7 +1869,7 @@ def test_json_pandas_nulls(self, nulls_fixture, request):
 
     def test_readjson_bool_series(self):
         # GH31464
-        result = read_json("[true, true, false]", typ="series")
+        result = read_json(StringIO("[true, true, false]"), typ="series")
         expected = Series([True, True, False])
         tm.assert_series_equal(result, expected)
 
@@ -1918,7 +1985,9 @@ def test_read_json_dtype_backend(self, string_storage, dtype_backend, orient):
 
         out = df.to_json(orient=orient)
         with pd.option_context("mode.string_storage", string_storage):
-            result = read_json(out, dtype_backend=dtype_backend, orient=orient)
+            result = read_json(
+                StringIO(out), dtype_backend=dtype_backend, orient=orient
+            )
 
         expected = DataFrame(
             {
@@ -1957,7 +2026,7 @@ def test_read_json_nullable_series(self, string_storage, dtype_backend, orient):
         out = ser.to_json(orient=orient)
         with pd.option_context("mode.string_storage", string_storage):
             result = read_json(
-                out, dtype_backend=dtype_backend, orient=orient, typ="series"
+                StringIO(out), dtype_backend=dtype_backend, orient=orient, typ="series"
             )
 
         expected = Series([1, np.nan, 3], dtype="Int64")
diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py
index 7d7614bc93845..54f4980b1e4e3 100644
--- a/pandas/tests/io/json/test_readlines.py
+++ b/pandas/tests/io/json/test_readlines.py
@@ -23,7 +23,7 @@ def lines_json_df():
 
 def test_read_jsonl():
     # GH9180
-    result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
+    result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True)
     expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
     tm.assert_frame_equal(result, expected)
 
@@ -50,7 +50,11 @@ def test_read_datetime(request, engine):
         columns=["accounts", "date", "name"],
     )
     json_line = df.to_json(lines=True, orient="records")
-    result = read_json(json_line, engine=engine)
+
+    result = read_json(StringIO(json_line), engine=engine)
     expected = DataFrame(
         [[1, "2020-03-05", "hector"], [2, "2020-04-08T09:58:49+00:00", "hector"]],
         columns=["accounts", "date", "name"],
@@ -71,7 +75,7 @@ def test_read_jsonl_unicode_chars():
 
     # simulate string
     json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
-    result = read_json(json, lines=True)
+    result = read_json(StringIO(json), lines=True)
     expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
     tm.assert_frame_equal(result, expected)
 
@@ -87,14 +91,14 @@ def test_to_jsonl():
     result = df.to_json(orient="records", lines=True)
     expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'
     assert result == expected
-    tm.assert_frame_equal(read_json(result, lines=True), df)
+    tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
 
     # GH15096: escaped characters in columns and data
     df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
     result = df.to_json(orient="records", lines=True)
     expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'
     assert result == expected
-    tm.assert_frame_equal(read_json(result, lines=True), df)
+    tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
 
 
 def test_to_jsonl_count_new_lines():
@@ -256,7 +260,7 @@ def test_readjson_chunks_multiple_empty_lines(chunksize):
     {"A":3,"B":6}
     """
     orig = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
-    test = read_json(j, lines=True, chunksize=chunksize)
+    test = read_json(StringIO(j), lines=True, chunksize=chunksize)
     if chunksize is not None:
         with test:
             test = pd.concat(test)
@@ -290,7 +294,7 @@ def test_readjson_nrows(nrows, engine):
         {"a": 3, "b": 4}
         {"a": 5, "b": 6}
         {"a": 7, "b": 8}"""
-    result = read_json(jsonl, lines=True, nrows=nrows)
+    result = read_json(StringIO(jsonl), lines=True, nrows=nrows)
     expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
     tm.assert_frame_equal(result, expected)
 
@@ -311,10 +315,17 @@ def test_readjson_nrows_chunks(request, nrows, chunksize, engine):
         {"a": 3, "b": 4}
         {"a": 5, "b": 6}
         {"a": 7, "b": 8}"""
-    with read_json(
-        jsonl, lines=True, nrows=nrows, chunksize=chunksize, engine=engine
-    ) as reader:
-        chunked = pd.concat(reader)
+
+    if engine != "pyarrow":
+        with read_json(
+            StringIO(jsonl), lines=True, nrows=nrows, chunksize=chunksize, engine=engine
+        ) as reader:
+            chunked = pd.concat(reader)
+    else:
+        with read_json(
+            jsonl, lines=True, nrows=nrows, chunksize=chunksize, engine=engine
+        ) as reader:
+            chunked = pd.concat(reader)
     expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
     tm.assert_frame_equal(chunked, expected)
 
diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py
index bcba9c4a1823d..d150b52258d47 100644
--- a/pandas/tests/io/parser/test_compression.py
+++ b/pandas/tests/io/parser/test_compression.py
@@ -12,7 +12,6 @@
 
 from pandas import DataFrame
 import pandas._testing as tm
-from pandas.tests.io.test_compression import _compression_to_extension
 
 skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
 
@@ -91,11 +90,18 @@ def test_zip_error_invalid_zip(parser_and_data):
 
 @skip_pyarrow
 @pytest.mark.parametrize("filename", [None, "test.{ext}"])
-def test_compression(request, parser_and_data, compression_only, buffer, filename):
+def test_compression(
+    request,
+    parser_and_data,
+    compression_only,
+    buffer,
+    filename,
+    compression_to_extension,
+):
     parser, data, expected = parser_and_data
     compress_type = compression_only
 
-    ext = _compression_to_extension[compress_type]
+    ext = compression_to_extension[compress_type]
     filename = filename if filename is None else filename.format(ext=ext)
 
     if filename and buffer:
diff --git a/pandas/tests/io/parser/test_mangle_dupes.py b/pandas/tests/io/parser/test_mangle_dupes.py
index 5709e7e4027e8..4acbb82a5f23f 100644
--- a/pandas/tests/io/parser/test_mangle_dupes.py
+++ b/pandas/tests/io/parser/test_mangle_dupes.py
@@ -163,3 +163,14 @@ def test_mangle_dupe_cols_already_exists_unnamed_col(all_parsers):
         columns=["Unnamed: 0.1", "Unnamed: 0", "Unnamed: 2.1", "Unnamed: 2"],
     )
     tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+@pytest.mark.parametrize("usecol, engine", [([0, 1, 1], "python"), ([0, 1, 1], "c")])
+def test_mangle_cols_names(all_parsers, usecol, engine):
+    # GH 11823
+    parser = all_parsers
+    data = "1,2,3"
+    names = ["A", "A", "B"]
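+    # Duplicated entries in names should raise regardless of the usecols selection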
+    with pytest.raises(ValueError, match="Duplicate names"):
+        parser.read_csv(StringIO(data), names=names, usecols=usecol, engine=engine)
diff --git a/pandas/tests/io/parser/test_multi_thread.py b/pandas/tests/io/parser/test_multi_thread.py
index ab278470934a5..562b99090dfab 100644
--- a/pandas/tests/io/parser/test_multi_thread.py
+++ b/pandas/tests/io/parser/test_multi_thread.py
@@ -15,7 +15,11 @@
 
 # We'll probably always skip these for pyarrow
 # Maybe we'll add our own tests for pyarrow too
-pytestmark = pytest.mark.usefixtures("pyarrow_skip")
+pytestmark = [
+    pytest.mark.single_cpu,
+    pytest.mark.slow,
+    pytest.mark.usefixtures("pyarrow_skip"),
+]
 
 
 def _construct_dataframe(num_rows):
@@ -40,7 +44,6 @@ def _construct_dataframe(num_rows):
     return df
 
 
-@pytest.mark.slow
 def test_multi_thread_string_io_read_csv(all_parsers):
     # see gh-11786
     parser = all_parsers
@@ -135,7 +138,6 @@ def reader(arg):
     return final_dataframe
 
 
-@pytest.mark.slow
 def test_multi_thread_path_multipart_read_csv(all_parsers):
     # see gh-11786
     num_tasks = 4
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index a0d9c6ae99dcf..f3ae5b54d09ce 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -16,7 +16,6 @@
 
 from pandas import DataFrame
 import pandas._testing as tm
-from pandas.tests.io.test_compression import _compression_to_extension
 
 from pandas.io.feather_format import read_feather
 from pandas.io.parsers import read_csv
@@ -32,10 +31,12 @@
 )
 @pytest.mark.parametrize("mode", ["explicit", "infer"])
 @pytest.mark.parametrize("engine", ["python", "c"])
-def test_compressed_urls(salaries_table, mode, engine, compression_only):
+def test_compressed_urls(
+    salaries_table, mode, engine, compression_only, compression_to_extension
+):
     # test reading compressed urls with various engines and
     # extension inference
-    extension = _compression_to_extension[compression_only]
+    extension = compression_to_extension[compression_only]
     base_url = (
         "https://github.com/pandas-dev/pandas/raw/main/"
         "pandas/tests/io/parser/data/salaries.csv"
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 030650ad0031d..c682963c462cc 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -26,7 +26,6 @@
     ArrowStringArray,
     StringArray,
 )
-from pandas.tests.io.test_compression import _compression_to_extension
 
 from pandas.io.common import urlopen
 from pandas.io.parsers import (
@@ -667,13 +666,13 @@ def test_default_delimiter():
 
 
 @pytest.mark.parametrize("infer", [True, False])
-def test_fwf_compression(compression_only, infer):
+def test_fwf_compression(compression_only, infer, compression_to_extension):
     data = """1111111111
     2222222222
     3333333333""".strip()
 
     compression = compression_only
-    extension = _compression_to_extension[compression]
+    extension = compression_to_extension[compression]
 
     kwargs = {"widths": [5, 5], "names": ["one", "two"]}
     expected = read_fwf(StringIO(data), **kwargs)
diff --git a/pandas/tests/io/parser/usecols/test_parse_dates.py b/pandas/tests/io/parser/usecols/test_parse_dates.py
index 32231cbbdda64..069ac2a69d224 100644
--- a/pandas/tests/io/parser/usecols/test_parse_dates.py
+++ b/pandas/tests/io/parser/usecols/test_parse_dates.py
@@ -13,15 +13,6 @@
 )
 import pandas._testing as tm
 
-_msg_validate_usecols_arg = (
-    "'usecols' must either be list-like "
-    "of all strings, all unicode, all "
-    "integers or a callable."
-)
-_msg_validate_usecols_names = (
-    "Usecols do not match columns, columns expected but not found: {0}"
-)
-
 # TODO(1.4): Change these to xfails whenever parse_dates support (which was
 # intentionally disabled to keep small PR sizes) is added back
 pytestmark = pytest.mark.usefixtures("pyarrow_skip")
diff --git a/pandas/tests/io/parser/usecols/test_strings.py b/pandas/tests/io/parser/usecols/test_strings.py
index 8cecf1fc981ee..22f19ec518e4a 100644
--- a/pandas/tests/io/parser/usecols/test_strings.py
+++ b/pandas/tests/io/parser/usecols/test_strings.py
@@ -9,15 +9,6 @@
 from pandas import DataFrame
 import pandas._testing as tm
 
-_msg_validate_usecols_arg = (
-    "'usecols' must either be list-like "
-    "of all strings, all unicode, all "
-    "integers or a callable."
-)
-_msg_validate_usecols_names = (
-    "Usecols do not match columns, columns expected but not found: {0}"
-)
-
 
 def test_usecols_with_unicode_strings(all_parsers):
     # see gh-13219
@@ -70,7 +61,11 @@ def test_usecols_with_mixed_encoding_strings(all_parsers, usecols):
 2.613230982,2,False,b
 3.568935038,7,False,a"""
     parser = all_parsers
-
+    _msg_validate_usecols_arg = (
+        "'usecols' must either be list-like "
+        "of all strings, all unicode, all "
+        "integers or a callable."
+    )
     with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
         parser.read_csv(StringIO(data), usecols=usecols)
 
diff --git a/pandas/tests/io/pytables/test_categorical.py b/pandas/tests/io/pytables/test_categorical.py
index bb95762950d8e..b227c935c2b62 100644
--- a/pandas/tests/io/pytables/test_categorical.py
+++ b/pandas/tests/io/pytables/test_categorical.py
@@ -14,9 +14,7 @@
     ensure_clean_store,
 )
 
-pytestmark = [
-    pytest.mark.single_cpu,
-]
+pytestmark = pytest.mark.single_cpu
 
 
 def test_categorical(setup_path):
diff --git a/pandas/tests/io/pytables/test_file_handling.py b/pandas/tests/io/pytables/test_file_handling.py
index 49190daa37442..1a126ad75c01c 100644
--- a/pandas/tests/io/pytables/test_file_handling.py
+++ b/pandas/tests/io/pytables/test_file_handling.py
@@ -3,7 +3,12 @@
 import numpy as np
 import pytest
 
-from pandas.compat import is_platform_little_endian
+from pandas.compat import (
+    PY311,
+    is_ci_environment,
+    is_platform_linux,
+    is_platform_little_endian,
+)
 from pandas.errors import (
     ClosedFileError,
     PossibleDataLossError,
@@ -222,39 +227,44 @@ def test_complibs_default_settings_override(tmp_path, setup_path):
             assert node.filters.complib == "blosc"
 
 
-def test_complibs(tmp_path, setup_path):
+@pytest.mark.parametrize("lvl", range(10))
+@pytest.mark.parametrize("lib", tables.filters.all_complibs)
+@pytest.mark.filterwarnings("ignore:object name is not a valid")
+@pytest.mark.xfail(
+    not PY311 and is_ci_environment() and is_platform_linux(),
+    reason="producing invalid start bytes",
+    raises=UnicodeDecodeError,
+    strict=False,
+)
+def test_complibs(tmp_path, lvl, lib):
     # GH14478
-    df = tm.makeDataFrame()
+    df = DataFrame(
+        np.ones((30, 4)), columns=list("ABCD"), index=np.arange(30).astype(np.str_)
+    )
 
-    # Building list of all complibs and complevels tuples
-    all_complibs = tables.filters.all_complibs
     # Remove lzo if it's not available on this platform
     if not tables.which_lib_version("lzo"):
-        all_complibs.remove("lzo")
+        pytest.skip("lzo not available")
     # Remove bzip2 if it's not available on this platform
     if not tables.which_lib_version("bzip2"):
-        all_complibs.remove("bzip2")
+        pytest.skip("bzip2 not available")
 
-    all_levels = range(0, 10)
-    all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
+    tmpfile = tmp_path / f"{lvl}_{lib}.h5"
+    gname = f"{lvl}_{lib}"
 
-    for lib, lvl in all_tests:
-        tmpfile = tmp_path / setup_path
-        gname = "foo"
-
-        # Write and read file to see if data is consistent
-        df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
-        result = read_hdf(tmpfile, gname)
-        tm.assert_frame_equal(result, df)
+    # Write and read file to see if data is consistent
+    df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
+    result = read_hdf(tmpfile, gname)
+    tm.assert_frame_equal(result, df)
 
-        # Open file and check metadata for correct amount of compression
-        with tables.open_file(tmpfile, mode="r") as h5table:
-            for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
-                assert node.filters.complevel == lvl
-                if lvl == 0:
-                    assert node.filters.complib is None
-                else:
-                    assert node.filters.complib == lib
+    # Open file and check metadata for correct amount of compression
+    with tables.open_file(tmpfile, mode="r") as h5table:
+        for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
+            assert node.filters.complevel == lvl
+            if lvl == 0:
+                assert node.filters.complib is None
+            else:
+                assert node.filters.complib == lib
 
 
 @pytest.mark.skipif(
diff --git a/pandas/tests/io/pytables/test_round_trip.py b/pandas/tests/io/pytables/test_round_trip.py
index 2461f937c9eff..42f020a8f3708 100644
--- a/pandas/tests/io/pytables/test_round_trip.py
+++ b/pandas/tests/io/pytables/test_round_trip.py
@@ -26,9 +26,6 @@
 )
 from pandas.util import _test_decorators as td
 
-_default_compressor = "blosc"
-
-
 pytestmark = pytest.mark.single_cpu
 
 
@@ -479,7 +476,7 @@ def _make_one():
 def _check_roundtrip(obj, comparator, path, compression=False, **kwargs):
     options = {}
     if compression:
-        options["complib"] = _default_compressor
+        options["complib"] = "blosc"
 
     with ensure_clean_store(path, "w", **options) as store:
         store["obj"] = obj
@@ -490,7 +487,7 @@ def _check_roundtrip(obj, comparator, path, compression=False, **kwargs):
 def _check_roundtrip_table(obj, comparator, path, compression=False):
     options = {}
     if compression:
-        options["complib"] = _default_compressor
+        options["complib"] = "blosc"
 
     with ensure_clean_store(path, "w", **options) as store:
         store.put("obj", obj, format="table")
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 2d87b719af36b..82330e1d63c9a 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -30,8 +30,6 @@
     safe_close,
 )
 
-_default_compressor = "blosc"
-
 from pandas.io.pytables import (
     HDFStore,
     read_hdf,
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index ac11e2165eb6f..c84670f0eb69c 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -18,10 +18,6 @@
 
 import pandas.io.common as icom
 
-_compression_to_extension = {
-    value: key for key, value in icom.extension_to_compression.items()
-}
-
 
 @pytest.mark.parametrize(
     "obj",
@@ -84,11 +80,11 @@ def test_compression_size_fh(obj, method, compression_only):
     ],
 )
 def test_dataframe_compression_defaults_to_infer(
-    write_method, write_kwargs, read_method, compression_only
+    write_method, write_kwargs, read_method, compression_only, compression_to_extension
 ):
     # GH22004
     input = pd.DataFrame([[1.0, 0, -4], [3.4, 5, 2]], columns=["X", "Y", "Z"])
-    extension = _compression_to_extension[compression_only]
+    extension = compression_to_extension[compression_only]
     with tm.ensure_clean("compressed" + extension) as path:
         getattr(input, write_method)(path, **write_kwargs)
         output = read_method(path, compression=compression_only)
@@ -104,11 +100,16 @@ def test_dataframe_compression_defaults_to_infer(
     ],
 )
 def test_series_compression_defaults_to_infer(
-    write_method, write_kwargs, read_method, read_kwargs, compression_only
+    write_method,
+    write_kwargs,
+    read_method,
+    read_kwargs,
+    compression_only,
+    compression_to_extension,
 ):
     # GH22004
     input = pd.Series([0, 5, -2, 10], name="X")
-    extension = _compression_to_extension[compression_only]
+    extension = compression_to_extension[compression_only]
     with tm.ensure_clean("compressed" + extension) as path:
         getattr(input, write_method)(path, **write_kwargs)
         if "squeeze" in read_kwargs:
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 01e1be5529bad..7b139dc45624e 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -11,7 +11,7 @@
 
 from pandas.io.feather_format import read_feather, to_feather  # isort:skip
 
-pyarrow = pytest.importorskip("pyarrow", minversion="1.0.1")
+pyarrow = pytest.importorskip("pyarrow")
 
 
 @pytest.mark.single_cpu
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 18cc0f0b11dc9..bdea24f7bb5aa 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -16,7 +16,6 @@
     read_parquet,
 )
 import pandas._testing as tm
-from pandas.tests.io.test_compression import _compression_to_extension
 from pandas.util import _test_decorators as td
 
 
@@ -45,6 +44,8 @@ def ls(self, path, **kwargs):
 
 
 @td.skip_if_no("gcsfs")
+# Patches pyarrow; other processes should not pick up change
+@pytest.mark.single_cpu
 @pytest.mark.parametrize("format", ["csv", "json", "parquet", "excel", "markdown"])
 def test_to_read_gcs(gcs_buffer, format, monkeypatch, capsys):
     """
@@ -132,7 +133,9 @@ def assert_equal_zip_safe(result: bytes, expected: bytes, compression: str):
 
 @td.skip_if_no("gcsfs")
 @pytest.mark.parametrize("encoding", ["utf-8", "cp1251"])
-def test_to_csv_compression_encoding_gcs(gcs_buffer, compression_only, encoding):
+def test_to_csv_compression_encoding_gcs(
+    gcs_buffer, compression_only, encoding, compression_to_extension
+):
     """
     Compression and encoding should work with GCS.
 
@@ -161,7 +164,7 @@ def test_to_csv_compression_encoding_gcs(gcs_buffer, compression_only, encoding)
     tm.assert_frame_equal(df, read_df)
 
     # write compressed file with implicit compression
-    file_ext = _compression_to_extension[compression_only]
+    file_ext = compression_to_extension[compression_only]
     compression["method"] = "infer"
     path_gcs += f".{file_ext}"
     df.to_csv(path_gcs, compression=compression, encoding=encoding)
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index e54a23b1f8ef6..5c6c33de5ac5f 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -1347,6 +1347,7 @@ def __iter__(self) -> Iterator:
         assert self.read_html(bad)
 
     @pytest.mark.slow
+    @pytest.mark.single_cpu
     def test_importcheck_thread_safety(self, datapath):
         # see gh-16928
 
diff --git a/pandas/tests/io/test_orc.py b/pandas/tests/io/test_orc.py
index 36cfe5576adf9..571d9d5536e20 100644
--- a/pandas/tests/io/test_orc.py
+++ b/pandas/tests/io/test_orc.py
@@ -25,25 +25,19 @@ def dirpath(datapath):
     return datapath("io", "data", "orc")
 
 
-# Examples of dataframes with dtypes for which conversion to ORC
-# hasn't been implemented yet, that is, Category, unsigned integers,
-# interval, period and sparse.
-orc_writer_dtypes_not_supported = [
-    pd.DataFrame({"unimpl": np.array([1, 20], dtype="uint64")}),
-    pd.DataFrame({"unimpl": pd.Series(["a", "b", "a"], dtype="category")}),
-    pd.DataFrame(
-        {"unimpl": [pd.Interval(left=0, right=2), pd.Interval(left=0, right=5)]}
-    ),
-    pd.DataFrame(
-        {
-            "unimpl": [
-                pd.Period("2022-01-03", freq="D"),
-                pd.Period("2022-01-04", freq="D"),
-            ]
-        }
-    ),
-    pd.DataFrame({"unimpl": [np.nan] * 50}).astype(pd.SparseDtype("float", np.nan)),
-]
+@pytest.fixture(
+    params=[
+        np.array([1, 20], dtype="uint64"),
+        pd.Series(["a", "b", "a"], dtype="category"),
+        [pd.Interval(left=0, right=2), pd.Interval(left=0, right=5)],
+        [pd.Period("2022-01-03", freq="D"), pd.Period("2022-01-04", freq="D")],
+    ]
+)
+def orc_writer_dtypes_not_supported(request):
+    # Examples of dataframes with dtypes for which conversion to ORC
+    # hasn't been implemented yet, that is, Category, unsigned integers,
+    # interval, period and sparse.
+    return pd.DataFrame({"unimpl": request.param})
 
 
 def test_orc_reader_empty(dirpath):
@@ -297,13 +291,12 @@ def test_orc_roundtrip_bytesio():
 
 
 @td.skip_if_no("pyarrow", min_version="7.0.0")
-@pytest.mark.parametrize("df_not_supported", orc_writer_dtypes_not_supported)
-def test_orc_writer_dtypes_not_supported(df_not_supported):
+def test_orc_writer_dtypes_not_supported(orc_writer_dtypes_not_supported):
     # GH44554
     # PyArrow gained ORC write support with the current argument order
     msg = "The dtype of one or more columns is not supported yet."
     with pytest.raises(NotImplementedError, match=msg):
-        df_not_supported.to_orc()
+        orc_writer_dtypes_not_supported.to_orc()
 
 
 @td.skip_if_no("pyarrow", min_version="7.0.0")
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 57ef03b380601..10fce6b5bf43d 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -433,8 +433,12 @@ def test_read_columns(self, engine):
             df, engine, expected=expected, read_kwargs={"columns": ["string"]}
         )
 
-    def test_write_index(self, engine):
+    def test_write_index(self, engine, using_copy_on_write, request):
         check_names = engine != "fastparquet"
+        if using_copy_on_write and engine == "fastparquet":
+            request.node.add_marker(
+                pytest.mark.xfail(reason="fastparquet writes into Index")
+            )
 
         df = pd.DataFrame({"A": [1, 2, 3]})
         check_round_trip(df, engine)
@@ -1213,12 +1217,14 @@ def test_error_on_using_partition_cols_and_partition_on(
                 partition_cols=partition_cols,
             )
 
+    @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index")
     def test_empty_dataframe(self, fp):
         # GH #27339
         df = pd.DataFrame()
         expected = df.copy()
         check_round_trip(df, fp, expected=expected)
 
+    @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index")
     def test_timezone_aware_index(self, fp, timezone_aware_date_list):
         idx = 5 * [timezone_aware_date_list]
 
@@ -1328,6 +1334,7 @@ def test_invalid_dtype_backend(self, engine):
             with pytest.raises(ValueError, match=msg):
                 read_parquet(path, dtype_backend="numpy")
 
+    @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index")
     def test_empty_columns(self, fp):
         # GH 52034
         df = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name"))
diff --git a/pandas/tests/io/test_s3.py b/pandas/tests/io/test_s3.py
index 6702d58c139af..5171ec04b0bcf 100644
--- a/pandas/tests/io/test_s3.py
+++ b/pandas/tests/io/test_s3.py
@@ -1,5 +1,4 @@
 from io import BytesIO
-import os
 
 import pytest
 
@@ -34,17 +33,16 @@ def test_read_without_creds_from_pub_bucket():
 @td.skip_if_no("s3fs")
 @pytest.mark.network
 @tm.network
-def test_read_with_creds_from_pub_bucket():
+def test_read_with_creds_from_pub_bucket(monkeypatch):
     # Ensure we can read from a public bucket with credentials
     # GH 34626
     # Use Amazon Open Data Registry - https://registry.opendata.aws/gdelt
 
-    with tm.ensure_safe_environment_variables():
-        # temporary workaround as moto fails for botocore >= 1.11 otherwise,
-        # see https://github.com/spulec/moto/issues/1924 & 1952
-        os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key")
-        os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret")
-        df = read_csv(
-            "s3://gdelt-open-data/events/1981.csv", nrows=5, sep="\t", header=None
-        )
-        assert len(df) == 5
+    # temporary workaround as moto fails for botocore >= 1.11 otherwise,
+    # see https://github.com/spulec/moto/issues/1924 & 1952
+    monkeypatch.setenv("AWS_ACCESS_KEY_ID", "foobar_key")
+    monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "foobar_secret")
+    df = read_csv(
+        "s3://gdelt-open-data/events/1981.csv", nrows=5, sep="\t", header=None
+    )
+    assert len(df) == 5
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 7a3f7521d4a17..75fcef09535d4 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -75,31 +75,34 @@
 except ImportError:
     SQLALCHEMY_INSTALLED = False
 
-SQL_STRINGS = {
-    "read_parameters": {
-        "sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
-        "mysql": "SELECT * FROM iris WHERE `Name`=%s AND `SepalLength`=%s",
-        "postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
-    },
-    "read_named_parameters": {
-        "sqlite": """
+
+@pytest.fixture
+def sql_strings():
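+    # Query strings for the iris tests, keyed by statement type and then by flavor.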
+    return {
+        "read_parameters": {
+            "sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
+            "mysql": "SELECT * FROM iris WHERE `Name`=%s AND `SepalLength`=%s",
+            "postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
+        },
+        "read_named_parameters": {
+            "sqlite": """
                 SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
                 """,
-        "mysql": """
+            "mysql": """
                 SELECT * FROM iris WHERE
                 `Name`=%(name)s AND `SepalLength`=%(length)s
                 """,
-        "postgresql": """
+            "postgresql": """
                 SELECT * FROM iris WHERE
                 "Name"=%(name)s AND "SepalLength"=%(length)s
                 """,
-    },
-    "read_no_parameters_with_percent": {
-        "sqlite": "SELECT * FROM iris WHERE Name LIKE '%'",
-        "mysql": "SELECT * FROM iris WHERE `Name` LIKE '%'",
-        "postgresql": "SELECT * FROM iris WHERE \"Name\" LIKE '%'",
-    },
-}
+        },
+        "read_no_parameters_with_percent": {
+            "sqlite": "SELECT * FROM iris WHERE Name LIKE '%'",
+            "mysql": "SELECT * FROM iris WHERE `Name` LIKE '%'",
+            "postgresql": "SELECT * FROM iris WHERE \"Name\" LIKE '%'",
+        },
+    }
 
 
 def iris_table_metadata(dialect: str):
@@ -669,12 +672,12 @@ def test_read_iris_query_expression_with_parameter(conn, request):
 
 @pytest.mark.db
 @pytest.mark.parametrize("conn", all_connectable_iris)
-def test_read_iris_query_string_with_parameter(conn, request):
-    for db, query in SQL_STRINGS["read_parameters"].items():
+def test_read_iris_query_string_with_parameter(conn, request, sql_strings):
+    for db, query in sql_strings["read_parameters"].items():
         if db in conn:
             break
     else:
-        raise KeyError(f"No part of {conn} found in SQL_STRINGS['read_parameters']")
+        raise KeyError(f"No part of {conn} found in sql_strings['read_parameters']")
     conn = request.getfixturevalue(conn)
     iris_frame = read_sql_query(query, conn, params=("Iris-setosa", 5.1))
     check_iris_frame(iris_frame)
@@ -817,6 +820,117 @@ def psql_insert_copy(table, conn, keys, data_iter):
     tm.assert_frame_equal(result, expected)
 
 
+@pytest.mark.db
+@pytest.mark.parametrize("conn", postgresql_connectable)
+def test_insertion_method_on_conflict_do_nothing(conn, request):
+    # GH 15988: Example in to_sql docstring
+    conn = request.getfixturevalue(conn)
+
+    from sqlalchemy.dialects.postgresql import insert
+    from sqlalchemy.engine import Engine
+    from sqlalchemy.sql import text
+
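+    # Insert rows, silently skipping any whose primary key ("a") already exists.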
+    def insert_on_conflict(table, conn, keys, data_iter):
+        data = [dict(zip(keys, row)) for row in data_iter]
+        stmt = (
+            insert(table.table)
+            .values(data)
+            .on_conflict_do_nothing(index_elements=["a"])
+        )
+        result = conn.execute(stmt)
+        return result.rowcount
+
+    create_sql = text(
+        """
+    CREATE TABLE test_insert_conflict (
+        a  integer PRIMARY KEY,
+        b  numeric,
+        c  text
+    );
+    """
+    )
+    if isinstance(conn, Engine):
+        with conn.connect() as con:
+            with con.begin():
+                con.execute(create_sql)
+    else:
+        with conn.begin():
+            conn.execute(create_sql)
+
+    expected = DataFrame([[1, 2.1, "a"]], columns=list("abc"))
+    expected.to_sql("test_insert_conflict", conn, if_exists="append", index=False)
+
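+    # This second row conflicts on a=1, so it is skipped and the rowcount stays 0.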
+    df_insert = DataFrame([[1, 3.2, "b"]], columns=list("abc"))
+    inserted = df_insert.to_sql(
+        "test_insert_conflict",
+        conn,
+        index=False,
+        if_exists="append",
+        method=insert_on_conflict,
+    )
+    result = sql.read_sql_table("test_insert_conflict", conn)
+    tm.assert_frame_equal(result, expected)
+    assert inserted == 0
+
+    # Cleanup
+    with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL:
+        pandasSQL.drop_table("test_insert_conflict")
+
+
+@pytest.mark.db
+@pytest.mark.parametrize("conn", mysql_connectable)
+def test_insertion_method_on_conflict_update(conn, request):
+    # GH 14553: Example in to_sql docstring
+    conn = request.getfixturevalue(conn)
+
+    from sqlalchemy.dialects.mysql import insert
+    from sqlalchemy.engine import Engine
+    from sqlalchemy.sql import text
+
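+    # On a duplicate key, update columns "b" and "c" with the incoming values.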
+    def insert_on_conflict(table, conn, keys, data_iter):
+        data = [dict(zip(keys, row)) for row in data_iter]
+        stmt = insert(table.table).values(data)
+        stmt = stmt.on_duplicate_key_update(b=stmt.inserted.b, c=stmt.inserted.c)
+        result = conn.execute(stmt)
+        return result.rowcount
+
+    create_sql = text(
+        """
+    CREATE TABLE test_insert_conflict (
+        a INT PRIMARY KEY,
+        b FLOAT,
+        c VARCHAR(10)
+    );
+    """
+    )
+    if isinstance(conn, Engine):
+        with conn.connect() as con:
+            with con.begin():
+                con.execute(create_sql)
+    else:
+        with conn.begin():
+            conn.execute(create_sql)
+
+    df = DataFrame([[1, 2.1, "a"]], columns=list("abc"))
+    df.to_sql("test_insert_conflict", conn, if_exists="append", index=False)
+
+    expected = DataFrame([[1, 3.2, "b"]], columns=list("abc"))
+    inserted = expected.to_sql(
+        "test_insert_conflict",
+        conn,
+        index=False,
+        if_exists="append",
+        method=insert_on_conflict,
+    )
+    result = sql.read_sql_table("test_insert_conflict", conn)
+    tm.assert_frame_equal(result, expected)
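+    # MySQL counts an ON DUPLICATE KEY UPDATE of an existing row as 2 affected rows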
+    assert inserted == 2
+
+    # Cleanup
+    with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL:
+        pandasSQL.drop_table("test_insert_conflict")
+
+
 def test_execute_typeerror(sqlite_iris_engine):
     with pytest.raises(TypeError, match="pandas.io.sql.execute requires a connection"):
         with tm.assert_produces_warning(
@@ -933,20 +1047,20 @@ def load_types_data(self, types_data):
         else:
             create_and_load_types(self.conn, types_data, self.flavor)
 
-    def _read_sql_iris_parameter(self):
-        query = SQL_STRINGS["read_parameters"][self.flavor]
+    def _read_sql_iris_parameter(self, sql_strings):
+        query = sql_strings["read_parameters"][self.flavor]
         params = ("Iris-setosa", 5.1)
         iris_frame = self.pandasSQL.read_query(query, params=params)
         check_iris_frame(iris_frame)
 
-    def _read_sql_iris_named_parameter(self):
-        query = SQL_STRINGS["read_named_parameters"][self.flavor]
+    def _read_sql_iris_named_parameter(self, sql_strings):
+        query = sql_strings["read_named_parameters"][self.flavor]
         params = {"name": "Iris-setosa", "length": 5.1}
         iris_frame = self.pandasSQL.read_query(query, params=params)
         check_iris_frame(iris_frame)
 
-    def _read_sql_iris_no_parameter_with_percent(self):
-        query = SQL_STRINGS["read_no_parameters_with_percent"][self.flavor]
+    def _read_sql_iris_no_parameter_with_percent(self, sql_strings):
+        query = sql_strings["read_no_parameters_with_percent"][self.flavor]
         iris_frame = self.pandasSQL.read_query(query, params=None)
         check_iris_frame(iris_frame)
 
@@ -1832,11 +1946,11 @@ def setup_driver(cls):
     def setup_engine(cls):
         raise NotImplementedError()
 
-    def test_read_sql_parameter(self):
-        self._read_sql_iris_parameter()
+    def test_read_sql_parameter(self, sql_strings):
+        self._read_sql_iris_parameter(sql_strings)
 
-    def test_read_sql_named_parameter(self):
-        self._read_sql_iris_named_parameter()
+    def test_read_sql_named_parameter(self, sql_strings):
+        self._read_sql_iris_named_parameter(sql_strings)
 
     def test_to_sql_empty(self, test_frame1):
         self._to_sql_empty(test_frame1)
@@ -2948,11 +3062,11 @@ def setup_method(self, iris_path, types_data):
         self.load_types_data(types_data)
         self.pandasSQL = sql.SQLiteDatabase(self.conn)
 
-    def test_read_sql_parameter(self):
-        self._read_sql_iris_parameter()
+    def test_read_sql_parameter(self, sql_strings):
+        self._read_sql_iris_parameter(sql_strings)
 
-    def test_read_sql_named_parameter(self):
-        self._read_sql_iris_named_parameter()
+    def test_read_sql_named_parameter(self, sql_strings):
+        self._read_sql_iris_named_parameter(sql_strings)
 
     def test_to_sql_empty(self, test_frame1):
         self._to_sql_empty(test_frame1)
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 1b0a1d740677b..c4035ea867962 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -19,7 +19,6 @@
     DataFrame,
     Series,
 )
-from pandas.tests.io.test_compression import _compression_to_extension
 
 from pandas.io.parsers import read_csv
 from pandas.io.stata import (
@@ -1832,9 +1831,10 @@ def test_encoding_latin1_118(self, datapath):
         # will block pytests skip mechanism from triggering (failing the test)
         # if the path is not present
         path = datapath("io", "data", "stata", "stata1_encoding_118.dta")
-        with tm.assert_produces_warning(UnicodeWarning) as w:
+        with tm.assert_produces_warning(UnicodeWarning, filter_level="once") as w:
             encoded = read_stata(path)
-            assert len(w) == 151
+            # with filter_level="always", this would produce 151 warnings, which can be slow
+            assert len(w) == 1
             assert w[0].message.args[0] == msg
 
         expected = DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"])
@@ -1964,13 +1964,13 @@ def test_statareader_warns_when_used_without_context(datapath):
 @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
 @pytest.mark.parametrize("use_dict", [True, False])
 @pytest.mark.parametrize("infer", [True, False])
-def test_compression(compression, version, use_dict, infer):
+def test_compression(compression, version, use_dict, infer, compression_to_extension):
     file_name = "dta_inferred_compression.dta"
     if compression:
         if use_dict:
             file_ext = compression
         else:
-            file_ext = _compression_to_extension[compression]
+            file_ext = compression_to_extension[compression]
         file_name += f".{file_ext}"
     compression_arg = compression
     if infer:
@@ -2134,10 +2134,12 @@ def test_compression_roundtrip(compression):
 
 @pytest.mark.parametrize("to_infer", [True, False])
 @pytest.mark.parametrize("read_infer", [True, False])
-def test_stata_compression(compression_only, read_infer, to_infer):
+def test_stata_compression(
+    compression_only, read_infer, to_infer, compression_to_extension
+):
     compression = compression_only
 
-    ext = _compression_to_extension[compression]
+    ext = compression_to_extension[compression]
     filename = f"test.{ext}"
 
     df = DataFrame(
diff --git a/pandas/tests/io/test_user_agent.py b/pandas/tests/io/test_user_agent.py
index 06051a81679fa..b9d3a20b2ecea 100644
--- a/pandas/tests/io/test_user_agent.py
+++ b/pandas/tests/io/test_user_agent.py
@@ -17,10 +17,13 @@
 import pandas as pd
 import pandas._testing as tm
 
-pytestmark = pytest.mark.skipif(
-    is_ci_environment(),
-    reason="GH 45651: This test can hang in our CI min_versions build",
-)
+pytestmark = [
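+    # these tests spin up local HTTP servers; keep them on a single xdist worker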
+    pytest.mark.single_cpu,
+    pytest.mark.skipif(
+        is_ci_environment(),
+        reason="GH 45651: This test can hang in our CI min_versions build",
+    ),
+]
 
 
 class BaseUserAgentResponder(http.server.BaseHTTPRequestHandler):
diff --git a/pandas/tests/io/xml/conftest.py b/pandas/tests/io/xml/conftest.py
new file mode 100644
index 0000000000000..510e22fb32e77
--- /dev/null
+++ b/pandas/tests/io/xml/conftest.py
@@ -0,0 +1,36 @@
+import pytest
+
+
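+# Path fixtures for the XML/XSL test data files shared by the xml test modules.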
+@pytest.fixture
+def xml_data_path(tests_io_data_path):
+    return tests_io_data_path / "xml"
+
+
+@pytest.fixture
+def xml_books(xml_data_path):
+    return xml_data_path / "books.xml"
+
+
+@pytest.fixture
+def xml_doc_ch_utf(xml_data_path):
+    return xml_data_path / "doc_ch_utf.xml"
+
+
+@pytest.fixture
+def xml_baby_names(xml_data_path):
+    return xml_data_path / "baby_names.xml"
+
+
+@pytest.fixture
+def kml_cta_rail_lines(xml_data_path):
+    return xml_data_path / "cta_rail_lines.kml"
+
+
+@pytest.fixture
+def xsl_flatten_doc(xml_data_path):
+    return xml_data_path / "flatten_doc.xsl"
+
+
+@pytest.fixture
+def xsl_row_field_output(xml_data_path):
+    return xml_data_path / "row_field_output.xsl"
diff --git a/pandas/tests/io/xml/test_to_xml.py b/pandas/tests/io/xml/test_to_xml.py
index 1f1f44f408fc1..3a16e8c2b94ae 100644
--- a/pandas/tests/io/xml/test_to_xml.py
+++ b/pandas/tests/io/xml/test_to_xml.py
@@ -17,7 +17,6 @@
     Index,
 )
 import pandas._testing as tm
-from pandas.tests.io.test_compression import _compression_to_extension
 
 from pandas.io.common import get_handle
 from pandas.io.xml import read_xml
@@ -56,60 +55,69 @@
 # [X] - XSLTParseError: "failed to compile"
 # [X] - PermissionError: "Forbidden"
 
-geom_df = DataFrame(
-    {
-        "shape": ["square", "circle", "triangle"],
-        "degrees": [360, 360, 180],
-        "sides": [4, np.nan, 3],
-    }
-)
 
-planet_df = DataFrame(
-    {
-        "planet": [
-            "Mercury",
-            "Venus",
-            "Earth",
-            "Mars",
-            "Jupiter",
-            "Saturn",
-            "Uranus",
-            "Neptune",
-        ],
-        "type": [
-            "terrestrial",
-            "terrestrial",
-            "terrestrial",
-            "terrestrial",
-            "gas giant",
-            "gas giant",
-            "ice giant",
-            "ice giant",
-        ],
-        "location": [
-            "inner",
-            "inner",
-            "inner",
-            "inner",
-            "outer",
-            "outer",
-            "outer",
-            "outer",
-        ],
-        "mass": [
-            0.330114,
-            4.86747,
-            5.97237,
-            0.641712,
-            1898.187,
-            568.3174,
-            86.8127,
-            102.4126,
-        ],
-    }
-)
+@pytest.fixture
+def geom_df():
+    return DataFrame(
+        {
+            "shape": ["square", "circle", "triangle"],
+            "degrees": [360, 360, 180],
+            "sides": [4, np.nan, 3],
+        }
+    )
+
+
+@pytest.fixture
+def planet_df():
+    return DataFrame(
+        {
+            "planet": [
+                "Mercury",
+                "Venus",
+                "Earth",
+                "Mars",
+                "Jupiter",
+                "Saturn",
+                "Uranus",
+                "Neptune",
+            ],
+            "type": [
+                "terrestrial",
+                "terrestrial",
+                "terrestrial",
+                "terrestrial",
+                "gas giant",
+                "gas giant",
+                "ice giant",
+                "ice giant",
+            ],
+            "location": [
+                "inner",
+                "inner",
+                "inner",
+                "inner",
+                "outer",
+                "outer",
+                "outer",
+                "outer",
+            ],
+            "mass": [
+                0.330114,
+                4.86747,
+                5.97237,
+                0.641712,
+                1898.187,
+                568.3174,
+                86.8127,
+                102.4126,
+            ],
+        }
+    )
+
 
-from_file_expected = """\
+@pytest.fixture
+def from_file_expected():
+    return """\
 <?xml version='1.0' encoding='utf-8'?>
 <data>
   <row>
@@ -163,9 +171,8 @@ def parser(request):
 # FILE OUTPUT
 
 
-def test_file_output_str_read(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
-    df_file = read_xml(filename, parser=parser)
+def test_file_output_str_read(xml_books, parser, from_file_expected):
+    df_file = read_xml(xml_books, parser=parser)
 
     with tm.ensure_clean("test.xml") as path:
         df_file.to_xml(path, parser=parser)
@@ -177,9 +184,8 @@ def test_file_output_str_read(datapath, parser):
         assert output == from_file_expected
 
 
-def test_file_output_bytes_read(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
-    df_file = read_xml(filename, parser=parser)
+def test_file_output_bytes_read(xml_books, parser, from_file_expected):
+    df_file = read_xml(xml_books, parser=parser)
 
     with tm.ensure_clean("test.xml") as path:
         df_file.to_xml(path, parser=parser)
@@ -191,9 +197,8 @@ def test_file_output_bytes_read(datapath, parser):
         assert output == from_file_expected
 
 
-def test_str_output(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
-    df_file = read_xml(filename, parser=parser)
+def test_str_output(xml_books, parser, from_file_expected):
+    df_file = read_xml(xml_books, parser=parser)
 
     output = df_file.to_xml(parser=parser)
     output = equalize_decl(output)
@@ -201,7 +206,7 @@ def test_str_output(datapath, parser):
     assert output == from_file_expected
 
 
-def test_wrong_file_path(parser):
+def test_wrong_file_path(parser, geom_df):
     path = "/my/fake/path/output.xml"
 
     with pytest.raises(
@@ -214,7 +219,7 @@ def test_wrong_file_path(parser):
 # INDEX
 
 
-def test_index_false(datapath, parser):
+def test_index_false(xml_books, parser):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <data>
@@ -241,8 +246,7 @@ def test_index_false(datapath, parser):
   </row>
 </data>"""
 
-    filename = datapath("io", "data", "xml", "books.xml")
-    df_file = read_xml(filename, parser=parser)
+    df_file = read_xml(xml_books, parser=parser)
 
     with tm.ensure_clean("test.xml") as path:
         df_file.to_xml(path, index=False, parser=parser)
@@ -254,7 +258,7 @@ def test_index_false(datapath, parser):
         assert output == expected
 
 
-def test_index_false_rename_row_root(datapath, parser):
+def test_index_false_rename_row_root(xml_books, parser):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <books>
@@ -281,8 +285,7 @@ def test_index_false_rename_row_root(datapath, parser):
   </book>
 </books>"""
 
-    filename = datapath("io", "data", "xml", "books.xml")
-    df_file = read_xml(filename, parser=parser)
+    df_file = read_xml(xml_books, parser=parser)
 
     with tm.ensure_clean("test.xml") as path:
         df_file.to_xml(
@@ -299,7 +302,7 @@ def test_index_false_rename_row_root(datapath, parser):
 @pytest.mark.parametrize(
     "offset_index", [list(range(10, 13)), [str(i) for i in range(10, 13)]]
 )
-def test_index_false_with_offset_input_index(parser, offset_index):
+def test_index_false_with_offset_input_index(parser, offset_index, geom_df):
     """
     Tests that the output does not contain the `<index>` field when the index of the
     input Dataframe has an offset.
@@ -361,21 +364,21 @@ def test_index_false_with_offset_input_index(parser, offset_index):
 </data>"""
 
 
-def test_na_elem_output(parser):
+def test_na_elem_output(parser, geom_df):
     output = geom_df.to_xml(parser=parser)
     output = equalize_decl(output)
 
     assert output == na_expected
 
 
-def test_na_empty_str_elem_option(parser):
+def test_na_empty_str_elem_option(parser, geom_df):
     output = geom_df.to_xml(na_rep="", parser=parser)
     output = equalize_decl(output)
 
     assert output == na_expected
 
 
-def test_na_empty_elem_option(parser):
+def test_na_empty_elem_option(parser, geom_df):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <data>
@@ -408,7 +411,7 @@ def test_na_empty_elem_option(parser):
 # ATTR_COLS
 
 
-def test_attrs_cols_nan_output(parser):
+def test_attrs_cols_nan_output(parser, geom_df):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <data>
@@ -423,7 +426,7 @@ def test_attrs_cols_nan_output(parser):
     assert output == expected
 
 
-def test_attrs_cols_prefix(parser):
+def test_attrs_cols_prefix(parser, geom_df):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <doc:data xmlns:doc="http://example.xom">
@@ -446,12 +449,12 @@ def test_attrs_cols_prefix(parser):
     assert output == expected
 
 
-def test_attrs_unknown_column(parser):
+def test_attrs_unknown_column(parser, geom_df):
     with pytest.raises(KeyError, match=("no valid column")):
         geom_df.to_xml(attr_cols=["shape", "degree", "sides"], parser=parser)
 
 
-def test_attrs_wrong_type(parser):
+def test_attrs_wrong_type(parser, geom_df):
     with pytest.raises(TypeError, match=("is not a valid type for attr_cols")):
         geom_df.to_xml(attr_cols='"shape", "degree", "sides"', parser=parser)
 
@@ -459,7 +462,7 @@ def test_attrs_wrong_type(parser):
 # ELEM_COLS
 
 
-def test_elems_cols_nan_output(parser):
+def test_elems_cols_nan_output(parser, geom_df):
     elems_cols_expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <data>
@@ -488,17 +491,17 @@ def test_elems_cols_nan_output(parser):
     assert output == elems_cols_expected
 
 
-def test_elems_unknown_column(parser):
+def test_elems_unknown_column(parser, geom_df):
     with pytest.raises(KeyError, match=("no valid column")):
         geom_df.to_xml(elem_cols=["shape", "degree", "sides"], parser=parser)
 
 
-def test_elems_wrong_type(parser):
+def test_elems_wrong_type(parser, geom_df):
     with pytest.raises(TypeError, match=("is not a valid type for elem_cols")):
         geom_df.to_xml(elem_cols='"shape", "degree", "sides"', parser=parser)
 
 
-def test_elems_and_attrs_cols(parser):
+def test_elems_and_attrs_cols(parser, geom_df):
     elems_cols_expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <data>
@@ -530,7 +533,7 @@ def test_elems_and_attrs_cols(parser):
 # HIERARCHICAL COLUMNS
 
 
-def test_hierarchical_columns(parser):
+def test_hierarchical_columns(parser, planet_df):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <data>
@@ -577,7 +580,7 @@ def test_hierarchical_columns(parser):
     assert output == expected
 
 
-def test_hierarchical_attrs_columns(parser):
+def test_hierarchical_attrs_columns(parser, planet_df):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <data>
@@ -607,7 +610,7 @@ def test_hierarchical_attrs_columns(parser):
 # MULTIINDEX
 
 
-def test_multi_index(parser):
+def test_multi_index(parser, planet_df):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <data>
@@ -646,7 +649,7 @@ def test_multi_index(parser):
     assert output == expected
 
 
-def test_multi_index_attrs_cols(parser):
+def test_multi_index_attrs_cols(parser, planet_df):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <data>
@@ -672,7 +675,7 @@ def test_multi_index_attrs_cols(parser):
 # NAMESPACE
 
 
-def test_default_namespace(parser):
+def test_default_namespace(parser, geom_df):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <data xmlns="http://example.com">
@@ -705,7 +708,7 @@ def test_default_namespace(parser):
 # PREFIX
 
 
-def test_namespace_prefix(parser):
+def test_namespace_prefix(parser, geom_df):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <doc:data xmlns:doc="http://example.com">
@@ -737,14 +740,14 @@ def test_namespace_prefix(parser):
     assert output == expected
 
 
-def test_missing_prefix_in_nmsp(parser):
+def test_missing_prefix_in_nmsp(parser, geom_df):
     with pytest.raises(KeyError, match=("doc is not included in namespaces")):
         geom_df.to_xml(
             namespaces={"": "http://example.com"}, prefix="doc", parser=parser
         )
 
 
-def test_namespace_prefix_and_default(parser):
+def test_namespace_prefix_and_default(parser, geom_df):
     expected = """\
 <?xml version='1.0' encoding='utf-8'?>
 <doc:data xmlns="http://example.com" xmlns:doc="http://other.org">
@@ -823,9 +826,8 @@ def test_namespace_prefix_and_default(parser):
 </data>"""
 
 
-def test_encoding_option_str(datapath, parser):
-    filename = datapath("io", "data", "xml", "baby_names.xml")
-    df_file = read_xml(filename, parser=parser, encoding="ISO-8859-1").head(5)
+def test_encoding_option_str(xml_baby_names, parser):
+    df_file = read_xml(xml_baby_names, parser=parser, encoding="ISO-8859-1").head(5)
 
     output = df_file.to_xml(encoding="ISO-8859-1", parser=parser)
 
@@ -840,9 +842,8 @@ def test_encoding_option_str(datapath, parser):
 
 
 @td.skip_if_no("lxml")
-def test_correct_encoding_file(datapath):
-    filename = datapath("io", "data", "xml", "baby_names.xml")
-    df_file = read_xml(filename, encoding="ISO-8859-1", parser="lxml")
+def test_correct_encoding_file(xml_baby_names):
+    df_file = read_xml(xml_baby_names, encoding="ISO-8859-1", parser="lxml")
 
     with tm.ensure_clean("test.xml") as path:
         df_file.to_xml(path, index=False, encoding="ISO-8859-1", parser="lxml")
@@ -850,15 +851,14 @@ def test_correct_encoding_file(datapath):
 
 @td.skip_if_no("lxml")
 @pytest.mark.parametrize("encoding", ["UTF-8", "UTF-16", "ISO-8859-1"])
-def test_wrong_encoding_option_lxml(datapath, parser, encoding):
-    filename = datapath("io", "data", "xml", "baby_names.xml")
-    df_file = read_xml(filename, encoding="ISO-8859-1", parser="lxml")
+def test_wrong_encoding_option_lxml(xml_baby_names, parser, encoding):
+    df_file = read_xml(xml_baby_names, encoding="ISO-8859-1", parser="lxml")
 
     with tm.ensure_clean("test.xml") as path:
         df_file.to_xml(path, index=False, encoding=encoding, parser=parser)
 
 
-def test_misspelled_encoding(parser):
+def test_misspelled_encoding(parser, geom_df):
     with pytest.raises(LookupError, match=("unknown encoding")):
         geom_df.to_xml(encoding="uft-8", parser=parser)
 
@@ -867,7 +867,7 @@ def test_misspelled_encoding(parser):
 
 
 @td.skip_if_no("lxml")
-def test_xml_declaration_pretty_print():
+def test_xml_declaration_pretty_print(geom_df):
     expected = """\
 <data>
   <row>
@@ -895,7 +895,7 @@ def test_xml_declaration_pretty_print():
     assert output == expected
 
 
-def test_no_pretty_print_with_decl(parser):
+def test_no_pretty_print_with_decl(parser, geom_df):
     expected = (
         "<?xml version='1.0' encoding='utf-8'?>\n"
         "<data><row><index>0</index><shape>square</shape>"
@@ -916,7 +916,7 @@ def test_no_pretty_print_with_decl(parser):
     assert output == expected
 
 
-def test_no_pretty_print_no_decl(parser):
+def test_no_pretty_print_no_decl(parser, geom_df):
     expected = (
         "<data><row><index>0</index><shape>square</shape>"
         "<degrees>360</degrees><sides>4.0</sides></row><row>"
@@ -939,14 +939,14 @@ def test_no_pretty_print_no_decl(parser):
 
 
 @td.skip_if_installed("lxml")
-def test_default_parser_no_lxml():
+def test_default_parser_no_lxml(geom_df):
     with pytest.raises(
         ImportError, match=("lxml not found, please install or use the etree parser.")
     ):
         geom_df.to_xml()
 
 
-def test_unknown_parser():
+def test_unknown_parser(geom_df):
     with pytest.raises(
         ValueError, match=("Values for parser can only be lxml or etree.")
     ):
@@ -980,22 +980,22 @@ def test_unknown_parser():
 
 
 @td.skip_if_no("lxml")
-def test_stylesheet_file_like(datapath, mode):
-    xsl = datapath("io", "data", "xml", "row_field_output.xsl")
-
-    with open(xsl, mode, encoding="utf-8" if mode == "r" else None) as f:
+def test_stylesheet_file_like(xsl_row_field_output, mode, geom_df):
+    with open(
+        xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
+    ) as f:
         assert geom_df.to_xml(stylesheet=f) == xsl_expected
 
 
 @td.skip_if_no("lxml")
-def test_stylesheet_io(datapath, mode):
-    xsl_path = datapath("io", "data", "xml", "row_field_output.xsl")
-
+def test_stylesheet_io(xsl_row_field_output, mode, geom_df):
     # note: By default the bodies of untyped functions are not checked,
     # consider using --check-untyped-defs
     xsl_obj: BytesIO | StringIO  # type: ignore[annotation-unchecked]
 
-    with open(xsl_path, mode, encoding="utf-8" if mode == "r" else None) as f:
+    with open(
+        xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
+    ) as f:
         if mode == "rb":
             xsl_obj = BytesIO(f.read())
         else:
@@ -1007,10 +1007,10 @@ def test_stylesheet_io(datapath, mode):
 
 
 @td.skip_if_no("lxml")
-def test_stylesheet_buffered_reader(datapath, mode):
-    xsl = datapath("io", "data", "xml", "row_field_output.xsl")
-
-    with open(xsl, mode, encoding="utf-8" if mode == "r" else None) as f:
+def test_stylesheet_buffered_reader(xsl_row_field_output, mode, geom_df):
+    with open(
+        xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
+    ) as f:
         xsl_obj = f.read()
 
     output = geom_df.to_xml(stylesheet=xsl_obj)
@@ -1019,7 +1019,7 @@ def test_stylesheet_buffered_reader(datapath, mode):
 
 
 @td.skip_if_no("lxml")
-def test_stylesheet_wrong_path():
+def test_stylesheet_wrong_path(geom_df):
     from lxml.etree import XMLSyntaxError
 
     xsl = os.path.join("data", "xml", "row_field_output.xslt")
@@ -1033,7 +1033,7 @@ def test_stylesheet_wrong_path():
 
 @td.skip_if_no("lxml")
 @pytest.mark.parametrize("val", ["", b""])
-def test_empty_string_stylesheet(val):
+def test_empty_string_stylesheet(val, geom_df):
     from lxml.etree import XMLSyntaxError
 
     msg = "|".join(
@@ -1050,7 +1050,7 @@ def test_empty_string_stylesheet(val):
 
 
 @td.skip_if_no("lxml")
-def test_incorrect_xsl_syntax():
+def test_incorrect_xsl_syntax(geom_df):
     from lxml.etree import XMLSyntaxError
 
     xsl = """\
@@ -1079,7 +1079,7 @@ def test_incorrect_xsl_syntax():
 
 
 @td.skip_if_no("lxml")
-def test_incorrect_xsl_eval():
+def test_incorrect_xsl_eval(geom_df):
     from lxml.etree import XSLTParseError
 
     xsl = """\
@@ -1108,7 +1108,7 @@ def test_incorrect_xsl_eval():
 
 
 @td.skip_if_no("lxml")
-def test_incorrect_xsl_apply():
+def test_incorrect_xsl_apply(geom_df):
     from lxml.etree import XSLTApplyError
 
     xsl = """\
@@ -1128,7 +1128,7 @@ def test_incorrect_xsl_apply():
             geom_df.to_xml(path, stylesheet=xsl)
 
 
-def test_stylesheet_with_etree():
+def test_stylesheet_with_etree(geom_df):
     xsl = """\
 <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
     <xsl:output method="xml" encoding="utf-8" indent="yes" />
@@ -1147,7 +1147,7 @@ def test_stylesheet_with_etree():
 
 
 @td.skip_if_no("lxml")
-def test_style_to_csv():
+def test_style_to_csv(geom_df):
     xsl = """\
 <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
     <xsl:output method="text" indent="yes" />
@@ -1176,7 +1176,7 @@ def test_style_to_csv():
 
 
 @td.skip_if_no("lxml")
-def test_style_to_string():
+def test_style_to_string(geom_df):
     xsl = """\
 <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
     <xsl:output method="text" indent="yes" />
@@ -1210,7 +1210,7 @@ def test_style_to_string():
 
 
 @td.skip_if_no("lxml")
-def test_style_to_json():
+def test_style_to_json(geom_df):
     xsl = """\
 <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
     <xsl:output method="text" indent="yes" />
@@ -1281,7 +1281,7 @@ def test_style_to_json():
 </data>"""
 
 
-def test_compression_output(parser, compression_only):
+def test_compression_output(parser, compression_only, geom_df):
     with tm.ensure_clean() as path:
         geom_df.to_xml(path, parser=parser, compression=compression_only)
 
@@ -1297,8 +1297,10 @@ def test_compression_output(parser, compression_only):
     assert geom_xml == output.strip()
 
 
-def test_filename_and_suffix_comp(parser, compression_only):
-    compfile = "xml." + _compression_to_extension[compression_only]
+def test_filename_and_suffix_comp(
+    parser, compression_only, geom_df, compression_to_extension
+):
+    compfile = "xml." + compression_to_extension[compression_only]
     with tm.ensure_clean(filename=compfile) as path:
         geom_df.to_xml(path, parser=parser, compression=compression_only)
 
@@ -1328,7 +1330,7 @@ def test_ea_dtypes(any_numeric_ea_dtype, parser):
     assert equalize_decl(result).strip() == expected
 
 
-def test_unsuported_compression(parser):
+def test_unsuported_compression(parser, geom_df):
     with pytest.raises(ValueError, match="Unrecognized compression type"):
         with tm.ensure_clean() as path:
             geom_df.to_xml(path, parser=parser, compression="7z")
@@ -1340,7 +1342,7 @@ def test_unsuported_compression(parser):
 @pytest.mark.single_cpu
 @td.skip_if_no("s3fs")
 @td.skip_if_no("lxml")
-def test_s3_permission_output(parser, s3_resource):
+def test_s3_permission_output(parser, s3_resource, geom_df):
     # s3_resource hosts pandas-test
     import s3fs
 
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index 04abebe4a0a71..b0e806caecc80 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -276,18 +276,17 @@ def read_xml_iterparse_comp(comp_path, compression_only, **kwargs):
 
 
 @td.skip_if_no("lxml")
-def test_parser_consistency_file(datapath):
-    filename = datapath("io", "data", "xml", "books.xml")
-    df_file_lxml = read_xml(filename, parser="lxml")
-    df_file_etree = read_xml(filename, parser="etree")
+def test_parser_consistency_file(xml_books):
+    df_file_lxml = read_xml(xml_books, parser="lxml")
+    df_file_etree = read_xml(xml_books, parser="etree")
 
     df_iter_lxml = read_xml(
-        filename,
+        xml_books,
         parser="lxml",
         iterparse={"book": ["category", "title", "year", "author", "price"]},
     )
     df_iter_etree = read_xml(
-        filename,
+        xml_books,
         parser="etree",
         iterparse={"book": ["category", "title", "year", "author", "price"]},
     )
@@ -349,9 +348,8 @@ def test_parser_consistency_url(parser):
     tm.assert_frame_equal(df_xpath, df_iter)
 
 
-def test_file_like(datapath, parser, mode):
-    filename = datapath("io", "data", "xml", "books.xml")
-    with open(filename, mode, encoding="utf-8" if mode == "r" else None) as f:
+def test_file_like(xml_books, parser, mode):
+    with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f:
         df_file = read_xml(f, parser=parser)
 
     df_expected = DataFrame(
@@ -367,9 +365,8 @@ def test_file_like(datapath, parser, mode):
     tm.assert_frame_equal(df_file, df_expected)
 
 
-def test_file_io(datapath, parser, mode):
-    filename = datapath("io", "data", "xml", "books.xml")
-    with open(filename, mode, encoding="utf-8" if mode == "r" else None) as f:
+def test_file_io(xml_books, parser, mode):
+    with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f:
         xml_obj = f.read()
 
     df_io = read_xml(
@@ -390,9 +387,8 @@ def test_file_io(datapath, parser, mode):
     tm.assert_frame_equal(df_io, df_expected)
 
 
-def test_file_buffered_reader_string(datapath, parser, mode):
-    filename = datapath("io", "data", "xml", "books.xml")
-    with open(filename, mode, encoding="utf-8" if mode == "r" else None) as f:
+def test_file_buffered_reader_string(xml_books, parser, mode):
+    with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f:
         xml_obj = f.read()
 
     df_str = read_xml(xml_obj, parser=parser)
@@ -410,9 +406,8 @@ def test_file_buffered_reader_string(datapath, parser, mode):
     tm.assert_frame_equal(df_str, df_expected)
 
 
-def test_file_buffered_reader_no_xml_declaration(datapath, parser, mode):
-    filename = datapath("io", "data", "xml", "books.xml")
-    with open(filename, mode, encoding="utf-8" if mode == "r" else None) as f:
+def test_file_buffered_reader_no_xml_declaration(xml_books, parser, mode):
+    with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f:
         next(f)
         xml_obj = f.read()
 
@@ -441,10 +436,8 @@ def test_string_charset(parser):
     tm.assert_frame_equal(df_str, df_expected)
 
 
-def test_file_charset(datapath, parser):
-    xml_file = datapath("io", "data", "xml", "doc_ch_utf.xml")
-
-    df_file = read_xml(datapath(xml_file), parser=parser)
+def test_file_charset(xml_doc_ch_utf, parser):
+    df_file = read_xml(xml_doc_ch_utf, parser=parser)
 
     df_expected = DataFrame(
         {
@@ -474,10 +467,8 @@ def test_file_charset(datapath, parser):
     tm.assert_frame_equal(df_file, df_expected)
 
 
-def test_file_handle_close(datapath, parser):
-    xml_file = datapath("io", "data", "xml", "books.xml")
-
-    with open(xml_file, "rb") as f:
+def test_file_handle_close(xml_books, parser):
+    with open(xml_books, "rb") as f:
         read_xml(BytesIO(f.read()), parser=parser)
 
         assert not f.closed
@@ -564,27 +555,24 @@ def test_wrong_url(parser):
 
 
 @td.skip_if_no("lxml")
-def test_empty_xpath_lxml(datapath):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_empty_xpath_lxml(xml_books):
     with pytest.raises(ValueError, match=("xpath does not return any nodes")):
-        read_xml(filename, xpath=".//python", parser="lxml")
+        read_xml(xml_books, xpath=".//python", parser="lxml")
 
 
-def test_bad_xpath_etree(datapath):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_bad_xpath_etree(xml_books):
     with pytest.raises(
         SyntaxError, match=("You have used an incorrect or unsupported XPath")
     ):
-        read_xml(filename, xpath=".//[book]", parser="etree")
+        read_xml(xml_books, xpath=".//[book]", parser="etree")
 
 
 @td.skip_if_no("lxml")
-def test_bad_xpath_lxml(datapath):
+def test_bad_xpath_lxml(xml_books):
     from lxml.etree import XPathEvalError
 
-    filename = datapath("io", "data", "xml", "books.xml")
     with pytest.raises(XPathEvalError, match=("Invalid expression")):
-        read_xml(filename, xpath=".//[book]", parser="lxml")
+        read_xml(xml_books, xpath=".//[book]", parser="lxml")
 
 
 # NAMESPACE
@@ -680,25 +668,22 @@ def test_consistency_prefix_namespace():
 # PREFIX
 
 
-def test_missing_prefix_with_default_namespace(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_missing_prefix_with_default_namespace(xml_books, parser):
     with pytest.raises(ValueError, match=("xpath does not return any nodes")):
-        read_xml(filename, xpath=".//Placemark", parser=parser)
+        read_xml(xml_books, xpath=".//Placemark", parser=parser)
 
 
-def test_missing_prefix_definition_etree(datapath):
-    filename = datapath("io", "data", "xml", "cta_rail_lines.kml")
+def test_missing_prefix_definition_etree(kml_cta_rail_lines):
     with pytest.raises(SyntaxError, match=("you used an undeclared namespace prefix")):
-        read_xml(filename, xpath=".//kml:Placemark", parser="etree")
+        read_xml(kml_cta_rail_lines, xpath=".//kml:Placemark", parser="etree")
 
 
 @td.skip_if_no("lxml")
-def test_missing_prefix_definition_lxml(datapath):
+def test_missing_prefix_definition_lxml(kml_cta_rail_lines):
     from lxml.etree import XPathEvalError
 
-    filename = datapath("io", "data", "xml", "cta_rail_lines.kml")
     with pytest.raises(XPathEvalError, match=("Undefined namespace prefix")):
-        read_xml(filename, xpath=".//kml:Placemark", parser="lxml")
+        read_xml(kml_cta_rail_lines, xpath=".//kml:Placemark", parser="lxml")
 
 
 @td.skip_if_no("lxml")
@@ -718,11 +703,10 @@ def test_none_namespace_prefix(key):
 # ELEMS AND ATTRS
 
 
-def test_file_elems_and_attrs(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
-    df_file = read_xml(filename, parser=parser)
+def test_file_elems_and_attrs(xml_books, parser):
+    df_file = read_xml(xml_books, parser=parser)
     df_iter = read_xml(
-        filename,
+        xml_books,
         parser=parser,
         iterparse={"book": ["category", "title", "author", "year", "price"]},
     )
@@ -740,21 +724,19 @@ def test_file_elems_and_attrs(datapath, parser):
     tm.assert_frame_equal(df_iter, df_expected)
 
 
-def test_file_only_attrs(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
-    df_file = read_xml(filename, attrs_only=True, parser=parser)
-    df_iter = read_xml(filename, parser=parser, iterparse={"book": ["category"]})
+def test_file_only_attrs(xml_books, parser):
+    df_file = read_xml(xml_books, attrs_only=True, parser=parser)
+    df_iter = read_xml(xml_books, parser=parser, iterparse={"book": ["category"]})
     df_expected = DataFrame({"category": ["cooking", "children", "web"]})
 
     tm.assert_frame_equal(df_file, df_expected)
     tm.assert_frame_equal(df_iter, df_expected)
 
 
-def test_file_only_elems(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
-    df_file = read_xml(filename, elems_only=True, parser=parser)
+def test_file_only_elems(xml_books, parser):
+    df_file = read_xml(xml_books, elems_only=True, parser=parser)
     df_iter = read_xml(
-        filename,
+        xml_books,
         parser=parser,
         iterparse={"book": ["title", "author", "year", "price"]},
     )
@@ -771,13 +753,12 @@ def test_file_only_elems(datapath, parser):
     tm.assert_frame_equal(df_iter, df_expected)
 
 
-def test_elem_and_attrs_only(datapath, parser):
-    filename = datapath("io", "data", "xml", "cta_rail_lines.kml")
+def test_elem_and_attrs_only(kml_cta_rail_lines, parser):
     with pytest.raises(
         ValueError,
         match=("Either element or attributes can be parsed not both"),
     ):
-        read_xml(filename, elems_only=True, attrs_only=True, parser=parser)
+        read_xml(kml_cta_rail_lines, elems_only=True, attrs_only=True, parser=parser)
 
 
 def test_empty_attrs_only(parser):
@@ -856,13 +837,12 @@ def test_attribute_centric_xml():
 # NAMES
 
 
-def test_names_option_output(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_names_option_output(xml_books, parser):
     df_file = read_xml(
-        filename, names=["Col1", "Col2", "Col3", "Col4", "Col5"], parser=parser
+        xml_books, names=["Col1", "Col2", "Col3", "Col4", "Col5"], parser=parser
     )
     df_iter = read_xml(
-        filename,
+        xml_books,
         parser=parser,
         names=["Col1", "Col2", "Col3", "Col4", "Col5"],
         iterparse={"book": ["category", "title", "author", "year", "price"]},
@@ -1006,31 +986,25 @@ def test_repeat_elements(parser):
     tm.assert_frame_equal(df_iter, df_expected)
 
 
-def test_names_option_wrong_length(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
-
+def test_names_option_wrong_length(xml_books, parser):
     with pytest.raises(ValueError, match=("names does not match length")):
-        read_xml(filename, names=["Col1", "Col2", "Col3"], parser=parser)
+        read_xml(xml_books, names=["Col1", "Col2", "Col3"], parser=parser)
 
 
-def test_names_option_wrong_type(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
-
+def test_names_option_wrong_type(xml_books, parser):
     with pytest.raises(TypeError, match=("is not a valid type for names")):
-        read_xml(filename, names="Col1, Col2, Col3", parser=parser)
+        read_xml(xml_books, names="Col1, Col2, Col3", parser=parser)
 
 
 # ENCODING
 
 
-def test_wrong_encoding(datapath, parser):
-    filename = datapath("io", "data", "xml", "baby_names.xml")
+def test_wrong_encoding(xml_baby_names, parser):
     with pytest.raises(UnicodeDecodeError, match=("'utf-8' codec can't decode")):
-        read_xml(filename, parser=parser)
+        read_xml(xml_baby_names, parser=parser)
 
 
-def test_utf16_encoding(datapath, parser):
-    filename = datapath("io", "data", "xml", "baby_names.xml")
+def test_utf16_encoding(xml_baby_names, parser):
     with pytest.raises(
         UnicodeError,
         match=(
@@ -1038,35 +1012,32 @@ def test_utf16_encoding(datapath, parser):
             "'utf-16-le' codec can't decode byte"
         ),
     ):
-        read_xml(filename, encoding="UTF-16", parser=parser)
+        read_xml(xml_baby_names, encoding="UTF-16", parser=parser)
 
 
-def test_unknown_encoding(datapath, parser):
-    filename = datapath("io", "data", "xml", "baby_names.xml")
+def test_unknown_encoding(xml_baby_names, parser):
     with pytest.raises(LookupError, match=("unknown encoding: UFT-8")):
-        read_xml(filename, encoding="UFT-8", parser=parser)
+        read_xml(xml_baby_names, encoding="UFT-8", parser=parser)
 
 
-def test_ascii_encoding(datapath, parser):
-    filename = datapath("io", "data", "xml", "baby_names.xml")
+def test_ascii_encoding(xml_baby_names, parser):
     with pytest.raises(UnicodeDecodeError, match=("'ascii' codec can't decode byte")):
-        read_xml(filename, encoding="ascii", parser=parser)
+        read_xml(xml_baby_names, encoding="ascii", parser=parser)
 
 
 @td.skip_if_no("lxml")
-def test_parser_consistency_with_encoding(datapath):
-    filename = datapath("io", "data", "xml", "baby_names.xml")
-    df_xpath_lxml = read_xml(filename, parser="lxml", encoding="ISO-8859-1")
-    df_xpath_etree = read_xml(filename, parser="etree", encoding="iso-8859-1")
+def test_parser_consistency_with_encoding(xml_baby_names):
+    df_xpath_lxml = read_xml(xml_baby_names, parser="lxml", encoding="ISO-8859-1")
+    df_xpath_etree = read_xml(xml_baby_names, parser="etree", encoding="iso-8859-1")
 
     df_iter_lxml = read_xml(
-        filename,
+        xml_baby_names,
         parser="lxml",
         encoding="ISO-8859-1",
         iterparse={"row": ["rank", "malename", "femalename"]},
     )
     df_iter_etree = read_xml(
-        filename,
+        xml_baby_names,
         parser="etree",
         encoding="ISO-8859-1",
         iterparse={"row": ["rank", "malename", "femalename"]},
@@ -1107,41 +1078,34 @@ def test_none_encoding_etree():
 
 
 @td.skip_if_installed("lxml")
-def test_default_parser_no_lxml(datapath):
-    filename = datapath("io", "data", "xml", "books.xml")
-
+def test_default_parser_no_lxml(xml_books):
     with pytest.raises(
         ImportError, match=("lxml not found, please install or use the etree parser.")
     ):
-        read_xml(filename)
-
+        read_xml(xml_books)
 
-def test_wrong_parser(datapath):
-    filename = datapath("io", "data", "xml", "books.xml")
 
+def test_wrong_parser(xml_books):
     with pytest.raises(
         ValueError, match=("Values for parser can only be lxml or etree.")
     ):
-        read_xml(filename, parser="bs4")
+        read_xml(xml_books, parser="bs4")
 
 
 # STYLESHEET
 
 
 @td.skip_if_no("lxml")
-def test_stylesheet_file(datapath):
-    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
-    xsl = datapath("io", "data", "xml", "flatten_doc.xsl")
-
+def test_stylesheet_file(kml_cta_rail_lines, xsl_flatten_doc):
     df_style = read_xml(
-        kml,
+        kml_cta_rail_lines,
         xpath=".//k:Placemark",
         namespaces={"k": "http://www.opengis.net/kml/2.2"},
-        stylesheet=xsl,
+        stylesheet=xsl_flatten_doc,
     )
 
     df_iter = read_xml(
-        kml,
+        kml_cta_rail_lines,
         iterparse={
             "Placemark": [
                 "id",
@@ -1159,13 +1123,10 @@ def test_stylesheet_file(datapath):
 
 
 @td.skip_if_no("lxml")
-def test_stylesheet_file_like(datapath, mode):
-    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
-    xsl = datapath("io", "data", "xml", "flatten_doc.xsl")
-
-    with open(xsl, mode, encoding="utf-8" if mode == "r" else None) as f:
+def test_stylesheet_file_like(kml_cta_rail_lines, xsl_flatten_doc, mode):
+    with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
         df_style = read_xml(
-            kml,
+            kml_cta_rail_lines,
             xpath=".//k:Placemark",
             namespaces={"k": "http://www.opengis.net/kml/2.2"},
             stylesheet=f,
@@ -1175,22 +1136,19 @@ def test_stylesheet_file_like(datapath, mode):
 
 
 @td.skip_if_no("lxml")
-def test_stylesheet_io(datapath, mode):
-    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
-    xsl = datapath("io", "data", "xml", "flatten_doc.xsl")
-
+def test_stylesheet_io(kml_cta_rail_lines, xsl_flatten_doc, mode):
     # note: By default the bodies of untyped functions are not checked,
     # consider using --check-untyped-defs
     xsl_obj: BytesIO | StringIO  # type: ignore[annotation-unchecked]
 
-    with open(xsl, mode, encoding="utf-8" if mode == "r" else None) as f:
+    with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
         if mode == "rb":
             xsl_obj = BytesIO(f.read())
         else:
             xsl_obj = StringIO(f.read())
 
     df_style = read_xml(
-        kml,
+        kml_cta_rail_lines,
         xpath=".//k:Placemark",
         namespaces={"k": "http://www.opengis.net/kml/2.2"},
         stylesheet=xsl_obj,
@@ -1200,15 +1158,12 @@ def test_stylesheet_io(datapath, mode):
 
 
 @td.skip_if_no("lxml")
-def test_stylesheet_buffered_reader(datapath, mode):
-    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
-    xsl = datapath("io", "data", "xml", "flatten_doc.xsl")
-
-    with open(xsl, mode, encoding="utf-8" if mode == "r" else None) as f:
+def test_stylesheet_buffered_reader(kml_cta_rail_lines, xsl_flatten_doc, mode):
+    with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
         xsl_obj = f.read()
 
     df_style = read_xml(
-        kml,
+        kml_cta_rail_lines,
         xpath=".//k:Placemark",
         namespaces={"k": "http://www.opengis.net/kml/2.2"},
         stylesheet=xsl_obj,
@@ -1247,18 +1202,15 @@ def test_style_charset():
 
 
 @td.skip_if_no("lxml")
-def test_not_stylesheet(datapath):
+def test_not_stylesheet(kml_cta_rail_lines, xml_books):
     from lxml.etree import XSLTParseError
 
-    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
-    xsl = datapath("io", "data", "xml", "books.xml")
-
     with pytest.raises(XSLTParseError, match=("document is not a stylesheet")):
-        read_xml(kml, stylesheet=xsl)
+        read_xml(kml_cta_rail_lines, stylesheet=xml_books)
 
 
 @td.skip_if_no("lxml")
-def test_incorrect_xsl_syntax(datapath):
+def test_incorrect_xsl_syntax(kml_cta_rail_lines):
     from lxml.etree import XMLSyntaxError
 
     xsl = """\
@@ -1281,16 +1233,14 @@ def test_incorrect_xsl_syntax(datapath):
     <xsl:template match="k:description|k:Snippet|k:Style"/>
 </xsl:stylesheet>"""
 
-    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
-
     with pytest.raises(
         XMLSyntaxError, match=("Extra content at the end of the document")
     ):
-        read_xml(kml, stylesheet=xsl)
+        read_xml(kml_cta_rail_lines, stylesheet=xsl)
 
 
 @td.skip_if_no("lxml")
-def test_incorrect_xsl_eval(datapath):
+def test_incorrect_xsl_eval(kml_cta_rail_lines):
     from lxml.etree import XSLTParseError
 
     xsl = """\
@@ -1313,14 +1263,12 @@ def test_incorrect_xsl_eval(datapath):
     <xsl:template match="k:description|k:Snippet|k:Style"/>
 </xsl:stylesheet>"""
 
-    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
-
     with pytest.raises(XSLTParseError, match=("failed to compile")):
-        read_xml(kml, stylesheet=xsl)
+        read_xml(kml_cta_rail_lines, stylesheet=xsl)
 
 
 @td.skip_if_no("lxml")
-def test_incorrect_xsl_apply(datapath):
+def test_incorrect_xsl_apply(kml_cta_rail_lines):
     from lxml.etree import XSLTApplyError
 
     xsl = """\
@@ -1335,55 +1283,46 @@ def test_incorrect_xsl_apply(datapath):
     </xsl:template>
 </xsl:stylesheet>"""
 
-    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
-
     with pytest.raises(XSLTApplyError, match=("Cannot resolve URI")):
-        read_xml(kml, stylesheet=xsl)
+        read_xml(kml_cta_rail_lines, stylesheet=xsl)
 
 
 @td.skip_if_no("lxml")
-def test_wrong_stylesheet():
+def test_wrong_stylesheet(kml_cta_rail_lines, xml_data_path):
     from lxml.etree import XMLSyntaxError
 
-    kml = os.path.join("data", "xml", "cta_rail_lines.kml")
-    xsl = os.path.join("data", "xml", "flatten.xsl")
+    xsl = xml_data_path / "flatten.xsl"
 
     with pytest.raises(
         XMLSyntaxError,
         match=("Start tag expected, '<' not found"),
     ):
-        read_xml(kml, stylesheet=xsl)
+        read_xml(kml_cta_rail_lines, stylesheet=xsl)
 
 
 @td.skip_if_no("lxml")
-def test_stylesheet_file_close(datapath, mode):
-    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
-    xsl = datapath("io", "data", "xml", "flatten_doc.xsl")
-
+def test_stylesheet_file_close(kml_cta_rail_lines, xsl_flatten_doc, mode):
     # note: By default the bodies of untyped functions are not checked,
     # consider using --check-untyped-defs
     xsl_obj: BytesIO | StringIO  # type: ignore[annotation-unchecked]
 
-    with open(xsl, mode, encoding="utf-8" if mode == "r" else None) as f:
+    with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
         if mode == "rb":
             xsl_obj = BytesIO(f.read())
         else:
             xsl_obj = StringIO(f.read())
 
-        read_xml(kml, stylesheet=xsl_obj)
+        read_xml(kml_cta_rail_lines, stylesheet=xsl_obj)
 
         assert not f.closed
 
 
 @td.skip_if_no("lxml")
-def test_stylesheet_with_etree():
-    kml = os.path.join("data", "xml", "cta_rail_lines.kml")
-    xsl = os.path.join("data", "xml", "flatten_doc.xsl")
-
+def test_stylesheet_with_etree(kml_cta_rail_lines, xsl_flatten_doc):
     with pytest.raises(
         ValueError, match=("To use stylesheet, you need lxml installed")
     ):
-        read_xml(kml, parser="etree", stylesheet=xsl)
+        read_xml(kml_cta_rail_lines, parser="etree", stylesheet=xsl_flatten_doc)
 
 
 @td.skip_if_no("lxml")
@@ -1413,10 +1352,8 @@ def test_string_error(parser):
         )
 
 
-def test_file_like_iterparse(datapath, parser, mode):
-    filename = datapath("io", "data", "xml", "books.xml")
-
-    with open(filename, mode, encoding="utf-8" if mode == "r" else None) as f:
+def test_file_like_iterparse(xml_books, parser, mode):
+    with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f:
         if mode == "r" and parser == "lxml":
             with pytest.raises(
                 TypeError, match=("reading file objects must return bytes objects")
@@ -1449,12 +1386,10 @@ def test_file_like_iterparse(datapath, parser, mode):
     tm.assert_frame_equal(df_filelike, df_expected)
 
 
-def test_file_io_iterparse(datapath, parser, mode):
-    filename = datapath("io", "data", "xml", "books.xml")
-
+def test_file_io_iterparse(xml_books, parser, mode):
     funcIO = StringIO if mode == "r" else BytesIO
     with open(
-        filename,
+        xml_books,
         mode,
         encoding="utf-8" if mode == "r" else None,
     ) as f:
@@ -1522,22 +1457,20 @@ def test_compression_error(parser, compression_only):
             )
 
 
-def test_wrong_dict_type(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_wrong_dict_type(xml_books, parser):
     with pytest.raises(TypeError, match="list is not a valid type for iterparse"):
         read_xml(
-            filename,
+            xml_books,
             parser=parser,
             iterparse=["category", "title", "year", "author", "price"],
         )
 
 
-def test_wrong_dict_value(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_wrong_dict_value(xml_books, parser):
     with pytest.raises(
         TypeError, match="<class 'str'> is not a valid type for value in iterparse"
     ):
-        read_xml(filename, parser=parser, iterparse={"book": "category"})
+        read_xml(xml_books, parser=parser, iterparse={"book": "category"})
 
 
 def test_bad_xml(parser):
@@ -1688,23 +1621,21 @@ def test_processing_instruction(parser):
     tm.assert_frame_equal(df_iter, df_expected)
 
 
-def test_no_result(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_no_result(xml_books, parser):
     with pytest.raises(
         ParserError, match="No result from selected items in iterparse."
     ):
         read_xml(
-            filename,
+            xml_books,
             parser=parser,
             iterparse={"node": ["attr1", "elem1", "elem2", "elem3"]},
         )
 
 
-def test_empty_data(datapath, parser):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_empty_data(xml_books, parser):
     with pytest.raises(EmptyDataError, match="No columns to parse from file"):
         read_xml(
-            filename,
+            xml_books,
             parser=parser,
             iterparse={"book": ["attr1", "elem1", "elem2", "elem3"]},
         )
diff --git a/pandas/tests/io/xml/test_xml_dtypes.py b/pandas/tests/io/xml/test_xml_dtypes.py
index d62b9fa27e264..911b540dbc380 100644
--- a/pandas/tests/io/xml/test_xml_dtypes.py
+++ b/pandas/tests/io/xml/test_xml_dtypes.py
@@ -194,12 +194,13 @@ def test_dtype_float(parser):
     tm.assert_frame_equal(df_iter, df_expected)
 
 
-def test_wrong_dtype(datapath, parser, iterparse):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_wrong_dtype(xml_books, parser, iterparse):
     with pytest.raises(
         ValueError, match=('Unable to parse string "Everyday Italian" at position 0')
     ):
-        read_xml(filename, dtype={"title": "Int64"}, parser=parser, iterparse=iterparse)
+        read_xml(
+            xml_books, dtype={"title": "Int64"}, parser=parser, iterparse=iterparse
+        )
 
 
 def test_both_dtype_converters(parser):
@@ -279,25 +280,24 @@ def test_converters_date(parser):
     tm.assert_frame_equal(df_iter, df_expected)
 
 
-def test_wrong_converters_type(datapath, parser, iterparse):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_wrong_converters_type(xml_books, parser, iterparse):
     with pytest.raises(TypeError, match=("Type converters must be a dict or subclass")):
-        read_xml(filename, converters={"year", str}, parser=parser, iterparse=iterparse)
+        read_xml(
+            xml_books, converters={"year", str}, parser=parser, iterparse=iterparse
+        )
 
 
-def test_callable_func_converters(datapath, parser, iterparse):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_callable_func_converters(xml_books, parser, iterparse):
     with pytest.raises(TypeError, match=("'float' object is not callable")):
         read_xml(
-            filename, converters={"year": float()}, parser=parser, iterparse=iterparse
+            xml_books, converters={"year": float()}, parser=parser, iterparse=iterparse
         )
 
 
-def test_callable_str_converters(datapath, parser, iterparse):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_callable_str_converters(xml_books, parser, iterparse):
     with pytest.raises(TypeError, match=("'str' object is not callable")):
         read_xml(
-            filename, converters={"year": "float"}, parser=parser, iterparse=iterparse
+            xml_books, converters={"year": "float"}, parser=parser, iterparse=iterparse
         )
 
 
@@ -471,9 +471,8 @@ def test_day_first_parse_dates(parser):
         tm.assert_frame_equal(df_iter, df_expected)
 
 
-def test_wrong_parse_dates_type(datapath, parser, iterparse):
-    filename = datapath("io", "data", "xml", "books.xml")
+def test_wrong_parse_dates_type(xml_books, parser, iterparse):
     with pytest.raises(
         TypeError, match=("Only booleans, lists, and dictionaries are accepted")
     ):
-        read_xml(filename, parse_dates={"date"}, parser=parser, iterparse=iterparse)
+        read_xml(xml_books, parse_dates={"date"}, parser=parser, iterparse=iterparse)
diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py
index df3f74fa9bc7c..ab4dd58e18ce8 100644
--- a/pandas/tests/libs/test_hashtable.py
+++ b/pandas/tests/libs/test_hashtable.py
@@ -461,7 +461,7 @@ def test_get_labels_groupby_for_Int64(writable):
 
 def test_tracemalloc_works_for_StringHashTable():
     N = 1000
-    keys = np.arange(N).astype(np.compat.unicode).astype(np.object_)
+    keys = np.arange(N).astype(np.str_).astype(np.object_)
     with activated_tracemalloc():
         table = ht.StringHashTable()
         table.map_locations(keys)
@@ -484,7 +484,7 @@ def test_tracemalloc_for_empty_StringHashTable():
 
 @pytest.mark.parametrize("N", range(1, 110))
 def test_no_reallocation_StringHashTable(N):
-    keys = np.arange(N).astype(np.compat.unicode).astype(np.object_)
+    keys = np.arange(N).astype(np.str_).astype(np.object_)
     preallocated_table = ht.StringHashTable(N)
     n_buckets_start = preallocated_table.get_state()["n_buckets"]
     preallocated_table.map_locations(keys)
diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py
index 383e1b81e17a7..8583d8bcc052c 100644
--- a/pandas/tests/libs/test_lib.py
+++ b/pandas/tests/libs/test_lib.py
@@ -6,6 +6,7 @@
     lib,
     writers as libwriters,
 )
+from pandas.compat import IS64
 
 from pandas import Index
 import pandas._testing as tm
@@ -58,7 +59,7 @@ def test_fast_multiget_timedelta_resos(self):
         tm.assert_numpy_array_equal(result, expected)
 
         # case that can't be cast to td64ns
-        td = Timedelta(np.timedelta64(400, "Y"))
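+        # 146000 days (~400 years) still exceeds the ~292-year timedelta64[ns] range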
+        td = Timedelta(np.timedelta64(146000, "D"))
         assert hash(td) == hash(td.as_unit("ms"))
         assert hash(td) == hash(td.as_unit("us"))
         mapping1 = {td: 1}
@@ -248,6 +249,18 @@ def test_is_range_indexer(self, dtype):
         left = np.arange(0, 100, dtype=dtype)
         assert lib.is_range_indexer(left, 100)
 
+    @pytest.mark.skipif(
+        not IS64,
+        reason="2**31 is too big for Py_ssize_t on 32-bit. "
+        "It doesn't matter though since you cannot create an array that long on 32-bit",
+    )
+    @pytest.mark.parametrize("dtype", ["int64", "int32"])
+    def test_is_range_indexer_big_n(self, dtype):
+        # GH53616
+        left = np.arange(0, 100, dtype=dtype)
+
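+        # left is range(100), so it is not a valid range indexer for n = 2**31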
+        assert not lib.is_range_indexer(left, 2**31)
+
     @pytest.mark.parametrize("dtype", ["int64", "int32"])
     def test_is_range_indexer_not_equal(self, dtype):
         # GH#50592
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 921f2b3ef3368..8b357754085c8 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -11,9 +11,6 @@
 
 import numpy as np
 
-from pandas.util._decorators import cache_readonly
-import pandas.util._test_decorators as td
-
 from pandas.core.dtypes.api import is_list_like
 
 import pandas as pd
@@ -24,492 +21,475 @@
     from matplotlib.axes import Axes
 
 
-@td.skip_if_no_mpl
-class TestPlotBase:
+def _check_legend_labels(axes, labels=None, visible=True):
+    """
+    Check each axes has expected legend labels
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    labels : list-like
+        expected legend labels
+    visible : bool
+        expected legend visibility. labels are checked only when visible is
+        True
+    """
+    if visible and (labels is None):
+        raise ValueError("labels must be specified when visible is True")
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        if visible:
+            assert ax.get_legend() is not None
+            _check_text_labels(ax.get_legend().get_texts(), labels)
+        else:
+            assert ax.get_legend() is None
+
+
+def _check_legend_marker(ax, expected_markers=None, visible=True):
+    """
+    Check ax has expected legend markers
+
+    Parameters
+    ----------
+    ax : matplotlib Axes object
+    expected_markers : list-like
+        expected legend markers
+    visible : bool
+        expected legend visibility. Markers are checked only when visible is
+        True
+    """
+    if visible and (expected_markers is None):
+        raise ValueError("Markers must be specified when visible is True")
+    if visible:
+        handles, _ = ax.get_legend_handles_labels()
+        markers = [handle.get_marker() for handle in handles]
+        assert markers == expected_markers
+    else:
+        assert ax.get_legend() is None
+
+
+def _check_data(xp, rs):
+    """
+    Check each axes has identical lines
+
+    Parameters
+    ----------
+    xp : matplotlib Axes object
+    rs : matplotlib Axes object
+    """
+    xp_lines = xp.get_lines()
+    rs_lines = rs.get_lines()
+
+    assert len(xp_lines) == len(rs_lines)
+    for xpl, rsl in zip(xp_lines, rs_lines):
+        xpdata = xpl.get_xydata()
+        rsdata = rsl.get_xydata()
+        tm.assert_almost_equal(xpdata, rsdata)
+
+    tm.close()
+
+
+def _check_visible(collections, visible=True):
+    """
+    Check each artist is visible or not
+
+    Parameters
+    ----------
+    collections : matplotlib Artist or its list-like
+        target Artist or its list or collection
+    visible : bool
+        expected visibility
+    """
+    from matplotlib.collections import Collection
+
+    if not isinstance(collections, Collection) and not is_list_like(collections):
+        collections = [collections]
+
+    for patch in collections:
+        assert patch.get_visible() == visible
+
+
+def _check_patches_all_filled(axes: Axes | Sequence[Axes], filled: bool = True) -> None:
     """
-    This is a common base class used for various plotting tests
+    Check for each artist whether it is filled or not
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    filled : bool
+        expected filling
     """
 
-    def setup_method(self):
-        import matplotlib as mpl
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        for patch in ax.patches:
+            assert patch.fill == filled
 
-        mpl.rcdefaults()
 
-    def teardown_method(self):
-        tm.close()
+def _get_colors_mapped(series, colors):
+    unique = series.unique()
+    # the number of unique values and the number of colors can differ,
+    # depending on the slice value
+    mapped = dict(zip(unique, colors))
+    return [mapped[v] for v in series.values]
 
-    @cache_readonly
-    def plt(self):
-        import matplotlib.pyplot as plt
 
-        return plt
+def _check_colors(collections, linecolors=None, facecolors=None, mapping=None):
+    """
+    Check each artist has expected line colors and face colors
 
-    @cache_readonly
-    def colorconverter(self):
-        from matplotlib import colors
+    Parameters
+    ----------
+    collections : list-like
+        list or collection of target artist
+    linecolors : list-like which has the same length as collections
+        list of expected line colors
+    facecolors : list-like which has the same length as collections
+        list of expected face colors
+    mapping : Series
+        Series used as the color grouping key;
+        used for the andrews_curves, parallel_coordinates, and radviz tests
+    """
+    from matplotlib import colors
+    from matplotlib.collections import (
+        Collection,
+        LineCollection,
+        PolyCollection,
+    )
+    from matplotlib.lines import Line2D
+
+    conv = colors.ColorConverter
+    if linecolors is not None:
+        if mapping is not None:
+            linecolors = _get_colors_mapped(mapping, linecolors)
+            linecolors = linecolors[: len(collections)]
+
+        assert len(collections) == len(linecolors)
+        for patch, color in zip(collections, linecolors):
+            if isinstance(patch, Line2D):
+                result = patch.get_color()
+                # Line2D may contain a string color expression
+                result = conv.to_rgba(result)
+            elif isinstance(patch, (PolyCollection, LineCollection)):
+                result = tuple(patch.get_edgecolor()[0])
+            else:
+                result = patch.get_edgecolor()
 
-        return colors.colorConverter
+            expected = conv.to_rgba(color)
+            assert result == expected
 
-    def _check_legend_labels(self, axes, labels=None, visible=True):
-        """
-        Check each axes has expected legend labels
+    if facecolors is not None:
+        if mapping is not None:
+            facecolors = _get_colors_mapped(mapping, facecolors)
+            facecolors = facecolors[: len(collections)]
 
-        Parameters
-        ----------
-        axes : matplotlib Axes object, or its list-like
-        labels : list-like
-            expected legend labels
-        visible : bool
-            expected legend visibility. labels are checked only when visible is
-            True
-        """
-        if visible and (labels is None):
-            raise ValueError("labels must be specified when visible is True")
-        axes = self._flatten_visible(axes)
-        for ax in axes:
-            if visible:
-                assert ax.get_legend() is not None
-                self._check_text_labels(ax.get_legend().get_texts(), labels)
+        assert len(collections) == len(facecolors)
+        for patch, color in zip(collections, facecolors):
+            if isinstance(patch, Collection):
+                # returned as list of np.array
+                result = patch.get_facecolor()[0]
             else:
-                assert ax.get_legend() is None
-
-    def _check_legend_marker(self, ax, expected_markers=None, visible=True):
-        """
-        Check ax has expected legend markers
-
-        Parameters
-        ----------
-        ax : matplotlib Axes object
-        expected_markers : list-like
-            expected legend markers
-        visible : bool
-            expected legend visibility. labels are checked only when visible is
-            True
-        """
-        if visible and (expected_markers is None):
-            raise ValueError("Markers must be specified when visible is True")
-        if visible:
-            handles, _ = ax.get_legend_handles_labels()
-            markers = [handle.get_marker() for handle in handles]
-            assert markers == expected_markers
-        else:
-            assert ax.get_legend() is None
+                result = patch.get_facecolor()
 
-    def _check_data(self, xp, rs):
-        """
-        Check each axes has identical lines
-
-        Parameters
-        ----------
-        xp : matplotlib Axes object
-        rs : matplotlib Axes object
-        """
-        xp_lines = xp.get_lines()
-        rs_lines = rs.get_lines()
-
-        assert len(xp_lines) == len(rs_lines)
-        for xpl, rsl in zip(xp_lines, rs_lines):
-            xpdata = xpl.get_xydata()
-            rsdata = rsl.get_xydata()
-            tm.assert_almost_equal(xpdata, rsdata)
-
-        tm.close()
-
-    def _check_visible(self, collections, visible=True):
-        """
-        Check each artist is visible or not
-
-        Parameters
-        ----------
-        collections : matplotlib Artist or its list-like
-            target Artist or its list or collection
-        visible : bool
-            expected visibility
-        """
-        from matplotlib.collections import Collection
-
-        if not isinstance(collections, Collection) and not is_list_like(collections):
-            collections = [collections]
-
-        for patch in collections:
-            assert patch.get_visible() == visible
-
-    def _check_patches_all_filled(
-        self, axes: Axes | Sequence[Axes], filled: bool = True
-    ) -> None:
-        """
-        Check for each artist whether it is filled or not
-
-        Parameters
-        ----------
-        axes : matplotlib Axes object, or its list-like
-        filled : bool
-            expected filling
-        """
-
-        axes = self._flatten_visible(axes)
-        for ax in axes:
-            for patch in ax.patches:
-                assert patch.fill == filled
-
-    def _get_colors_mapped(self, series, colors):
-        unique = series.unique()
-        # unique and colors length can be differed
-        # depending on slice value
-        mapped = dict(zip(unique, colors))
-        return [mapped[v] for v in series.values]
-
-    def _check_colors(
-        self, collections, linecolors=None, facecolors=None, mapping=None
-    ):
-        """
-        Check each artist has expected line colors and face colors
-
-        Parameters
-        ----------
-        collections : list-like
-            list or collection of target artist
-        linecolors : list-like which has the same length as collections
-            list of expected line colors
-        facecolors : list-like which has the same length as collections
-            list of expected face colors
-        mapping : Series
-            Series used for color grouping key
-            used for andrew_curves, parallel_coordinates, radviz test
-        """
-        from matplotlib.collections import (
-            Collection,
-            LineCollection,
-            PolyCollection,
-        )
-        from matplotlib.lines import Line2D
-
-        conv = self.colorconverter
-        if linecolors is not None:
-            if mapping is not None:
-                linecolors = self._get_colors_mapped(mapping, linecolors)
-                linecolors = linecolors[: len(collections)]
-
-            assert len(collections) == len(linecolors)
-            for patch, color in zip(collections, linecolors):
-                if isinstance(patch, Line2D):
-                    result = patch.get_color()
-                    # Line2D may contains string color expression
-                    result = conv.to_rgba(result)
-                elif isinstance(patch, (PolyCollection, LineCollection)):
-                    result = tuple(patch.get_edgecolor()[0])
-                else:
-                    result = patch.get_edgecolor()
-
-                expected = conv.to_rgba(color)
-                assert result == expected
-
-        if facecolors is not None:
-            if mapping is not None:
-                facecolors = self._get_colors_mapped(mapping, facecolors)
-                facecolors = facecolors[: len(collections)]
-
-            assert len(collections) == len(facecolors)
-            for patch, color in zip(collections, facecolors):
-                if isinstance(patch, Collection):
-                    # returned as list of np.array
-                    result = patch.get_facecolor()[0]
-                else:
-                    result = patch.get_facecolor()
-
-                if isinstance(result, np.ndarray):
-                    result = tuple(result)
-
-                expected = conv.to_rgba(color)
-                assert result == expected
-
-    def _check_text_labels(self, texts, expected):
-        """
-        Check each text has expected labels
-
-        Parameters
-        ----------
-        texts : matplotlib Text object, or its list-like
-            target text, or its list
-        expected : str or list-like which has the same length as texts
-            expected text label, or its list
-        """
-        if not is_list_like(texts):
-            assert texts.get_text() == expected
-        else:
-            labels = [t.get_text() for t in texts]
-            assert len(labels) == len(expected)
-            for label, e in zip(labels, expected):
-                assert label == e
-
-    def _check_ticks_props(
-        self, axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None
-    ):
-        """
-        Check each axes has expected tick properties
-
-        Parameters
-        ----------
-        axes : matplotlib Axes object, or its list-like
-        xlabelsize : number
-            expected xticks font size
-        xrot : number
-            expected xticks rotation
-        ylabelsize : number
-            expected yticks font size
-        yrot : number
-            expected yticks rotation
-        """
-        from matplotlib.ticker import NullFormatter
-
-        axes = self._flatten_visible(axes)
-        for ax in axes:
-            if xlabelsize is not None or xrot is not None:
-                if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
-                    # If minor ticks has NullFormatter, rot / fontsize are not
-                    # retained
-                    labels = ax.get_xticklabels()
-                else:
-                    labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True)
-
-                for label in labels:
-                    if xlabelsize is not None:
-                        tm.assert_almost_equal(label.get_fontsize(), xlabelsize)
-                    if xrot is not None:
-                        tm.assert_almost_equal(label.get_rotation(), xrot)
-
-            if ylabelsize is not None or yrot is not None:
-                if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
-                    labels = ax.get_yticklabels()
-                else:
-                    labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True)
-
-                for label in labels:
-                    if ylabelsize is not None:
-                        tm.assert_almost_equal(label.get_fontsize(), ylabelsize)
-                    if yrot is not None:
-                        tm.assert_almost_equal(label.get_rotation(), yrot)
-
-    def _check_ax_scales(self, axes, xaxis="linear", yaxis="linear"):
-        """
-        Check each axes has expected scales
-
-        Parameters
-        ----------
-        axes : matplotlib Axes object, or its list-like
-        xaxis : {'linear', 'log'}
-            expected xaxis scale
-        yaxis : {'linear', 'log'}
-            expected yaxis scale
-        """
-        axes = self._flatten_visible(axes)
-        for ax in axes:
-            assert ax.xaxis.get_scale() == xaxis
-            assert ax.yaxis.get_scale() == yaxis
-
-    def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=None):
-        """
-        Check expected number of axes is drawn in expected layout
-
-        Parameters
-        ----------
-        axes : matplotlib Axes object, or its list-like
-        axes_num : number
-            expected number of axes. Unnecessary axes should be set to
-            invisible.
-        layout : tuple
-            expected layout, (expected number of rows , columns)
-        figsize : tuple
-            expected figsize. default is matplotlib default
-        """
-        from pandas.plotting._matplotlib.tools import flatten_axes
-
-        if figsize is None:
-            figsize = (6.4, 4.8)
-        visible_axes = self._flatten_visible(axes)
-
-        if axes_num is not None:
-            assert len(visible_axes) == axes_num
-            for ax in visible_axes:
-                # check something drawn on visible axes
-                assert len(ax.get_children()) > 0
-
-        if layout is not None:
-            result = self._get_axes_layout(flatten_axes(axes))
-            assert result == layout
-
-        tm.assert_numpy_array_equal(
-            visible_axes[0].figure.get_size_inches(),
-            np.array(figsize, dtype=np.float64),
-        )
-
-    def _get_axes_layout(self, axes):
+            if isinstance(result, np.ndarray):
+                result = tuple(result)
+
+            expected = conv.to_rgba(color)
+            assert result == expected
+
+
+def _check_text_labels(texts, expected):
+    """
+    Check each text has expected labels
+
+    Parameters
+    ----------
+    texts : matplotlib Text object, or its list-like
+        target text, or its list
+    expected : str or list-like which has the same length as texts
+        expected text label, or its list
+    """
+    if not is_list_like(texts):
+        assert texts.get_text() == expected
+    else:
+        labels = [t.get_text() for t in texts]
+        assert len(labels) == len(expected)
+        for label, e in zip(labels, expected):
+            assert label == e
+
+
+def _check_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None):
+    """
+    Check each axes has expected tick properties
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    xlabelsize : number
+        expected xticks font size
+    xrot : number
+        expected xticks rotation
+    ylabelsize : number
+        expected yticks font size
+    yrot : number
+        expected yticks rotation
+    """
+    from matplotlib.ticker import NullFormatter
+
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        if xlabelsize is not None or xrot is not None:
+            if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
+                # If the minor ticks have a NullFormatter, rot / fontsize are
+                # not retained
+                labels = ax.get_xticklabels()
+            else:
+                labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True)
+
+            for label in labels:
+                if xlabelsize is not None:
+                    tm.assert_almost_equal(label.get_fontsize(), xlabelsize)
+                if xrot is not None:
+                    tm.assert_almost_equal(label.get_rotation(), xrot)
+
+        if ylabelsize is not None or yrot is not None:
+            if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
+                labels = ax.get_yticklabels()
+            else:
+                labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True)
+
+            for label in labels:
+                if ylabelsize is not None:
+                    tm.assert_almost_equal(label.get_fontsize(), ylabelsize)
+                if yrot is not None:
+                    tm.assert_almost_equal(label.get_rotation(), yrot)
+
+
+def _check_ax_scales(axes, xaxis="linear", yaxis="linear"):
+    """
+    Check each axes has expected scales
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    xaxis : {'linear', 'log'}
+        expected xaxis scale
+    yaxis : {'linear', 'log'}
+        expected yaxis scale
+    """
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        assert ax.xaxis.get_scale() == xaxis
+        assert ax.yaxis.get_scale() == yaxis
+
+
+def _check_axes_shape(axes, axes_num=None, layout=None, figsize=None):
+    """
+    Check expected number of axes is drawn in expected layout
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    axes_num : number
+        expected number of axes. Unnecessary axes should be set to
+        invisible.
+    layout : tuple
+        expected layout, (expected number of rows, columns)
+    figsize : tuple
+        expected figsize. Defaults to the matplotlib default
+    """
+    from pandas.plotting._matplotlib.tools import flatten_axes
+
+    if figsize is None:
+        figsize = (6.4, 4.8)
+    visible_axes = _flatten_visible(axes)
+
+    if axes_num is not None:
+        assert len(visible_axes) == axes_num
+        for ax in visible_axes:
+            # check something drawn on visible axes
+            assert len(ax.get_children()) > 0
+
+    if layout is not None:
         x_set = set()
         y_set = set()
-        for ax in axes:
+        for ax in flatten_axes(axes):
             # check axes coordinates to estimate layout
             points = ax.get_position().get_points()
             x_set.add(points[0][0])
             y_set.add(points[0][1])
-        return (len(y_set), len(x_set))
-
-    def _flatten_visible(self, axes):
-        """
-        Flatten axes, and filter only visible
-
-        Parameters
-        ----------
-        axes : matplotlib Axes object, or its list-like
-
-        """
-        from pandas.plotting._matplotlib.tools import flatten_axes
-
-        axes = flatten_axes(axes)
-        axes = [ax for ax in axes if ax.get_visible()]
-        return axes
-
-    def _check_has_errorbars(self, axes, xerr=0, yerr=0):
-        """
-        Check axes has expected number of errorbars
-
-        Parameters
-        ----------
-        axes : matplotlib Axes object, or its list-like
-        xerr : number
-            expected number of x errorbar
-        yerr : number
-            expected number of y errorbar
-        """
-        axes = self._flatten_visible(axes)
-        for ax in axes:
-            containers = ax.containers
-            xerr_count = 0
-            yerr_count = 0
-            for c in containers:
-                has_xerr = getattr(c, "has_xerr", False)
-                has_yerr = getattr(c, "has_yerr", False)
-                if has_xerr:
-                    xerr_count += 1
-                if has_yerr:
-                    yerr_count += 1
-            assert xerr == xerr_count
-            assert yerr == yerr_count
-
-    def _check_box_return_type(
-        self, returned, return_type, expected_keys=None, check_ax_title=True
-    ):
-        """
-        Check box returned type is correct
-
-        Parameters
-        ----------
-        returned : object to be tested, returned from boxplot
-        return_type : str
-            return_type passed to boxplot
-        expected_keys : list-like, optional
-            group labels in subplot case. If not passed,
-            the function checks assuming boxplot uses single ax
-        check_ax_title : bool
-            Whether to check the ax.title is the same as expected_key
-            Intended to be checked by calling from ``boxplot``.
-            Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.
-        """
-        from matplotlib.axes import Axes
-
-        types = {"dict": dict, "axes": Axes, "both": tuple}
-        if expected_keys is None:
-            # should be fixed when the returning default is changed
-            if return_type is None:
-                return_type = "dict"
-
-            assert isinstance(returned, types[return_type])
-            if return_type == "both":
-                assert isinstance(returned.ax, Axes)
-                assert isinstance(returned.lines, dict)
-        else:
-            # should be fixed when the returning default is changed
-            if return_type is None:
-                for r in self._flatten_visible(returned):
-                    assert isinstance(r, Axes)
-                return
-
-            assert isinstance(returned, Series)
-
-            assert sorted(returned.keys()) == sorted(expected_keys)
-            for key, value in returned.items():
-                assert isinstance(value, types[return_type])
-                # check returned dict has correct mapping
-                if return_type == "axes":
-                    if check_ax_title:
-                        assert value.get_title() == key
-                elif return_type == "both":
-                    if check_ax_title:
-                        assert value.ax.get_title() == key
-                    assert isinstance(value.ax, Axes)
-                    assert isinstance(value.lines, dict)
-                elif return_type == "dict":
-                    line = value["medians"][0]
-                    axes = line.axes
-                    if check_ax_title:
-                        assert axes.get_title() == key
-                else:
-                    raise AssertionError
-
-    def _check_grid_settings(self, obj, kinds, kws={}):
-        # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
-
-        import matplotlib as mpl
-
-        def is_grid_on():
-            xticks = self.plt.gca().xaxis.get_major_ticks()
-            yticks = self.plt.gca().yaxis.get_major_ticks()
-            xoff = all(not g.gridline.get_visible() for g in xticks)
-            yoff = all(not g.gridline.get_visible() for g in yticks)
-
-            return not (xoff and yoff)
-
-        spndx = 1
-        for kind in kinds:
-            self.plt.subplot(1, 4 * len(kinds), spndx)
+        result = (len(y_set), len(x_set))
+        assert result == layout
+
+    tm.assert_numpy_array_equal(
+        visible_axes[0].figure.get_size_inches(),
+        np.array(figsize, dtype=np.float64),
+    )
+
+
+def _flatten_visible(axes):
+    """
+    Flatten axes, and filter only visible
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+
+    """
+    from pandas.plotting._matplotlib.tools import flatten_axes
+
+    axes = flatten_axes(axes)
+    axes = [ax for ax in axes if ax.get_visible()]
+    return axes
+
+
+def _check_has_errorbars(axes, xerr=0, yerr=0):
+    """
+    Check that the axes have the expected number of errorbars
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    xerr : number
+        expected number of x errorbars
+    yerr : number
+        expected number of y errorbars
+    """
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        containers = ax.containers
+        xerr_count = 0
+        yerr_count = 0
+        for c in containers:
+            has_xerr = getattr(c, "has_xerr", False)
+            has_yerr = getattr(c, "has_yerr", False)
+            if has_xerr:
+                xerr_count += 1
+            if has_yerr:
+                yerr_count += 1
+        assert xerr == xerr_count
+        assert yerr == yerr_count
+
+
+def _check_box_return_type(
+    returned, return_type, expected_keys=None, check_ax_title=True
+):
+    """
+    Check box returned type is correct
+
+    Parameters
+    ----------
+    returned : object to be tested, returned from boxplot
+    return_type : str
+        return_type passed to boxplot
+    expected_keys : list-like, optional
+        group labels in the subplot case. If not passed,
+        the function checks assuming boxplot uses a single ax
+    check_ax_title : bool
+        Whether to check that ax.title is the same as expected_key.
+        Intended for calls coming from ``boxplot``; normal ``plot`` doesn't
+        attach ``ax.title``, so this must be disabled there.
+    """
+    from matplotlib.axes import Axes
+
+    types = {"dict": dict, "axes": Axes, "both": tuple}
+    if expected_keys is None:
+        # should be fixed when the returning default is changed
+        if return_type is None:
+            return_type = "dict"
+
+        assert isinstance(returned, types[return_type])
+        if return_type == "both":
+            assert isinstance(returned.ax, Axes)
+            assert isinstance(returned.lines, dict)
+    else:
+        # should be fixed when the returning default is changed
+        if return_type is None:
+            for r in _flatten_visible(returned):
+                assert isinstance(r, Axes)
+            return
+
+        assert isinstance(returned, Series)
+
+        assert sorted(returned.keys()) == sorted(expected_keys)
+        for key, value in returned.items():
+            assert isinstance(value, types[return_type])
+            # check returned dict has correct mapping
+            if return_type == "axes":
+                if check_ax_title:
+                    assert value.get_title() == key
+            elif return_type == "both":
+                if check_ax_title:
+                    assert value.ax.get_title() == key
+                assert isinstance(value.ax, Axes)
+                assert isinstance(value.lines, dict)
+            elif return_type == "dict":
+                line = value["medians"][0]
+                axes = line.axes
+                if check_ax_title:
+                    assert axes.get_title() == key
+            else:
+                raise AssertionError
+
+
+def _check_grid_settings(obj, kinds, kws={}):
+    # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
+
+    import matplotlib as mpl
+
+    def is_grid_on():
+        xticks = mpl.pyplot.gca().xaxis.get_major_ticks()
+        yticks = mpl.pyplot.gca().yaxis.get_major_ticks()
+        xoff = all(not g.gridline.get_visible() for g in xticks)
+        yoff = all(not g.gridline.get_visible() for g in yticks)
+
+        return not (xoff and yoff)
+
+    spndx = 1
+    for kind in kinds:
+        mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
+        spndx += 1
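+        # rcParams grid off and no ``grid`` kwarg -> plot should not show a grid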
+        mpl.rc("axes", grid=False)
+        obj.plot(kind=kind, **kws)
+        assert not is_grid_on()
+        mpl.pyplot.clf()
+
+        mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
+        spndx += 1
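+        # rcParams grid on but ``grid=False`` passed -> the kwarg wins, no grid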
+        mpl.rc("axes", grid=True)
+        obj.plot(kind=kind, grid=False, **kws)
+        assert not is_grid_on()
+        mpl.pyplot.clf()
+
+        if kind not in ["pie", "hexbin", "scatter"]:
+            mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
             spndx += 1
-            mpl.rc("axes", grid=False)
+            mpl.rc("axes", grid=True)
             obj.plot(kind=kind, **kws)
-            assert not is_grid_on()
-            self.plt.clf()
+            assert is_grid_on()
+            mpl.pyplot.clf()
 
-            self.plt.subplot(1, 4 * len(kinds), spndx)
+            mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
             spndx += 1
-            mpl.rc("axes", grid=True)
-            obj.plot(kind=kind, grid=False, **kws)
-            assert not is_grid_on()
-            self.plt.clf()
-
-            if kind not in ["pie", "hexbin", "scatter"]:
-                self.plt.subplot(1, 4 * len(kinds), spndx)
-                spndx += 1
-                mpl.rc("axes", grid=True)
-                obj.plot(kind=kind, **kws)
-                assert is_grid_on()
-                self.plt.clf()
-
-                self.plt.subplot(1, 4 * len(kinds), spndx)
-                spndx += 1
-                mpl.rc("axes", grid=False)
-                obj.plot(kind=kind, grid=True, **kws)
-                assert is_grid_on()
-                self.plt.clf()
-
-    def _unpack_cycler(self, rcParams, field="color"):
-        """
-        Auxiliary function for correctly unpacking cycler after MPL >= 1.5
-        """
-        return [v[field] for v in rcParams["axes.prop_cycle"]]
-
-    def get_x_axis(self, ax):
-        return ax._shared_axes["x"]
-
-    def get_y_axis(self, ax):
-        return ax._shared_axes["y"]
+            mpl.rc("axes", grid=False)
+            obj.plot(kind=kind, grid=True, **kws)
+            assert is_grid_on()
+            mpl.pyplot.clf()
+
+
+def _unpack_cycler(rcParams, field="color"):
+    """
+    Auxiliary function for correctly unpacking cycler after MPL >= 1.5
+    """
+    return [v[field] for v in rcParams["axes.prop_cycle"]]
+
+
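+# These helpers read matplotlib's private ``_shared_axes`` groupers to check
+# which Axes share an x- or y-axis.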
+def get_x_axis(ax):
+    return ax._shared_axes["x"]
+
+
+def get_y_axis(ax):
+    return ax._shared_axes["y"]
 
 
 def _check_plot_works(f, default_axes=False, **kwargs):
diff --git a/pandas/tests/plotting/conftest.py b/pandas/tests/plotting/conftest.py
index 14c413f96c4ba..6f77356eb3762 100644
--- a/pandas/tests/plotting/conftest.py
+++ b/pandas/tests/plotting/conftest.py
@@ -7,6 +7,30 @@
 )
 
 
+@pytest.fixture(autouse=True)
+def non_interactive():
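+    # Use the non-interactive "template" backend so tests never open figure windows.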
+    mpl = pytest.importorskip("matplotlib")
+    mpl.use("template")
+    yield
+
+
+@pytest.fixture(autouse=True)
+def reset_rcParams():
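+    # Run each test inside an rc_context so rcParams changes are reverted afterwards.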
+    mpl = pytest.importorskip("matplotlib")
+    with mpl.rc_context():
+        yield
+
+
+@pytest.fixture(autouse=True)
+def close_all_figures():
+    # https://stackoverflow.com/q/31156578
+    yield
+    plt = pytest.importorskip("matplotlib.pyplot")
+    plt.cla()
+    plt.clf()
+    plt.close("all")
+
+
 @pytest.fixture
 def hist_df():
     n = 100
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index ded3c1142f27b..01762e39c36c1 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -29,15 +29,27 @@
 )
 import pandas._testing as tm
 from pandas.tests.plotting.common import (
-    TestPlotBase,
+    _check_ax_scales,
+    _check_axes_shape,
+    _check_box_return_type,
+    _check_colors,
+    _check_data,
+    _check_grid_settings,
+    _check_has_errorbars,
+    _check_legend_labels,
     _check_plot_works,
+    _check_text_labels,
+    _check_ticks_props,
+    _check_visible,
+    get_y_axis,
 )
 
 from pandas.io.formats.printing import pprint_thing
 
+mpl = pytest.importorskip("matplotlib")
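+# importorskip at module level skips these tests entirely when matplotlib is
+# missing, replacing the class-level ``@td.skip_if_no_mpl`` decorator.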
 
-@td.skip_if_no_mpl
-class TestDataFramePlots(TestPlotBase):
+
+class TestDataFramePlots:
     @pytest.mark.xfail(reason="Api changed in 3.6.0")
     @pytest.mark.slow
     def test_plot(self):
@@ -46,7 +58,7 @@ def test_plot(self):
 
         # _check_plot_works adds an ax so use default_axes=True to avoid warning
         axes = _check_plot_works(df.plot, default_axes=True, subplots=True)
-        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
+        _check_axes_shape(axes, axes_num=4, layout=(4, 1))
 
         axes = _check_plot_works(
             df.plot,
@@ -54,7 +66,7 @@ def test_plot(self):
             subplots=True,
             layout=(-1, 2),
         )
-        self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
+        _check_axes_shape(axes, axes_num=4, layout=(2, 2))
 
         axes = _check_plot_works(
             df.plot,
@@ -62,8 +74,8 @@ def test_plot(self):
             subplots=True,
             use_index=False,
         )
-        self._check_ticks_props(axes, xrot=0)
-        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
+        _check_ticks_props(axes, xrot=0)
+        _check_axes_shape(axes, axes_num=4, layout=(4, 1))
 
         df = DataFrame({"x": [1, 2], "y": [3, 4]})
         msg = "'Line2D' object has no property 'blarg'"
@@ -73,7 +85,7 @@ def test_plot(self):
         df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
 
         ax = _check_plot_works(df.plot, use_index=True)
-        self._check_ticks_props(ax, xrot=0)
+        _check_ticks_props(ax, xrot=0)
         _check_plot_works(df.plot, yticks=[1, 5, 10])
         _check_plot_works(df.plot, xticks=[1, 5, 10])
         _check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
@@ -86,25 +98,25 @@ def test_plot(self):
         # present).  see: https://github.com/pandas-dev/pandas/issues/9737
 
         axes = df.plot(subplots=True, title="blah")
-        self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+        _check_axes_shape(axes, axes_num=3, layout=(3, 1))
         # axes[0].figure.savefig("test.png")
         for ax in axes[:2]:
-            self._check_visible(ax.xaxis)  # xaxis must be visible for grid
-            self._check_visible(ax.get_xticklabels(), visible=False)
-            self._check_visible(ax.get_xticklabels(minor=True), visible=False)
-            self._check_visible([ax.xaxis.get_label()], visible=False)
+            _check_visible(ax.xaxis)  # xaxis must be visible for grid
+            _check_visible(ax.get_xticklabels(), visible=False)
+            _check_visible(ax.get_xticklabels(minor=True), visible=False)
+            _check_visible([ax.xaxis.get_label()], visible=False)
         for ax in [axes[2]]:
-            self._check_visible(ax.xaxis)
-            self._check_visible(ax.get_xticklabels())
-            self._check_visible([ax.xaxis.get_label()])
-            self._check_ticks_props(ax, xrot=0)
+            _check_visible(ax.xaxis)
+            _check_visible(ax.get_xticklabels())
+            _check_visible([ax.xaxis.get_label()])
+            _check_ticks_props(ax, xrot=0)
 
         _check_plot_works(df.plot, title="blah")
 
         tuples = zip(string.ascii_letters[:10], range(10))
         df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
         ax = _check_plot_works(df.plot, use_index=True)
-        self._check_ticks_props(ax, xrot=0)
+        _check_ticks_props(ax, xrot=0)
 
         # unicode
         index = MultiIndex.from_tuples(
@@ -130,13 +142,13 @@ def test_plot(self):
         # Test with single column
         df = DataFrame({"x": np.random.rand(10)})
         axes = _check_plot_works(df.plot.bar, subplots=True)
-        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+        _check_axes_shape(axes, axes_num=1, layout=(1, 1))
 
         axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))
-        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+        _check_axes_shape(axes, axes_num=1, layout=(1, 1))
         # When ax is supplied and required number of axes is 1,
         # passed ax should be used:
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         axes = df.plot.bar(subplots=True, ax=ax)
         assert len(axes) == 1
         result = ax.axes
@@ -190,7 +202,7 @@ def test_nonnumeric_exclude(self):
     def test_implicit_label(self):
         df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
         ax = df.plot(x="a", y="b")
-        self._check_text_labels(ax.xaxis.get_label(), "a")
+        _check_text_labels(ax.xaxis.get_label(), "a")
 
     def test_donot_overwrite_index_name(self):
         # GH 8494
@@ -202,23 +214,23 @@ def test_donot_overwrite_index_name(self):
     def test_plot_xy(self):
         # columns.inferred_type == 'string'
         df = tm.makeTimeDataFrame()
-        self._check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot())
-        self._check_data(df.plot(x=0), df.set_index("A").plot())
-        self._check_data(df.plot(y=0), df.B.plot())
-        self._check_data(df.plot(x="A", y="B"), df.set_index("A").B.plot())
-        self._check_data(df.plot(x="A"), df.set_index("A").plot())
-        self._check_data(df.plot(y="B"), df.B.plot())
+        _check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot())
+        _check_data(df.plot(x=0), df.set_index("A").plot())
+        _check_data(df.plot(y=0), df.B.plot())
+        _check_data(df.plot(x="A", y="B"), df.set_index("A").B.plot())
+        _check_data(df.plot(x="A"), df.set_index("A").plot())
+        _check_data(df.plot(y="B"), df.B.plot())
 
         # columns.inferred_type == 'integer'
         df.columns = np.arange(1, len(df.columns) + 1)
-        self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
-        self._check_data(df.plot(x=1), df.set_index(1).plot())
-        self._check_data(df.plot(y=1), df[1].plot())
+        _check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
+        _check_data(df.plot(x=1), df.set_index(1).plot())
+        _check_data(df.plot(y=1), df[1].plot())
 
         # figsize and title
         ax = df.plot(x=1, y=2, title="Test", figsize=(16, 8))
-        self._check_text_labels(ax.title, "Test")
-        self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0))
+        _check_text_labels(ax.title, "Test")
+        _check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0))
 
         # columns.inferred_type == 'mixed'
         # TODO add MultiIndex test
@@ -230,15 +242,15 @@ def test_logscales(self, input_log, expected_log):
         df = DataFrame({"a": np.arange(100)}, index=np.arange(100))
 
         ax = df.plot(logy=input_log)
-        self._check_ax_scales(ax, yaxis=expected_log)
+        _check_ax_scales(ax, yaxis=expected_log)
         assert ax.get_yscale() == expected_log
 
         ax = df.plot(logx=input_log)
-        self._check_ax_scales(ax, xaxis=expected_log)
+        _check_ax_scales(ax, xaxis=expected_log)
         assert ax.get_xscale() == expected_log
 
         ax = df.plot(loglog=input_log)
-        self._check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log)
+        _check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log)
         assert ax.get_xscale() == expected_log
         assert ax.get_yscale() == expected_log
 
@@ -256,14 +268,14 @@ def test_xcompat(self):
         ax = df.plot(x_compat=True)
         lines = ax.get_lines()
         assert not isinstance(lines[0].get_xdata(), PeriodIndex)
-        self._check_ticks_props(ax, xrot=30)
+        _check_ticks_props(ax, xrot=30)
 
         tm.close()
         plotting.plot_params["xaxis.compat"] = True
         ax = df.plot()
         lines = ax.get_lines()
         assert not isinstance(lines[0].get_xdata(), PeriodIndex)
-        self._check_ticks_props(ax, xrot=30)
+        _check_ticks_props(ax, xrot=30)
 
         tm.close()
         plotting.plot_params["x_compat"] = False
@@ -279,14 +291,14 @@ def test_xcompat(self):
             ax = df.plot()
             lines = ax.get_lines()
             assert not isinstance(lines[0].get_xdata(), PeriodIndex)
-            self._check_ticks_props(ax, xrot=30)
+            _check_ticks_props(ax, xrot=30)
 
         tm.close()
         ax = df.plot()
         lines = ax.get_lines()
         assert not isinstance(lines[0].get_xdata(), PeriodIndex)
         assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
-        self._check_ticks_props(ax, xrot=0)
+        _check_ticks_props(ax, xrot=0)
 
     def test_period_compat(self):
         # GH 9012
@@ -298,7 +310,7 @@ def test_period_compat(self):
         )
 
         df.plot()
-        self.plt.axhline(y=0)
+        mpl.pyplot.axhline(y=0)
         tm.close()
 
     def test_unsorted_index(self):
@@ -465,7 +477,7 @@ def test_line_lim(self):
         assert xmax >= lines[0].get_data()[0][-1]
 
         axes = df.plot(secondary_y=True, subplots=True)
-        self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+        _check_axes_shape(axes, axes_num=3, layout=(3, 1))
         for ax in axes:
             assert hasattr(ax, "left_ax")
             assert not hasattr(ax, "right_ax")
@@ -500,13 +512,13 @@ def test_area_lim(self, stacked):
     def test_area_sharey_dont_overwrite(self):
         # GH37942
         df = DataFrame(np.random.rand(4, 2), columns=["x", "y"])
-        fig, (ax1, ax2) = self.plt.subplots(1, 2, sharey=True)
+        fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2, sharey=True)
 
         df.plot(ax=ax1, kind="area")
         df.plot(ax=ax2, kind="area")
 
-        assert self.get_y_axis(ax1).joined(ax1, ax2)
-        assert self.get_y_axis(ax2).joined(ax1, ax2)
+        assert get_y_axis(ax1).joined(ax1, ax2)
+        assert get_y_axis(ax2).joined(ax1, ax2)
 
     def test_bar_linewidth(self):
         df = DataFrame(np.random.randn(5, 5))
@@ -523,7 +535,7 @@ def test_bar_linewidth(self):
 
         # subplots
         axes = df.plot.bar(linewidth=2, subplots=True)
-        self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
+        _check_axes_shape(axes, axes_num=5, layout=(5, 1))
         for ax in axes:
             for r in ax.patches:
                 assert r.get_linewidth() == 2
@@ -656,7 +668,7 @@ def test_plot_scatter(self):
 
         # GH 6951
         axes = df.plot(x="x", y="y", kind="scatter", subplots=True)
-        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+        _check_axes_shape(axes, axes_num=1, layout=(1, 1))
 
     def test_raise_error_on_datetime_time_data(self):
         # GH 8113, datetime.time type is not supported by matplotlib in scatter
@@ -751,7 +763,7 @@ def test_plot_scatter_with_c(self):
         # verify that we can still plot a solid color
         ax = df.plot.scatter(x=0, y=1, c="red")
         assert ax.collections[0].colorbar is None
-        self._check_colors(ax.collections, facecolors=["r"])
+        _check_colors(ax.collections, facecolors=["r"])
 
         # Ensure that we can pass an np.array straight through to matplotlib,
         # this functionality was accidentally removed previously.
@@ -825,16 +837,16 @@ def test_plot_bar(self):
 
         df = DataFrame({"a": [0, 1], "b": [1, 0]})
         ax = _check_plot_works(df.plot.bar)
-        self._check_ticks_props(ax, xrot=90)
+        _check_ticks_props(ax, xrot=90)
 
         ax = df.plot.bar(rot=35, fontsize=10)
-        self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
+        _check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
 
         ax = _check_plot_works(df.plot.barh)
-        self._check_ticks_props(ax, yrot=0)
+        _check_ticks_props(ax, yrot=0)
 
         ax = df.plot.barh(rot=55, fontsize=11)
-        self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
+        _check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
 
     def test_boxplot(self, hist_df):
         df = hist_df
@@ -843,7 +855,7 @@ def test_boxplot(self, hist_df):
         labels = [pprint_thing(c) for c in numeric_cols]
 
         ax = _check_plot_works(df.plot.box)
-        self._check_text_labels(ax.get_xticklabels(), labels)
+        _check_text_labels(ax.get_xticklabels(), labels)
         tm.assert_numpy_array_equal(
             ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1)
         )
@@ -851,7 +863,7 @@ def test_boxplot(self, hist_df):
         tm.close()
 
         axes = series.plot.box(rot=40)
-        self._check_ticks_props(axes, xrot=40, yrot=0)
+        _check_ticks_props(axes, xrot=40, yrot=0)
         tm.close()
 
         ax = _check_plot_works(series.plot.box)
@@ -860,7 +872,7 @@ def test_boxplot(self, hist_df):
         ax = df.plot.box(positions=positions)
         numeric_cols = df._get_numeric_data().columns
         labels = [pprint_thing(c) for c in numeric_cols]
-        self._check_text_labels(ax.get_xticklabels(), labels)
+        _check_text_labels(ax.get_xticklabels(), labels)
         tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
         assert len(ax.lines) == 7 * len(numeric_cols)
 
@@ -871,8 +883,8 @@ def test_boxplot_vertical(self, hist_df):
 
         # if horizontal, yticklabels are rotated
         ax = df.plot.box(rot=50, fontsize=8, vert=False)
-        self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
-        self._check_text_labels(ax.get_yticklabels(), labels)
+        _check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
+        _check_text_labels(ax.get_yticklabels(), labels)
         assert len(ax.lines) == 7 * len(numeric_cols)
 
         axes = _check_plot_works(
@@ -882,15 +894,15 @@ def test_boxplot_vertical(self, hist_df):
             vert=False,
             logx=True,
         )
-        self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
-        self._check_ax_scales(axes, xaxis="log")
+        _check_axes_shape(axes, axes_num=3, layout=(1, 3))
+        _check_ax_scales(axes, xaxis="log")
         for ax, label in zip(axes, labels):
-            self._check_text_labels(ax.get_yticklabels(), [label])
+            _check_text_labels(ax.get_yticklabels(), [label])
             assert len(ax.lines) == 7
 
         positions = np.array([3, 2, 8])
         ax = df.plot.box(positions=positions, vert=False)
-        self._check_text_labels(ax.get_yticklabels(), labels)
+        _check_text_labels(ax.get_yticklabels(), labels)
         tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
         assert len(ax.lines) == 7 * len(numeric_cols)
 
@@ -905,27 +917,27 @@ def test_boxplot_return_type(self):
             df.plot.box(return_type="not_a_type")
 
         result = df.plot.box(return_type="dict")
-        self._check_box_return_type(result, "dict")
+        _check_box_return_type(result, "dict")
 
         result = df.plot.box(return_type="axes")
-        self._check_box_return_type(result, "axes")
+        _check_box_return_type(result, "axes")
 
         result = df.plot.box()  # default axes
-        self._check_box_return_type(result, "axes")
+        _check_box_return_type(result, "axes")
 
         result = df.plot.box(return_type="both")
-        self._check_box_return_type(result, "both")
+        _check_box_return_type(result, "both")
 
     @td.skip_if_no_scipy
     def test_kde_df(self):
         df = DataFrame(np.random.randn(100, 4))
         ax = _check_plot_works(df.plot, kind="kde")
         expected = [pprint_thing(c) for c in df.columns]
-        self._check_legend_labels(ax, labels=expected)
-        self._check_ticks_props(ax, xrot=0)
+        _check_legend_labels(ax, labels=expected)
+        _check_ticks_props(ax, xrot=0)
 
         ax = df.plot(kind="kde", rot=20, fontsize=5)
-        self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
+        _check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
 
         axes = _check_plot_works(
             df.plot,
@@ -933,10 +945,10 @@ def test_kde_df(self):
             kind="kde",
             subplots=True,
         )
-        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
+        _check_axes_shape(axes, axes_num=4, layout=(4, 1))
 
         axes = df.plot(kind="kde", logy=True, subplots=True)
-        self._check_ax_scales(axes, yaxis="log")
+        _check_ax_scales(axes, yaxis="log")
 
     @td.skip_if_no_scipy
     def test_kde_missing_vals(self):
@@ -952,7 +964,7 @@ def test_hist_df(self):
 
         ax = _check_plot_works(df.plot.hist)
         expected = [pprint_thing(c) for c in df.columns]
-        self._check_legend_labels(ax, labels=expected)
+        _check_legend_labels(ax, labels=expected)
 
         axes = _check_plot_works(
             df.plot.hist,
@@ -960,11 +972,11 @@ def test_hist_df(self):
             subplots=True,
             logy=True,
         )
-        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
-        self._check_ax_scales(axes, yaxis="log")
+        _check_axes_shape(axes, axes_num=4, layout=(4, 1))
+        _check_ax_scales(axes, yaxis="log")
 
         axes = series.plot.hist(rot=40)
-        self._check_ticks_props(axes, xrot=40, yrot=0)
+        _check_ticks_props(axes, xrot=40, yrot=0)
         tm.close()
 
         ax = series.plot.hist(cumulative=True, bins=4, density=True)
@@ -981,7 +993,7 @@ def test_hist_df(self):
 
         # if horizontal, yticklabels are rotated
         axes = df.plot.hist(rot=50, fontsize=8, orientation="horizontal")
-        self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
+        _check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
 
     @pytest.mark.parametrize(
         "weights", [0.1 * np.ones(shape=(100,)), 0.1 * np.ones(shape=(100, 2))]
@@ -1293,7 +1305,7 @@ def test_y_listlike(self, x, y, lbl, colors):
 
         ax = df.plot(x=x, y=y, label=lbl, color=colors)
         assert len(ax.lines) == len(y)
-        self._check_colors(ax.get_lines(), linecolors=colors)
+        _check_colors(ax.get_lines(), linecolors=colors)
 
     @pytest.mark.parametrize("x,y,colnames", [(0, 1, ["A", "B"]), (1, 0, [0, 1])])
     def test_xy_args_integer(self, x, y, colnames):
@@ -1321,7 +1333,7 @@ def test_hexbin_basic(self):
         # is colorbar
         assert len(axes[0].figure.axes) == 2
         # return value is single axes
-        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+        _check_axes_shape(axes, axes_num=1, layout=(1, 1))
 
     def test_hexbin_with_c(self):
         df = DataFrame(
@@ -1368,10 +1380,10 @@ def test_pie_df(self):
             df.plot.pie()
 
         ax = _check_plot_works(df.plot.pie, y="Y")
-        self._check_text_labels(ax.texts, df.index)
+        _check_text_labels(ax.texts, df.index)
 
         ax = _check_plot_works(df.plot.pie, y=2)
-        self._check_text_labels(ax.texts, df.index)
+        _check_text_labels(ax.texts, df.index)
 
         axes = _check_plot_works(
             df.plot.pie,
@@ -1380,7 +1392,7 @@ def test_pie_df(self):
         )
         assert len(axes) == len(df.columns)
         for ax in axes:
-            self._check_text_labels(ax.texts, df.index)
+            _check_text_labels(ax.texts, df.index)
         for ax, ylabel in zip(axes, df.columns):
             assert ax.get_ylabel() == ylabel
 
@@ -1396,14 +1408,14 @@ def test_pie_df(self):
         assert len(axes) == len(df.columns)
 
         for ax in axes:
-            self._check_text_labels(ax.texts, labels)
-            self._check_colors(ax.patches, facecolors=color_args)
+            _check_text_labels(ax.texts, labels)
+            _check_colors(ax.patches, facecolors=color_args)
 
     def test_pie_df_nan(self):
         df = DataFrame(np.random.rand(4, 4))
         for i in range(4):
             df.iloc[i, i] = np.nan
-        fig, axes = self.plt.subplots(ncols=4)
+        fig, axes = mpl.pyplot.subplots(ncols=4)
 
         # GH 37668
         kwargs = {"normalize": True}
@@ -1434,25 +1446,25 @@ def test_errorbar_plot(self):
 
         # check line plots
         ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
 
         ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
 
         ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
 
         ax = _check_plot_works(
             (df + 1).plot, yerr=df_err, xerr=df_err, kind="bar", log=True
         )
-        self._check_has_errorbars(ax, xerr=2, yerr=2)
+        _check_has_errorbars(ax, xerr=2, yerr=2)
 
         # yerr is raw error values
         ax = _check_plot_works(df["y"].plot, yerr=np.ones(12) * 0.4)
-        self._check_has_errorbars(ax, xerr=0, yerr=1)
+        _check_has_errorbars(ax, xerr=0, yerr=1)
 
         ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
 
         # yerr is column name
         for yerr in ["yerr", "誤差"]:
@@ -1460,10 +1472,10 @@ def test_errorbar_plot(self):
             s_df[yerr] = np.ones(12) * 0.2
 
             ax = _check_plot_works(s_df.plot, yerr=yerr)
-            self._check_has_errorbars(ax, xerr=0, yerr=2)
+            _check_has_errorbars(ax, xerr=0, yerr=2)
 
             ax = _check_plot_works(s_df.plot, y="y", x="x", yerr=yerr)
-            self._check_has_errorbars(ax, xerr=0, yerr=1)
+            _check_has_errorbars(ax, xerr=0, yerr=1)
 
         with tm.external_error_raised(ValueError):
             df.plot(yerr=np.random.randn(11))
@@ -1481,19 +1493,19 @@ def test_errorbar_plot_different_kinds(self, kind):
         df_err = DataFrame(d_err)
 
         ax = _check_plot_works(df.plot, yerr=df_err["x"], kind=kind)
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
 
         ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
 
         ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
-        self._check_has_errorbars(ax, xerr=2, yerr=2)
+        _check_has_errorbars(ax, xerr=2, yerr=2)
 
         ax = _check_plot_works(df.plot, yerr=df_err["x"], xerr=df_err["x"], kind=kind)
-        self._check_has_errorbars(ax, xerr=2, yerr=2)
+        _check_has_errorbars(ax, xerr=2, yerr=2)
 
         ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
-        self._check_has_errorbars(ax, xerr=2, yerr=2)
+        _check_has_errorbars(ax, xerr=2, yerr=2)
 
         axes = _check_plot_works(
             df.plot,
@@ -1503,7 +1515,7 @@ def test_errorbar_plot_different_kinds(self, kind):
             subplots=True,
             kind=kind,
         )
-        self._check_has_errorbars(axes, xerr=1, yerr=1)
+        _check_has_errorbars(axes, xerr=1, yerr=1)
 
     @pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError)
     def test_errorbar_plot_iterator(self):
@@ -1513,16 +1525,16 @@ def test_errorbar_plot_iterator(self):
 
             # yerr is iterator
             ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
-            self._check_has_errorbars(ax, xerr=0, yerr=2)
+            _check_has_errorbars(ax, xerr=0, yerr=2)
 
     def test_errorbar_with_integer_column_names(self):
         # test with integer column names
         df = DataFrame(np.abs(np.random.randn(10, 2)))
         df_err = DataFrame(np.abs(np.random.randn(10, 2)))
         ax = _check_plot_works(df.plot, yerr=df_err)
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
         ax = _check_plot_works(df.plot, y=0, yerr=1)
-        self._check_has_errorbars(ax, xerr=0, yerr=1)
+        _check_has_errorbars(ax, xerr=0, yerr=1)
 
     @pytest.mark.slow
     def test_errorbar_with_partial_columns(self):
@@ -1531,13 +1543,13 @@ def test_errorbar_with_partial_columns(self):
         kinds = ["line", "bar"]
         for kind in kinds:
             ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
-            self._check_has_errorbars(ax, xerr=0, yerr=2)
+            _check_has_errorbars(ax, xerr=0, yerr=2)
 
         ix = date_range("1/1/2000", periods=10, freq="M")
         df.set_index(ix, inplace=True)
         df_err.set_index(ix, inplace=True)
         ax = _check_plot_works(df.plot, yerr=df_err, kind="line")
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
 
         d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
         df = DataFrame(d)
@@ -1545,7 +1557,7 @@ def test_errorbar_with_partial_columns(self):
         df_err = DataFrame(d_err)
         for err in [d_err, df_err]:
             ax = _check_plot_works(df.plot, yerr=err)
-            self._check_has_errorbars(ax, xerr=0, yerr=1)
+            _check_has_errorbars(ax, xerr=0, yerr=1)
 
     @pytest.mark.parametrize("kind", ["line", "bar", "barh"])
     def test_errorbar_timeseries(self, kind):
@@ -1558,19 +1570,19 @@ def test_errorbar_timeseries(self, kind):
         tdf_err = DataFrame(d_err, index=ix)
 
         ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
 
         ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
 
         ax = _check_plot_works(tdf.plot, y="y", yerr=tdf_err["x"], kind=kind)
-        self._check_has_errorbars(ax, xerr=0, yerr=1)
+        _check_has_errorbars(ax, xerr=0, yerr=1)
 
         ax = _check_plot_works(tdf.plot, y="y", yerr="x", kind=kind)
-        self._check_has_errorbars(ax, xerr=0, yerr=1)
+        _check_has_errorbars(ax, xerr=0, yerr=1)
 
         ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
-        self._check_has_errorbars(ax, xerr=0, yerr=2)
+        _check_has_errorbars(ax, xerr=0, yerr=2)
 
         axes = _check_plot_works(
             tdf.plot,
@@ -1579,7 +1591,7 @@ def test_errorbar_timeseries(self, kind):
             yerr=tdf_err,
             subplots=True,
         )
-        self._check_has_errorbars(axes, xerr=0, yerr=1)
+        _check_has_errorbars(axes, xerr=0, yerr=1)
 
     def test_errorbar_asymmetrical(self):
         np.random.seed(0)
@@ -1623,14 +1635,14 @@ def test_errorbar_scatter(self):
         )
 
         ax = _check_plot_works(df.plot.scatter, x="x", y="y")
-        self._check_has_errorbars(ax, xerr=0, yerr=0)
+        _check_has_errorbars(ax, xerr=0, yerr=0)
         ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err)
-        self._check_has_errorbars(ax, xerr=1, yerr=0)
+        _check_has_errorbars(ax, xerr=1, yerr=0)
 
         ax = _check_plot_works(df.plot.scatter, x="x", y="y", yerr=df_err)
-        self._check_has_errorbars(ax, xerr=0, yerr=1)
+        _check_has_errorbars(ax, xerr=0, yerr=1)
         ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err, yerr=df_err)
-        self._check_has_errorbars(ax, xerr=1, yerr=1)
+        _check_has_errorbars(ax, xerr=1, yerr=1)
 
         def _check_errorbar_color(containers, expected, has_err="has_xerr"):
             lines = []
@@ -1641,21 +1653,19 @@ def _check_errorbar_color(containers, expected, has_err="has_xerr"):
                 else:
                     lines.append(el)
             err_lines = [x for x in lines if x in ax.collections]
-            self._check_colors(
-                err_lines, linecolors=np.array([expected] * len(err_lines))
-            )
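+            # every error bar segment should use the expected color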
+            _check_colors(err_lines, linecolors=np.array([expected] * len(err_lines)))
 
         # GH 8081
         df = DataFrame(
             np.abs(np.random.randn(10, 5)), columns=["a", "b", "c", "d", "e"]
         )
         ax = df.plot.scatter(x="a", y="b", xerr="d", yerr="e", c="red")
-        self._check_has_errorbars(ax, xerr=1, yerr=1)
+        _check_has_errorbars(ax, xerr=1, yerr=1)
         _check_errorbar_color(ax.containers, "red", has_err="has_xerr")
         _check_errorbar_color(ax.containers, "red", has_err="has_yerr")
 
         ax = df.plot.scatter(x="a", y="b", yerr="e", color="green")
-        self._check_has_errorbars(ax, xerr=0, yerr=1)
+        _check_has_errorbars(ax, xerr=0, yerr=1)
         _check_errorbar_color(ax.containers, "green", has_err="has_yerr")
 
     def test_scatter_unknown_colormap(self):
@@ -1685,13 +1695,13 @@ def test_sharex_and_ax(self):
         def _check(axes):
             for ax in axes:
                 assert len(ax.lines) == 1
-                self._check_visible(ax.get_yticklabels(), visible=True)
+                _check_visible(ax.get_yticklabels(), visible=True)
             for ax in [axes[0], axes[2]]:
-                self._check_visible(ax.get_xticklabels(), visible=False)
-                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
+                _check_visible(ax.get_xticklabels(), visible=False)
+                _check_visible(ax.get_xticklabels(minor=True), visible=False)
             for ax in [axes[1], axes[3]]:
-                self._check_visible(ax.get_xticklabels(), visible=True)
-                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+                _check_visible(ax.get_xticklabels(), visible=True)
+                _check_visible(ax.get_xticklabels(minor=True), visible=True)
 
         for ax in axes:
             df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
@@ -1713,9 +1723,9 @@ def _check(axes):
         gs.tight_layout(plt.gcf())
         for ax in axes:
             assert len(ax.lines) == 1
-            self._check_visible(ax.get_yticklabels(), visible=True)
-            self._check_visible(ax.get_xticklabels(), visible=True)
-            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+            _check_visible(ax.get_yticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(minor=True), visible=True)
         tm.close()
 
     def test_sharey_and_ax(self):
@@ -1738,12 +1748,12 @@ def test_sharey_and_ax(self):
         def _check(axes):
             for ax in axes:
                 assert len(ax.lines) == 1
-                self._check_visible(ax.get_xticklabels(), visible=True)
-                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+                _check_visible(ax.get_xticklabels(), visible=True)
+                _check_visible(ax.get_xticklabels(minor=True), visible=True)
             for ax in [axes[0], axes[1]]:
-                self._check_visible(ax.get_yticklabels(), visible=True)
+                _check_visible(ax.get_yticklabels(), visible=True)
             for ax in [axes[2], axes[3]]:
-                self._check_visible(ax.get_yticklabels(), visible=False)
+                _check_visible(ax.get_yticklabels(), visible=False)
 
         for ax in axes:
             df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
@@ -1767,44 +1777,38 @@ def _check(axes):
         gs.tight_layout(plt.gcf())
         for ax in axes:
             assert len(ax.lines) == 1
-            self._check_visible(ax.get_yticklabels(), visible=True)
-            self._check_visible(ax.get_xticklabels(), visible=True)
-            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+            _check_visible(ax.get_yticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(minor=True), visible=True)
 
     @td.skip_if_no_scipy
-    def test_memory_leak(self):
+    @pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds)
+    def test_memory_leak(self, kind):
         """Check that every plot type gets properly collected."""
-        results = {}
-        for kind in plotting.PlotAccessor._all_kinds:
-            args = {}
-            if kind in ["hexbin", "scatter", "pie"]:
-                df = DataFrame(
-                    {
-                        "A": np.random.uniform(size=20),
-                        "B": np.random.uniform(size=20),
-                        "C": np.arange(20) + np.random.uniform(size=20),
-                    }
-                )
-                args = {"x": "A", "y": "B"}
-            elif kind == "area":
-                df = tm.makeTimeDataFrame().abs()
-            else:
-                df = tm.makeTimeDataFrame()
-
-            # Use a weakref so we can see if the object gets collected without
-            # also preventing it from being collected
-            results[kind] = weakref.proxy(df.plot(kind=kind, **args))
+        args = {}
+        if kind in ["hexbin", "scatter", "pie"]:
+            df = DataFrame(
+                {
+                    "A": np.random.uniform(size=20),
+                    "B": np.random.uniform(size=20),
+                    "C": np.arange(20) + np.random.uniform(size=20),
+                }
+            )
+            args = {"x": "A", "y": "B"}
+        elif kind == "area":
+            df = tm.makeTimeDataFrame().abs()
+        else:
+            df = tm.makeTimeDataFrame()
+
+        # Use a weakref so we can see if the object gets collected without
+        # also preventing it from being collected
+        ref = weakref.ref(df.plot(kind=kind, **args))
 
         # have matplotlib delete all the figures
         tm.close()
         # force a garbage collection
         gc.collect()
-        msg = "weakly-referenced object no longer exists"
-        for result_value in results.values():
-            # check that every plot was collected
-            with pytest.raises(ReferenceError, match=msg):
-                # need to actually access something to get an error
-                result_value.lines
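+        # the weak reference resolves to None once the Axes object has been collected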
+        assert ref() is None
 
     def test_df_gridspec_patterns(self):
         # GH 10819
@@ -1835,9 +1839,9 @@ def _get_horizontal_grid():
             ax2 = df.plot(ax=ax2)
             assert len(ax2.lines) == 2
             for ax in [ax1, ax2]:
-                self._check_visible(ax.get_yticklabels(), visible=True)
-                self._check_visible(ax.get_xticklabels(), visible=True)
-                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+                _check_visible(ax.get_yticklabels(), visible=True)
+                _check_visible(ax.get_xticklabels(), visible=True)
+                _check_visible(ax.get_xticklabels(minor=True), visible=True)
             tm.close()
 
         # subplots=True
@@ -1846,9 +1850,9 @@ def _get_horizontal_grid():
             assert len(ax1.lines) == 1
             assert len(ax2.lines) == 1
             for ax in axes:
-                self._check_visible(ax.get_yticklabels(), visible=True)
-                self._check_visible(ax.get_xticklabels(), visible=True)
-                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+                _check_visible(ax.get_yticklabels(), visible=True)
+                _check_visible(ax.get_xticklabels(), visible=True)
+                _check_visible(ax.get_xticklabels(minor=True), visible=True)
             tm.close()
 
         # vertical / subplots / sharex=True / sharey=True
@@ -1859,12 +1863,12 @@ def _get_horizontal_grid():
         assert len(axes[1].lines) == 1
         for ax in [ax1, ax2]:
             # y tick labels are visible because there is only one column
-            self._check_visible(ax.get_yticklabels(), visible=True)
+            _check_visible(ax.get_yticklabels(), visible=True)
         # x tick labels of axes[0] (top) are hidden
-        self._check_visible(axes[0].get_xticklabels(), visible=False)
-        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
-        self._check_visible(axes[1].get_xticklabels(), visible=True)
-        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
+        _check_visible(axes[0].get_xticklabels(), visible=False)
+        _check_visible(axes[0].get_xticklabels(minor=True), visible=False)
+        _check_visible(axes[1].get_xticklabels(), visible=True)
+        _check_visible(axes[1].get_xticklabels(minor=True), visible=True)
         tm.close()
 
         # horizontal / subplots / sharex=True / sharey=True
@@ -1873,13 +1877,13 @@ def _get_horizontal_grid():
             axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)
         assert len(axes[0].lines) == 1
         assert len(axes[1].lines) == 1
-        self._check_visible(axes[0].get_yticklabels(), visible=True)
+        _check_visible(axes[0].get_yticklabels(), visible=True)
         # y tick labels of axes[1] (right) are hidden
-        self._check_visible(axes[1].get_yticklabels(), visible=False)
+        _check_visible(axes[1].get_yticklabels(), visible=False)
         for ax in [ax1, ax2]:
             # x tick labels are visible because there is only one column
-            self._check_visible(ax.get_xticklabels(), visible=True)
-            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+            _check_visible(ax.get_xticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(minor=True), visible=True)
         tm.close()
 
         # boxed
@@ -1898,9 +1902,9 @@ def _get_boxed_grid():
         for ax in axes:
             assert len(ax.lines) == 1
             # tick labels are visible because the axes are not shared
-            self._check_visible(ax.get_yticklabels(), visible=True)
-            self._check_visible(ax.get_xticklabels(), visible=True)
-            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+            _check_visible(ax.get_yticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(minor=True), visible=True)
         tm.close()
 
         # subplots / sharex=True / sharey=True
@@ -1910,20 +1914,20 @@ def _get_boxed_grid():
         for ax in axes:
             assert len(ax.lines) == 1
         for ax in [axes[0], axes[2]]:  # left column
-            self._check_visible(ax.get_yticklabels(), visible=True)
+            _check_visible(ax.get_yticklabels(), visible=True)
         for ax in [axes[1], axes[3]]:  # right column
-            self._check_visible(ax.get_yticklabels(), visible=False)
+            _check_visible(ax.get_yticklabels(), visible=False)
         for ax in [axes[0], axes[1]]:  # top row
-            self._check_visible(ax.get_xticklabels(), visible=False)
-            self._check_visible(ax.get_xticklabels(minor=True), visible=False)
+            _check_visible(ax.get_xticklabels(), visible=False)
+            _check_visible(ax.get_xticklabels(minor=True), visible=False)
         for ax in [axes[2], axes[3]]:  # bottom row
-            self._check_visible(ax.get_xticklabels(), visible=True)
-            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+            _check_visible(ax.get_xticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(minor=True), visible=True)
         tm.close()
 
     def test_df_grid_settings(self):
         # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
-        self._check_grid_settings(
+        _check_grid_settings(
             DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}),
             plotting.PlotAccessor._dataframe_kinds,
             kws={"x": "a", "y": "b"},
@@ -1932,19 +1936,19 @@ def test_df_grid_settings(self):
     def test_plain_axes(self):
         # supplied ax itself is a SubplotAxes, but the figure also contains
         # a plain Axes object (GH11556)
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         fig.add_axes([0.2, 0.2, 0.2, 0.2])
         Series(np.random.rand(10)).plot(ax=ax)
 
         # supplied ax itself is a plain Axes, but because of the cmap keyword
         # a new ax is created for the colorbar -> also multiple axes (GH11520)
         df = DataFrame({"a": np.random.randn(8), "b": np.random.randn(8)})
-        fig = self.plt.figure()
+        fig = mpl.pyplot.figure()
         ax = fig.add_axes((0, 0, 1, 1))
         df.plot(kind="scatter", ax=ax, x="a", y="b", c="a", cmap="hsv")
 
         # other examples
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         from mpl_toolkits.axes_grid1 import make_axes_locatable
 
         divider = make_axes_locatable(ax)
@@ -1952,7 +1956,7 @@ def test_plain_axes(self):
         Series(np.random.rand(10)).plot(ax=ax)
         Series(np.random.rand(10)).plot(ax=cax)
 
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         from mpl_toolkits.axes_grid1.inset_locator import inset_axes
 
         iax = inset_axes(ax, width="30%", height=1.0, loc=3)
@@ -1973,7 +1977,7 @@ def test_secondary_axis_font_size(self, method):
 
         kwargs = {"secondary_y": sy, "fontsize": fontsize, "mark_right": True}
         ax = getattr(df.plot, method)(**kwargs)
-        self._check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize)
+        _check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize)
 
     def test_x_string_values_ticks(self):
         # Test if string plot index have a fixed xtick position
@@ -2022,7 +2026,7 @@ def test_xlim_plot_line(self, kind):
     def test_xlim_plot_line_correctly_in_mixed_plot_type(self):
         # test if xlim is set correctly when ax contains multiple different kinds
         # of plots, GH 27686
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
 
         indexes = ["k1", "k2", "k3", "k4"]
         df = DataFrame(
@@ -2080,7 +2084,7 @@ def test_group_subplot(self, kind):
         expected_labels = (["b", "e"], ["c", "d"], ["a"])
         for ax, labels in zip(axes, expected_labels):
             if kind != "pie":
-                self._check_legend_labels(ax, labels=labels)
+                _check_legend_labels(ax, labels=labels)
             if kind == "line":
                 assert len(ax.lines) == len(labels)
 
diff --git a/pandas/tests/plotting/frame/test_frame_color.py b/pandas/tests/plotting/frame/test_frame_color.py
index b9e24ff52070e..e7370375ba27b 100644
--- a/pandas/tests/plotting/frame/test_frame_color.py
+++ b/pandas/tests/plotting/frame/test_frame_color.py
@@ -10,14 +10,16 @@
 from pandas import DataFrame
 import pandas._testing as tm
 from pandas.tests.plotting.common import (
-    TestPlotBase,
+    _check_colors,
     _check_plot_works,
+    _unpack_cycler,
 )
 from pandas.util.version import Version
 
+mpl = pytest.importorskip("matplotlib")
 
-@td.skip_if_no_mpl
-class TestDataFrameColor(TestPlotBase):
+
+class TestDataFrameColor:
     @pytest.mark.parametrize(
         "color", ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
     )
@@ -84,16 +86,16 @@ def test_color_and_marker(self, color, expected):
     def test_bar_colors(self):
         import matplotlib.pyplot as plt
 
-        default_colors = self._unpack_cycler(plt.rcParams)
+        default_colors = _unpack_cycler(plt.rcParams)
 
         df = DataFrame(np.random.randn(5, 5))
         ax = df.plot.bar()
-        self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
+        _check_colors(ax.patches[::5], facecolors=default_colors[:5])
         tm.close()
 
         custom_colors = "rgcby"
         ax = df.plot.bar(color=custom_colors)
-        self._check_colors(ax.patches[::5], facecolors=custom_colors)
+        _check_colors(ax.patches[::5], facecolors=custom_colors)
         tm.close()
 
         from matplotlib import cm
@@ -101,21 +103,21 @@ def test_bar_colors(self):
         # Test str -> colormap functionality
         ax = df.plot.bar(colormap="jet")
         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
-        self._check_colors(ax.patches[::5], facecolors=rgba_colors)
+        _check_colors(ax.patches[::5], facecolors=rgba_colors)
         tm.close()
 
         # Test colormap functionality
         ax = df.plot.bar(colormap=cm.jet)
         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
-        self._check_colors(ax.patches[::5], facecolors=rgba_colors)
+        _check_colors(ax.patches[::5], facecolors=rgba_colors)
         tm.close()
 
         ax = df.loc[:, [0]].plot.bar(color="DodgerBlue")
-        self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
+        _check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
         tm.close()
 
         ax = df.plot(kind="bar", color="green")
-        self._check_colors(ax.patches[::5], facecolors=["green"] * 5)
+        _check_colors(ax.patches[::5], facecolors=["green"] * 5)
         tm.close()
 
     def test_bar_user_colors(self):
@@ -206,12 +208,12 @@ def test_scatter_colors(self):
         with pytest.raises(TypeError, match="Specify exactly one of `c` and `color`"):
             df.plot.scatter(x="a", y="b", c="c", color="green")
 
-        default_colors = self._unpack_cycler(self.plt.rcParams)
+        default_colors = _unpack_cycler(mpl.pyplot.rcParams)
 
         ax = df.plot.scatter(x="a", y="b", c="c")
         tm.assert_numpy_array_equal(
             ax.collections[0].get_facecolor()[0],
-            np.array(self.colorconverter.to_rgba(default_colors[0])),
+            np.array(mpl.colors.ColorConverter.to_rgba(default_colors[0])),
         )
 
         ax = df.plot.scatter(x="a", y="b", color="white")
@@ -241,7 +243,7 @@ def test_line_colors(self):
         df = DataFrame(np.random.randn(5, 5))
 
         ax = df.plot(color=custom_colors)
-        self._check_colors(ax.get_lines(), linecolors=custom_colors)
+        _check_colors(ax.get_lines(), linecolors=custom_colors)
 
         tm.close()
 
@@ -255,27 +257,27 @@ def test_line_colors(self):
 
         ax = df.plot(colormap="jet")
         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
-        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+        _check_colors(ax.get_lines(), linecolors=rgba_colors)
         tm.close()
 
         ax = df.plot(colormap=cm.jet)
         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
-        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+        _check_colors(ax.get_lines(), linecolors=rgba_colors)
         tm.close()
 
         # make color a list if plotting one column frame
         # handles cases like df.plot(color='DodgerBlue')
         ax = df.loc[:, [0]].plot(color="DodgerBlue")
-        self._check_colors(ax.lines, linecolors=["DodgerBlue"])
+        _check_colors(ax.lines, linecolors=["DodgerBlue"])
 
         ax = df.plot(color="red")
-        self._check_colors(ax.get_lines(), linecolors=["red"] * 5)
+        _check_colors(ax.get_lines(), linecolors=["red"] * 5)
         tm.close()
 
         # GH 10299
         custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
         ax = df.plot(color=custom_colors)
-        self._check_colors(ax.get_lines(), linecolors=custom_colors)
+        _check_colors(ax.get_lines(), linecolors=custom_colors)
         tm.close()
 
     def test_dont_modify_colors(self):
@@ -287,68 +289,68 @@ def test_line_colors_and_styles_subplots(self):
         # GH 9894
         from matplotlib import cm
 
-        default_colors = self._unpack_cycler(self.plt.rcParams)
+        default_colors = _unpack_cycler(mpl.pyplot.rcParams)
 
         df = DataFrame(np.random.randn(5, 5))
 
         axes = df.plot(subplots=True)
         for ax, c in zip(axes, list(default_colors)):
-            self._check_colors(ax.get_lines(), linecolors=[c])
+            _check_colors(ax.get_lines(), linecolors=[c])
         tm.close()
 
         # single color char
         axes = df.plot(subplots=True, color="k")
         for ax in axes:
-            self._check_colors(ax.get_lines(), linecolors=["k"])
+            _check_colors(ax.get_lines(), linecolors=["k"])
         tm.close()
 
         # single color str
         axes = df.plot(subplots=True, color="green")
         for ax in axes:
-            self._check_colors(ax.get_lines(), linecolors=["green"])
+            _check_colors(ax.get_lines(), linecolors=["green"])
         tm.close()
 
         custom_colors = "rgcby"
         axes = df.plot(color=custom_colors, subplots=True)
         for ax, c in zip(axes, list(custom_colors)):
-            self._check_colors(ax.get_lines(), linecolors=[c])
+            _check_colors(ax.get_lines(), linecolors=[c])
         tm.close()
 
         axes = df.plot(color=list(custom_colors), subplots=True)
         for ax, c in zip(axes, list(custom_colors)):
-            self._check_colors(ax.get_lines(), linecolors=[c])
+            _check_colors(ax.get_lines(), linecolors=[c])
         tm.close()
 
         # GH 10299
         custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
         axes = df.plot(color=custom_colors, subplots=True)
         for ax, c in zip(axes, list(custom_colors)):
-            self._check_colors(ax.get_lines(), linecolors=[c])
+            _check_colors(ax.get_lines(), linecolors=[c])
         tm.close()
 
         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
         for cmap in ["jet", cm.jet]:
             axes = df.plot(colormap=cmap, subplots=True)
             for ax, c in zip(axes, rgba_colors):
-                self._check_colors(ax.get_lines(), linecolors=[c])
+                _check_colors(ax.get_lines(), linecolors=[c])
             tm.close()
 
         # make color a list if plotting one column frame
         # handles cases like df.plot(color='DodgerBlue')
         axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True)
-        self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
+        _check_colors(axes[0].lines, linecolors=["DodgerBlue"])
 
         # single character style
         axes = df.plot(style="r", subplots=True)
         for ax in axes:
-            self._check_colors(ax.get_lines(), linecolors=["r"])
+            _check_colors(ax.get_lines(), linecolors=["r"])
         tm.close()
 
         # list of styles
         styles = list("rgcby")
         axes = df.plot(style=styles, subplots=True)
         for ax, c in zip(axes, styles):
-            self._check_colors(ax.get_lines(), linecolors=[c])
+            _check_colors(ax.get_lines(), linecolors=[c])
         tm.close()
 
     def test_area_colors(self):
@@ -359,12 +361,12 @@ def test_area_colors(self):
         df = DataFrame(np.random.rand(5, 5))
 
         ax = df.plot.area(color=custom_colors)
-        self._check_colors(ax.get_lines(), linecolors=custom_colors)
+        _check_colors(ax.get_lines(), linecolors=custom_colors)
         poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
-        self._check_colors(poly, facecolors=custom_colors)
+        _check_colors(poly, facecolors=custom_colors)
 
         handles, labels = ax.get_legend_handles_labels()
-        self._check_colors(handles, facecolors=custom_colors)
+        _check_colors(handles, facecolors=custom_colors)
 
         for h in handles:
             assert h.get_alpha() is None
@@ -372,40 +374,40 @@ def test_area_colors(self):
 
         ax = df.plot.area(colormap="jet")
         jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
-        self._check_colors(ax.get_lines(), linecolors=jet_colors)
+        _check_colors(ax.get_lines(), linecolors=jet_colors)
         poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
-        self._check_colors(poly, facecolors=jet_colors)
+        _check_colors(poly, facecolors=jet_colors)
 
         handles, labels = ax.get_legend_handles_labels()
-        self._check_colors(handles, facecolors=jet_colors)
+        _check_colors(handles, facecolors=jet_colors)
         for h in handles:
             assert h.get_alpha() is None
         tm.close()
 
         # When stacked=False, alpha is set to 0.5
         ax = df.plot.area(colormap=cm.jet, stacked=False)
-        self._check_colors(ax.get_lines(), linecolors=jet_colors)
+        _check_colors(ax.get_lines(), linecolors=jet_colors)
         poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
         jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
-        self._check_colors(poly, facecolors=jet_with_alpha)
+        _check_colors(poly, facecolors=jet_with_alpha)
 
         handles, labels = ax.get_legend_handles_labels()
         linecolors = jet_with_alpha
-        self._check_colors(handles[: len(jet_colors)], linecolors=linecolors)
+        _check_colors(handles[: len(jet_colors)], linecolors=linecolors)
         for h in handles:
             assert h.get_alpha() == 0.5
 
     def test_hist_colors(self):
-        default_colors = self._unpack_cycler(self.plt.rcParams)
+        default_colors = _unpack_cycler(mpl.pyplot.rcParams)
 
         df = DataFrame(np.random.randn(5, 5))
         ax = df.plot.hist()
-        self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
+        _check_colors(ax.patches[::10], facecolors=default_colors[:5])
         tm.close()
 
         custom_colors = "rgcby"
         ax = df.plot.hist(color=custom_colors)
-        self._check_colors(ax.patches[::10], facecolors=custom_colors)
+        _check_colors(ax.patches[::10], facecolors=custom_colors)
         tm.close()
 
         from matplotlib import cm
@@ -413,20 +415,20 @@ def test_hist_colors(self):
         # Test str -> colormap functionality
         ax = df.plot.hist(colormap="jet")
         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
-        self._check_colors(ax.patches[::10], facecolors=rgba_colors)
+        _check_colors(ax.patches[::10], facecolors=rgba_colors)
         tm.close()
 
         # Test colormap functionality
         ax = df.plot.hist(colormap=cm.jet)
         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
-        self._check_colors(ax.patches[::10], facecolors=rgba_colors)
+        _check_colors(ax.patches[::10], facecolors=rgba_colors)
         tm.close()
 
         ax = df.loc[:, [0]].plot.hist(color="DodgerBlue")
-        self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
+        _check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
 
         ax = df.plot(kind="hist", color="green")
-        self._check_colors(ax.patches[::10], facecolors=["green"] * 5)
+        _check_colors(ax.patches[::10], facecolors=["green"] * 5)
         tm.close()
 
     @td.skip_if_no_scipy
@@ -437,94 +439,92 @@ def test_kde_colors(self):
         df = DataFrame(np.random.rand(5, 5))
 
         ax = df.plot.kde(color=custom_colors)
-        self._check_colors(ax.get_lines(), linecolors=custom_colors)
+        _check_colors(ax.get_lines(), linecolors=custom_colors)
         tm.close()
 
         ax = df.plot.kde(colormap="jet")
         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
-        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+        _check_colors(ax.get_lines(), linecolors=rgba_colors)
         tm.close()
 
         ax = df.plot.kde(colormap=cm.jet)
         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
-        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+        _check_colors(ax.get_lines(), linecolors=rgba_colors)
 
     @td.skip_if_no_scipy
     def test_kde_colors_and_styles_subplots(self):
         from matplotlib import cm
 
-        default_colors = self._unpack_cycler(self.plt.rcParams)
+        default_colors = _unpack_cycler(mpl.pyplot.rcParams)
 
         df = DataFrame(np.random.randn(5, 5))
 
         axes = df.plot(kind="kde", subplots=True)
         for ax, c in zip(axes, list(default_colors)):
-            self._check_colors(ax.get_lines(), linecolors=[c])
+            _check_colors(ax.get_lines(), linecolors=[c])
         tm.close()
 
         # single color char
         axes = df.plot(kind="kde", color="k", subplots=True)
         for ax in axes:
-            self._check_colors(ax.get_lines(), linecolors=["k"])
+            _check_colors(ax.get_lines(), linecolors=["k"])
         tm.close()
 
         # single color str
         axes = df.plot(kind="kde", color="red", subplots=True)
         for ax in axes:
-            self._check_colors(ax.get_lines(), linecolors=["red"])
+            _check_colors(ax.get_lines(), linecolors=["red"])
         tm.close()
 
         custom_colors = "rgcby"
         axes = df.plot(kind="kde", color=custom_colors, subplots=True)
         for ax, c in zip(axes, list(custom_colors)):
-            self._check_colors(ax.get_lines(), linecolors=[c])
+            _check_colors(ax.get_lines(), linecolors=[c])
         tm.close()
 
         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
         for cmap in ["jet", cm.jet]:
             axes = df.plot(kind="kde", colormap=cmap, subplots=True)
             for ax, c in zip(axes, rgba_colors):
-                self._check_colors(ax.get_lines(), linecolors=[c])
+                _check_colors(ax.get_lines(), linecolors=[c])
             tm.close()
 
         # make color a list if plotting one column frame
         # handles cases like df.plot(color='DodgerBlue')
         axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True)
-        self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
+        _check_colors(axes[0].lines, linecolors=["DodgerBlue"])
 
         # single character style
         axes = df.plot(kind="kde", style="r", subplots=True)
         for ax in axes:
-            self._check_colors(ax.get_lines(), linecolors=["r"])
+            _check_colors(ax.get_lines(), linecolors=["r"])
         tm.close()
 
         # list of styles
         styles = list("rgcby")
         axes = df.plot(kind="kde", style=styles, subplots=True)
         for ax, c in zip(axes, styles):
-            self._check_colors(ax.get_lines(), linecolors=[c])
+            _check_colors(ax.get_lines(), linecolors=[c])
         tm.close()
 
     def test_boxplot_colors(self):
-        def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
+        def _check_colors_box(
+            bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None
+        ):
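+            # check the line colors of boxes, whiskers, medians, fliers and caps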
             # TODO: outside this func?
             if fliers_c is None:
                 fliers_c = "k"
-            self._check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"]))
-            self._check_colors(
-                bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"])
-            )
-            self._check_colors(
-                bp["medians"], linecolors=[medians_c] * len(bp["medians"])
-            )
-            self._check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"]))
-            self._check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"]))
-
-        default_colors = self._unpack_cycler(self.plt.rcParams)
+            _check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"]))
+            _check_colors(bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"]))
+            _check_colors(bp["medians"], linecolors=[medians_c] * len(bp["medians"]))
+            _check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"]))
+            _check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"]))
+
+        default_colors = _unpack_cycler(mpl.pyplot.rcParams)
 
         df = DataFrame(np.random.randn(5, 5))
         bp = df.plot.box(return_type="dict")
-        _check_colors(
+        _check_colors_box(
             bp,
             default_colors[0],
             default_colors[0],
@@ -540,7 +540,7 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
             "caps": "#123456",
         }
         bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict")
-        _check_colors(
+        _check_colors_box(
             bp,
             dict_colors["boxes"],
             dict_colors["whiskers"],
@@ -553,7 +553,7 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
         # partial colors
         dict_colors = {"whiskers": "c", "medians": "m"}
         bp = df.plot.box(color=dict_colors, return_type="dict")
-        _check_colors(bp, default_colors[0], "c", "m", default_colors[0])
+        _check_colors_box(bp, default_colors[0], "c", "m", default_colors[0])
         tm.close()
 
         from matplotlib import cm
@@ -561,21 +561,25 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
         # Test str -> colormap functionality
         bp = df.plot.box(colormap="jet", return_type="dict")
         jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)]
-        _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0])
+        _check_colors_box(
+            bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0]
+        )
         tm.close()
 
         # Test colormap functionality
         bp = df.plot.box(colormap=cm.jet, return_type="dict")
-        _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0])
+        _check_colors_box(
+            bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0]
+        )
         tm.close()
 
         # string color is applied to all artists except fliers
         bp = df.plot.box(color="DodgerBlue", return_type="dict")
-        _check_colors(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue")
+        _check_colors_box(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue")
 
         # tuple is also applied to all artists except fliers
         bp = df.plot.box(color=(0, 1, 0), sym="#123456", return_type="dict")
-        _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456")
+        _check_colors_box(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456")
 
         msg = re.escape(
             "color dict contains invalid key 'xxxx'. The key must be either "
@@ -595,8 +599,8 @@ def test_default_color_cycle(self):
         df = DataFrame(np.random.randn(5, 3))
         ax = df.plot()
 
-        expected = self._unpack_cycler(plt.rcParams)[:3]
-        self._check_colors(ax.get_lines(), linecolors=expected)
+        expected = _unpack_cycler(plt.rcParams)[:3]
+        _check_colors(ax.get_lines(), linecolors=expected)
 
     def test_no_color_bar(self):
         df = DataFrame(
@@ -664,5 +668,5 @@ def test_dataframe_none_color(self):
         # GH51953
         df = DataFrame([[1, 2, 3]])
         ax = df.plot(color=None)
-        expected = self._unpack_cycler(self.plt.rcParams)[:3]
-        self._check_colors(ax.get_lines(), linecolors=expected)
+        expected = _unpack_cycler(mpl.pyplot.rcParams)[:3]
+        _check_colors(ax.get_lines(), linecolors=expected)
diff --git a/pandas/tests/plotting/frame/test_frame_groupby.py b/pandas/tests/plotting/frame/test_frame_groupby.py
index 9c148645966ad..f1924185a3df1 100644
--- a/pandas/tests/plotting/frame/test_frame_groupby.py
+++ b/pandas/tests/plotting/frame/test_frame_groupby.py
@@ -2,21 +2,20 @@
 
 import pytest
 
-import pandas.util._test_decorators as td
-
 from pandas import DataFrame
-from pandas.tests.plotting.common import TestPlotBase
+from pandas.tests.plotting.common import _check_visible
+
+pytest.importorskip("matplotlib")
 
 
-@td.skip_if_no_mpl
-class TestDataFramePlotsGroupby(TestPlotBase):
+class TestDataFramePlotsGroupby:
     def _assert_ytickslabels_visibility(self, axes, expected):
         for ax, exp in zip(axes, expected):
-            self._check_visible(ax.get_yticklabels(), visible=exp)
+            _check_visible(ax.get_yticklabels(), visible=exp)
 
     def _assert_xtickslabels_visibility(self, axes, expected):
         for ax, exp in zip(axes, expected):
-            self._check_visible(ax.get_xticklabels(), visible=exp)
+            _check_visible(ax.get_xticklabels(), visible=exp)
 
     @pytest.mark.parametrize(
         "kwargs, expected",
diff --git a/pandas/tests/plotting/frame/test_frame_legend.py b/pandas/tests/plotting/frame/test_frame_legend.py
index bad42ebc85cc8..5914300b00434 100644
--- a/pandas/tests/plotting/frame/test_frame_legend.py
+++ b/pandas/tests/plotting/frame/test_frame_legend.py
@@ -7,11 +7,17 @@
     DataFrame,
     date_range,
 )
-from pandas.tests.plotting.common import TestPlotBase
+from pandas.tests.plotting.common import (
+    _check_legend_labels,
+    _check_legend_marker,
+    _check_text_labels,
+)
 from pandas.util.version import Version
 
+mpl = pytest.importorskip("matplotlib")
+
 
-class TestFrameLegend(TestPlotBase):
+class TestFrameLegend:
     @pytest.mark.xfail(
         reason=(
             "Open bug in matplotlib "
@@ -66,27 +72,25 @@ def test_df_legend_labels(self):
 
         for kind in kinds:
             ax = df.plot(kind=kind, legend=True)
-            self._check_legend_labels(ax, labels=df.columns)
+            _check_legend_labels(ax, labels=df.columns)
 
             ax = df2.plot(kind=kind, legend=False, ax=ax)
-            self._check_legend_labels(ax, labels=df.columns)
+            _check_legend_labels(ax, labels=df.columns)
 
             ax = df3.plot(kind=kind, legend=True, ax=ax)
-            self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
+            _check_legend_labels(ax, labels=df.columns.union(df3.columns))
 
             ax = df4.plot(kind=kind, legend="reverse", ax=ax)
             expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
-            self._check_legend_labels(ax, labels=expected)
+            _check_legend_labels(ax, labels=expected)
 
         # Secondary Y
         ax = df.plot(legend=True, secondary_y="b")
-        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
+        _check_legend_labels(ax, labels=["a", "b (right)", "c"])
         ax = df2.plot(legend=False, ax=ax)
-        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
+        _check_legend_labels(ax, labels=["a", "b (right)", "c"])
         ax = df3.plot(kind="bar", legend=True, secondary_y="h", ax=ax)
-        self._check_legend_labels(
-            ax, labels=["a", "b (right)", "c", "g", "h (right)", "i"]
-        )
+        _check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h (right)", "i"])
 
         # Time Series
         ind = date_range("1/1/2014", periods=3)
@@ -94,55 +98,55 @@ def test_df_legend_labels(self):
         df2 = DataFrame(np.random.randn(3, 3), columns=["d", "e", "f"], index=ind)
         df3 = DataFrame(np.random.randn(3, 3), columns=["g", "h", "i"], index=ind)
         ax = df.plot(legend=True, secondary_y="b")
-        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
+        _check_legend_labels(ax, labels=["a", "b (right)", "c"])
         ax = df2.plot(legend=False, ax=ax)
-        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
+        _check_legend_labels(ax, labels=["a", "b (right)", "c"])
         ax = df3.plot(legend=True, ax=ax)
-        self._check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h", "i"])
+        _check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h", "i"])
 
         # scatter
         ax = df.plot.scatter(x="a", y="b", label="data1")
-        self._check_legend_labels(ax, labels=["data1"])
+        _check_legend_labels(ax, labels=["data1"])
         ax = df2.plot.scatter(x="d", y="e", legend=False, label="data2", ax=ax)
-        self._check_legend_labels(ax, labels=["data1"])
+        _check_legend_labels(ax, labels=["data1"])
         ax = df3.plot.scatter(x="g", y="h", label="data3", ax=ax)
-        self._check_legend_labels(ax, labels=["data1", "data3"])
+        _check_legend_labels(ax, labels=["data1", "data3"])
 
         # ensure label args pass through and
         # index name does not mutate
         # column names don't mutate
         df5 = df.set_index("a")
         ax = df5.plot(y="b")
-        self._check_legend_labels(ax, labels=["b"])
+        _check_legend_labels(ax, labels=["b"])
         ax = df5.plot(y="b", label="LABEL_b")
-        self._check_legend_labels(ax, labels=["LABEL_b"])
-        self._check_text_labels(ax.xaxis.get_label(), "a")
+        _check_legend_labels(ax, labels=["LABEL_b"])
+        _check_text_labels(ax.xaxis.get_label(), "a")
         ax = df5.plot(y="c", label="LABEL_c", ax=ax)
-        self._check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"])
+        _check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"])
         assert df5.columns.tolist() == ["b", "c"]
 
     def test_missing_marker_multi_plots_on_same_ax(self):
         # GH 18222
         df = DataFrame(data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"])
-        fig, ax = self.plt.subplots(nrows=1, ncols=3)
+        fig, ax = mpl.pyplot.subplots(nrows=1, ncols=3)
         # Left plot
         df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0])
         df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[0])
         df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[0])
-        self._check_legend_labels(ax[0], labels=["r", "g", "b"])
-        self._check_legend_marker(ax[0], expected_markers=["o", "x", "o"])
+        _check_legend_labels(ax[0], labels=["r", "g", "b"])
+        _check_legend_marker(ax[0], expected_markers=["o", "x", "o"])
         # Center plot
         df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[1])
         df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[1])
         df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[1])
-        self._check_legend_labels(ax[1], labels=["b", "r", "g"])
-        self._check_legend_marker(ax[1], expected_markers=["o", "o", "x"])
+        _check_legend_labels(ax[1], labels=["b", "r", "g"])
+        _check_legend_marker(ax[1], expected_markers=["o", "o", "x"])
         # Right plot
         df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[2])
         df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[2])
         df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[2])
-        self._check_legend_labels(ax[2], labels=["g", "b", "r"])
-        self._check_legend_marker(ax[2], expected_markers=["x", "o", "o"])
+        _check_legend_labels(ax[2], labels=["g", "b", "r"])
+        _check_legend_marker(ax[2], expected_markers=["x", "o", "o"])
 
     def test_legend_name(self):
         multi = DataFrame(
@@ -153,21 +157,21 @@ def test_legend_name(self):
 
         ax = multi.plot()
         leg_title = ax.legend_.get_title()
-        self._check_text_labels(leg_title, "group,individual")
+        _check_text_labels(leg_title, "group,individual")
 
         df = DataFrame(np.random.randn(5, 5))
         ax = df.plot(legend=True, ax=ax)
         leg_title = ax.legend_.get_title()
-        self._check_text_labels(leg_title, "group,individual")
+        _check_text_labels(leg_title, "group,individual")
 
         df.columns.name = "new"
         ax = df.plot(legend=False, ax=ax)
         leg_title = ax.legend_.get_title()
-        self._check_text_labels(leg_title, "group,individual")
+        _check_text_labels(leg_title, "group,individual")
 
         ax = df.plot(legend=True, ax=ax)
         leg_title = ax.legend_.get_title()
-        self._check_text_labels(leg_title, "new")
+        _check_text_labels(leg_title, "new")
 
     @pytest.mark.parametrize(
         "kind",
@@ -183,7 +187,7 @@ def test_legend_name(self):
     def test_no_legend(self, kind):
         df = DataFrame(np.random.rand(3, 3), columns=["a", "b", "c"])
         ax = df.plot(kind=kind, legend=False)
-        self._check_legend_labels(ax, visible=False)
+        _check_legend_labels(ax, visible=False)
 
     def test_missing_markers_legend(self):
         # 14958
@@ -192,8 +196,8 @@ def test_missing_markers_legend(self):
         df.plot(y=["B"], marker="o", linestyle="dotted", ax=ax)
         df.plot(y=["C"], marker="<", linestyle="dotted", ax=ax)
 
-        self._check_legend_labels(ax, labels=["A", "B", "C"])
-        self._check_legend_marker(ax, expected_markers=["x", "o", "<"])
+        _check_legend_labels(ax, labels=["A", "B", "C"])
+        _check_legend_marker(ax, expected_markers=["x", "o", "<"])
 
     def test_missing_markers_legend_using_style(self):
         # 14563
@@ -206,9 +210,9 @@ def test_missing_markers_legend_using_style(self):
             }
         )
 
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         for kind in "ABC":
             df.plot("X", kind, label=kind, ax=ax, style=".")
 
-        self._check_legend_labels(ax, labels=["A", "B", "C"])
-        self._check_legend_marker(ax, expected_markers=[".", ".", "."])
+        _check_legend_labels(ax, labels=["A", "B", "C"])
+        _check_legend_marker(ax, expected_markers=[".", ".", "."])
diff --git a/pandas/tests/plotting/frame/test_frame_subplots.py b/pandas/tests/plotting/frame/test_frame_subplots.py
index 4f55f9504f0db..336fed6293070 100644
--- a/pandas/tests/plotting/frame/test_frame_subplots.py
+++ b/pandas/tests/plotting/frame/test_frame_subplots.py
@@ -8,7 +8,6 @@
 
 from pandas.compat import is_platform_linux
 from pandas.compat.numpy import np_version_gte1p24
-import pandas.util._test_decorators as td
 
 import pandas as pd
 from pandas import (
@@ -17,47 +16,55 @@
     date_range,
 )
 import pandas._testing as tm
-from pandas.tests.plotting.common import TestPlotBase
+from pandas.tests.plotting.common import (
+    _check_axes_shape,
+    _check_box_return_type,
+    _check_legend_labels,
+    _check_ticks_props,
+    _check_visible,
+    _flatten_visible,
+)
 
 from pandas.io.formats.printing import pprint_thing
 
+mpl = pytest.importorskip("matplotlib")
+
 
-@td.skip_if_no_mpl
-class TestDataFramePlotsSubplots(TestPlotBase):
+class TestDataFramePlotsSubplots:
     @pytest.mark.slow
     def test_subplots(self):
         df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
 
         for kind in ["bar", "barh", "line", "area"]:
             axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
-            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+            _check_axes_shape(axes, axes_num=3, layout=(3, 1))
             assert axes.shape == (3,)
 
             for ax, column in zip(axes, df.columns):
-                self._check_legend_labels(ax, labels=[pprint_thing(column)])
+                _check_legend_labels(ax, labels=[pprint_thing(column)])
 
             for ax in axes[:-2]:
-                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
-                self._check_visible(ax.get_xticklabels(), visible=False)
+                _check_visible(ax.xaxis)  # xaxis must be visible for grid
+                _check_visible(ax.get_xticklabels(), visible=False)
                 if kind != "bar":
                     # changed in https://github.com/pandas-dev/pandas/issues/26714
-                    self._check_visible(ax.get_xticklabels(minor=True), visible=False)
-                self._check_visible(ax.xaxis.get_label(), visible=False)
-                self._check_visible(ax.get_yticklabels())
+                    _check_visible(ax.get_xticklabels(minor=True), visible=False)
+                _check_visible(ax.xaxis.get_label(), visible=False)
+                _check_visible(ax.get_yticklabels())
 
-            self._check_visible(axes[-1].xaxis)
-            self._check_visible(axes[-1].get_xticklabels())
-            self._check_visible(axes[-1].get_xticklabels(minor=True))
-            self._check_visible(axes[-1].xaxis.get_label())
-            self._check_visible(axes[-1].get_yticklabels())
+            _check_visible(axes[-1].xaxis)
+            _check_visible(axes[-1].get_xticklabels())
+            _check_visible(axes[-1].get_xticklabels(minor=True))
+            _check_visible(axes[-1].xaxis.get_label())
+            _check_visible(axes[-1].get_yticklabels())
 
             axes = df.plot(kind=kind, subplots=True, sharex=False)
             for ax in axes:
-                self._check_visible(ax.xaxis)
-                self._check_visible(ax.get_xticklabels())
-                self._check_visible(ax.get_xticklabels(minor=True))
-                self._check_visible(ax.xaxis.get_label())
-                self._check_visible(ax.get_yticklabels())
+                _check_visible(ax.xaxis)
+                _check_visible(ax.get_xticklabels())
+                _check_visible(ax.get_xticklabels(minor=True))
+                _check_visible(ax.xaxis.get_label())
+                _check_visible(ax.get_yticklabels())
 
             axes = df.plot(kind=kind, subplots=True, legend=False)
             for ax in axes:
@@ -69,31 +76,31 @@ def test_subplots_timeseries(self):
 
         for kind in ["line", "area"]:
             axes = df.plot(kind=kind, subplots=True, sharex=True)
-            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+            _check_axes_shape(axes, axes_num=3, layout=(3, 1))
 
             for ax in axes[:-2]:
                 # GH 7801
-                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
-                self._check_visible(ax.get_xticklabels(), visible=False)
-                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
-                self._check_visible(ax.xaxis.get_label(), visible=False)
-                self._check_visible(ax.get_yticklabels())
-
-            self._check_visible(axes[-1].xaxis)
-            self._check_visible(axes[-1].get_xticklabels())
-            self._check_visible(axes[-1].get_xticklabels(minor=True))
-            self._check_visible(axes[-1].xaxis.get_label())
-            self._check_visible(axes[-1].get_yticklabels())
-            self._check_ticks_props(axes, xrot=0)
+                _check_visible(ax.xaxis)  # xaxis must be visible for grid
+                _check_visible(ax.get_xticklabels(), visible=False)
+                _check_visible(ax.get_xticklabels(minor=True), visible=False)
+                _check_visible(ax.xaxis.get_label(), visible=False)
+                _check_visible(ax.get_yticklabels())
+
+            _check_visible(axes[-1].xaxis)
+            _check_visible(axes[-1].get_xticklabels())
+            _check_visible(axes[-1].get_xticklabels(minor=True))
+            _check_visible(axes[-1].xaxis.get_label())
+            _check_visible(axes[-1].get_yticklabels())
+            _check_ticks_props(axes, xrot=0)
 
             axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
             for ax in axes:
-                self._check_visible(ax.xaxis)
-                self._check_visible(ax.get_xticklabels())
-                self._check_visible(ax.get_xticklabels(minor=True))
-                self._check_visible(ax.xaxis.get_label())
-                self._check_visible(ax.get_yticklabels())
-                self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
+                _check_visible(ax.xaxis)
+                _check_visible(ax.get_xticklabels())
+                _check_visible(ax.get_xticklabels(minor=True))
+                _check_visible(ax.xaxis.get_label())
+                _check_visible(ax.get_yticklabels())
+                _check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
 
     def test_subplots_timeseries_y_axis(self):
         # GH16953
@@ -185,27 +192,27 @@ def test_subplots_layout_multi_column(self):
         df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
 
         axes = df.plot(subplots=True, layout=(2, 2))
-        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+        _check_axes_shape(axes, axes_num=3, layout=(2, 2))
         assert axes.shape == (2, 2)
 
         axes = df.plot(subplots=True, layout=(-1, 2))
-        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+        _check_axes_shape(axes, axes_num=3, layout=(2, 2))
         assert axes.shape == (2, 2)
 
         axes = df.plot(subplots=True, layout=(2, -1))
-        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+        _check_axes_shape(axes, axes_num=3, layout=(2, 2))
         assert axes.shape == (2, 2)
 
         axes = df.plot(subplots=True, layout=(1, 4))
-        self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
+        _check_axes_shape(axes, axes_num=3, layout=(1, 4))
         assert axes.shape == (1, 4)
 
         axes = df.plot(subplots=True, layout=(-1, 4))
-        self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
+        _check_axes_shape(axes, axes_num=3, layout=(1, 4))
         assert axes.shape == (1, 4)
 
         axes = df.plot(subplots=True, layout=(4, -1))
-        self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
+        _check_axes_shape(axes, axes_num=3, layout=(4, 1))
         assert axes.shape == (4, 1)
 
         msg = "Layout of 1x1 must be larger than required size 3"
@@ -230,7 +237,7 @@ def test_subplots_layout_single_column(
         # GH 6667
         df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
         axes = df.plot(subplots=True, **kwargs)
-        self._check_axes_shape(
+        _check_axes_shape(
             axes,
             axes_num=expected_axes_num,
             layout=expected_layout,
@@ -251,25 +258,25 @@ def test_subplots_warnings(self):
 
     def test_subplots_multiple_axes(self):
         # GH 5353, 6970, GH 7069
-        fig, axes = self.plt.subplots(2, 3)
+        fig, axes = mpl.pyplot.subplots(2, 3)
         df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
 
         returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
-        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
+        _check_axes_shape(returned, axes_num=3, layout=(1, 3))
         assert returned.shape == (3,)
         assert returned[0].figure is fig
         # draw on second row
         returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
-        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
+        _check_axes_shape(returned, axes_num=3, layout=(1, 3))
         assert returned.shape == (3,)
         assert returned[0].figure is fig
-        self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
+        _check_axes_shape(axes, axes_num=6, layout=(2, 3))
         tm.close()
 
         msg = "The number of passed axes must be 3, the same as the output plot"
 
         with pytest.raises(ValueError, match=msg):
-            fig, axes = self.plt.subplots(2, 3)
+            fig, axes = mpl.pyplot.subplots(2, 3)
             # pass different number of axes from required
             df.plot(subplots=True, ax=axes)
 
@@ -277,7 +284,7 @@ def test_subplots_multiple_axes(self):
         # invalid layout should not affect input and return value
         # (show warning is tested in
         # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes
-        fig, axes = self.plt.subplots(2, 2)
+        fig, axes = mpl.pyplot.subplots(2, 2)
         with warnings.catch_warnings():
             warnings.simplefilter("ignore", UserWarning)
             df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))
@@ -285,33 +292,33 @@ def test_subplots_multiple_axes(self):
             returned = df.plot(
                 subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False
             )
-            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
+            _check_axes_shape(returned, axes_num=4, layout=(2, 2))
             assert returned.shape == (4,)
 
             returned = df.plot(
                 subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False
             )
-            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
+            _check_axes_shape(returned, axes_num=4, layout=(2, 2))
             assert returned.shape == (4,)
 
             returned = df.plot(
                 subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False
             )
-        self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
+        _check_axes_shape(returned, axes_num=4, layout=(2, 2))
         assert returned.shape == (4,)
 
         # single column
-        fig, axes = self.plt.subplots(1, 1)
+        fig, axes = mpl.pyplot.subplots(1, 1)
         df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
 
         axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
-        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+        _check_axes_shape(axes, axes_num=1, layout=(1, 1))
         assert axes.shape == (1,)
 
     def test_subplots_ts_share_axes(self):
         # GH 3964
-        fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
-        self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
+        fig, axes = mpl.pyplot.subplots(3, 3, sharex=True, sharey=True)
+        mpl.pyplot.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
         df = DataFrame(
             np.random.randn(10, 9),
             index=date_range(start="2014-07-01", freq="M", periods=10),
@@ -321,21 +328,21 @@ def test_subplots_ts_share_axes(self):
 
         # Rows other than bottom should not be visible
         for ax in axes[0:-1].ravel():
-            self._check_visible(ax.get_xticklabels(), visible=False)
+            _check_visible(ax.get_xticklabels(), visible=False)
 
         # Bottom row should be visible
         for ax in axes[-1].ravel():
-            self._check_visible(ax.get_xticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(), visible=True)
 
         # First column should be visible
         for ax in axes[[0, 1, 2], [0]].ravel():
-            self._check_visible(ax.get_yticklabels(), visible=True)
+            _check_visible(ax.get_yticklabels(), visible=True)
 
         # Other columns should not be visible
         for ax in axes[[0, 1, 2], [1]].ravel():
-            self._check_visible(ax.get_yticklabels(), visible=False)
+            _check_visible(ax.get_yticklabels(), visible=False)
         for ax in axes[[0, 1, 2], [2]].ravel():
-            self._check_visible(ax.get_yticklabels(), visible=False)
+            _check_visible(ax.get_yticklabels(), visible=False)
 
     def test_subplots_sharex_axes_existing_axes(self):
         # GH 9158
@@ -345,29 +352,29 @@ def test_subplots_sharex_axes_existing_axes(self):
         axes = df[["A", "B"]].plot(subplots=True)
         df["C"].plot(ax=axes[0], secondary_y=True)
 
-        self._check_visible(axes[0].get_xticklabels(), visible=False)
-        self._check_visible(axes[1].get_xticklabels(), visible=True)
+        _check_visible(axes[0].get_xticklabels(), visible=False)
+        _check_visible(axes[1].get_xticklabels(), visible=True)
         for ax in axes.ravel():
-            self._check_visible(ax.get_yticklabels(), visible=True)
+            _check_visible(ax.get_yticklabels(), visible=True)
 
     def test_subplots_dup_columns(self):
         # GH 10962
         df = DataFrame(np.random.rand(5, 5), columns=list("aaaaa"))
         axes = df.plot(subplots=True)
         for ax in axes:
-            self._check_legend_labels(ax, labels=["a"])
+            _check_legend_labels(ax, labels=["a"])
             assert len(ax.lines) == 1
         tm.close()
 
         axes = df.plot(subplots=True, secondary_y="a")
         for ax in axes:
             # (right) is only attached when subplots=False
-            self._check_legend_labels(ax, labels=["a"])
+            _check_legend_labels(ax, labels=["a"])
             assert len(ax.lines) == 1
         tm.close()
 
         ax = df.plot(secondary_y="a")
-        self._check_legend_labels(ax, labels=["a (right)"] * 5)
+        _check_legend_labels(ax, labels=["a (right)"] * 5)
         assert len(ax.lines) == 0
         assert len(ax.right_ax.lines) == 5
 
@@ -407,13 +414,13 @@ def test_boxplot_subplots_return_type(self, hist_df):
         # normal style: return_type=None
         result = df.plot.box(subplots=True)
         assert isinstance(result, Series)
-        self._check_box_return_type(
+        _check_box_return_type(
             result, None, expected_keys=["height", "weight", "category"]
         )
 
         for t in ["dict", "axes", "both"]:
             returned = df.plot.box(return_type=t, subplots=True)
-            self._check_box_return_type(
+            _check_box_return_type(
                 returned,
                 t,
                 expected_keys=["height", "weight", "category"],
@@ -435,12 +442,12 @@ def test_df_subplots_patterns_minorticks(self):
         axes = df.plot(subplots=True, ax=axes)
         for ax in axes:
             assert len(ax.lines) == 1
-            self._check_visible(ax.get_yticklabels(), visible=True)
+            _check_visible(ax.get_yticklabels(), visible=True)
         # xaxis of 1st ax must be hidden
-        self._check_visible(axes[0].get_xticklabels(), visible=False)
-        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
-        self._check_visible(axes[1].get_xticklabels(), visible=True)
-        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
+        _check_visible(axes[0].get_xticklabels(), visible=False)
+        _check_visible(axes[0].get_xticklabels(minor=True), visible=False)
+        _check_visible(axes[1].get_xticklabels(), visible=True)
+        _check_visible(axes[1].get_xticklabels(minor=True), visible=True)
         tm.close()
 
         fig, axes = plt.subplots(2, 1)
@@ -448,12 +455,12 @@ def test_df_subplots_patterns_minorticks(self):
             axes = df.plot(subplots=True, ax=axes, sharex=True)
         for ax in axes:
             assert len(ax.lines) == 1
-            self._check_visible(ax.get_yticklabels(), visible=True)
+            _check_visible(ax.get_yticklabels(), visible=True)
         # xaxis of 1st ax must be hidden
-        self._check_visible(axes[0].get_xticklabels(), visible=False)
-        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
-        self._check_visible(axes[1].get_xticklabels(), visible=True)
-        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
+        _check_visible(axes[0].get_xticklabels(), visible=False)
+        _check_visible(axes[0].get_xticklabels(minor=True), visible=False)
+        _check_visible(axes[1].get_xticklabels(), visible=True)
+        _check_visible(axes[1].get_xticklabels(minor=True), visible=True)
         tm.close()
 
         # not shared
@@ -461,9 +468,9 @@ def test_df_subplots_patterns_minorticks(self):
         axes = df.plot(subplots=True, ax=axes)
         for ax in axes:
             assert len(ax.lines) == 1
-            self._check_visible(ax.get_yticklabels(), visible=True)
-            self._check_visible(ax.get_xticklabels(), visible=True)
-            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+            _check_visible(ax.get_yticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(), visible=True)
+            _check_visible(ax.get_xticklabels(minor=True), visible=True)
         tm.close()
 
     def test_subplots_sharex_false(self):
@@ -473,7 +480,7 @@ def test_subplots_sharex_false(self):
         df.iloc[5:, 1] = np.nan
         df.iloc[:5, 0] = np.nan
 
-        figs, axs = self.plt.subplots(2, 1)
+        figs, axs = mpl.pyplot.subplots(2, 1)
         df.plot.line(ax=axs, subplots=True, sharex=False)
 
         expected_ax1 = np.arange(4.5, 10, 0.5)
@@ -487,13 +494,13 @@ def test_subplots_constrained_layout(self):
         idx = date_range(start="now", periods=10)
         df = DataFrame(np.random.rand(10, 3), index=idx)
         kwargs = {}
-        if hasattr(self.plt.Figure, "get_constrained_layout"):
+        if hasattr(mpl.pyplot.Figure, "get_constrained_layout"):
             kwargs["constrained_layout"] = True
-        fig, axes = self.plt.subplots(2, **kwargs)
+        fig, axes = mpl.pyplot.subplots(2, **kwargs)
         with tm.assert_produces_warning(None):
             df.plot(ax=axes[0])
             with tm.ensure_clean(return_filelike=True) as path:
-                self.plt.savefig(path)
+                mpl.pyplot.savefig(path)
 
     @pytest.mark.parametrize(
         "index_name, old_label, new_label",
@@ -632,7 +639,7 @@ def _check_bar_alignment(
             grid=True,
         )
 
-        axes = self._flatten_visible(axes)
+        axes = _flatten_visible(axes)
 
         for ax in axes:
             if kind == "bar":
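
The hunks above replace every bound self._check_visible(...) call with a module-level helper imported from pandas.tests.plotting.common. As a rough sketch of the contract these call sites rely on (the name matches the import, but the body below is an assumption, not the actual pandas implementation), such a free function only needs to assert visibility on whatever artists it is handed:

    # Hypothetical sketch of the free-function style these hunks switch to.
    def _check_visible(artists, visible=True):
        # Accept a single matplotlib Artist (e.g. ax.xaxis) or an iterable
        # of Artists (e.g. ax.get_xticklabels()).
        if not isinstance(artists, (list, tuple)):
            try:
                artists = list(artists)
            except TypeError:
                artists = [artists]
        for artist in artists:
            assert artist.get_visible() == visible
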
diff --git a/pandas/tests/plotting/frame/test_hist_box_by.py b/pandas/tests/plotting/frame/test_hist_box_by.py
index 999118144b58d..c8b71c04001e5 100644
--- a/pandas/tests/plotting/frame/test_hist_box_by.py
+++ b/pandas/tests/plotting/frame/test_hist_box_by.py
@@ -3,15 +3,17 @@
 import numpy as np
 import pytest
 
-import pandas.util._test_decorators as td
-
 from pandas import DataFrame
 import pandas._testing as tm
 from pandas.tests.plotting.common import (
-    TestPlotBase,
+    _check_axes_shape,
     _check_plot_works,
+    get_x_axis,
+    get_y_axis,
 )
 
+pytest.importorskip("matplotlib")
+
 
 @pytest.fixture
 def hist_df():
@@ -22,8 +24,7 @@ def hist_df():
     return df
 
 
-@td.skip_if_no_mpl
-class TestHistWithBy(TestPlotBase):
+class TestHistWithBy:
     @pytest.mark.slow
     @pytest.mark.parametrize(
         "by, column, titles, legends",
@@ -172,7 +173,7 @@ def test_hist_plot_layout_with_by(self, by, column, layout, axes_num, hist_df):
             axes = _check_plot_works(
                 hist_df.plot.hist, column=column, by=by, layout=layout
             )
-        self._check_axes_shape(axes, axes_num=axes_num, layout=layout)
+        _check_axes_shape(axes, axes_num=axes_num, layout=layout)
 
     @pytest.mark.parametrize(
         "msg, by, layout",
@@ -194,16 +195,16 @@ def test_axis_share_x_with_by(self, hist_df):
         ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharex=True)
 
         # share x
-        assert self.get_x_axis(ax1).joined(ax1, ax2)
-        assert self.get_x_axis(ax2).joined(ax1, ax2)
-        assert self.get_x_axis(ax3).joined(ax1, ax3)
-        assert self.get_x_axis(ax3).joined(ax2, ax3)
+        assert get_x_axis(ax1).joined(ax1, ax2)
+        assert get_x_axis(ax2).joined(ax1, ax2)
+        assert get_x_axis(ax3).joined(ax1, ax3)
+        assert get_x_axis(ax3).joined(ax2, ax3)
 
         # don't share y
-        assert not self.get_y_axis(ax1).joined(ax1, ax2)
-        assert not self.get_y_axis(ax2).joined(ax1, ax2)
-        assert not self.get_y_axis(ax3).joined(ax1, ax3)
-        assert not self.get_y_axis(ax3).joined(ax2, ax3)
+        assert not get_y_axis(ax1).joined(ax1, ax2)
+        assert not get_y_axis(ax2).joined(ax1, ax2)
+        assert not get_y_axis(ax3).joined(ax1, ax3)
+        assert not get_y_axis(ax3).joined(ax2, ax3)
 
     @pytest.mark.slow
     def test_axis_share_y_with_by(self, hist_df):
@@ -211,26 +212,25 @@ def test_axis_share_y_with_by(self, hist_df):
         ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharey=True)
 
         # share y
-        assert self.get_y_axis(ax1).joined(ax1, ax2)
-        assert self.get_y_axis(ax2).joined(ax1, ax2)
-        assert self.get_y_axis(ax3).joined(ax1, ax3)
-        assert self.get_y_axis(ax3).joined(ax2, ax3)
+        assert get_y_axis(ax1).joined(ax1, ax2)
+        assert get_y_axis(ax2).joined(ax1, ax2)
+        assert get_y_axis(ax3).joined(ax1, ax3)
+        assert get_y_axis(ax3).joined(ax2, ax3)
 
         # don't share x
-        assert not self.get_x_axis(ax1).joined(ax1, ax2)
-        assert not self.get_x_axis(ax2).joined(ax1, ax2)
-        assert not self.get_x_axis(ax3).joined(ax1, ax3)
-        assert not self.get_x_axis(ax3).joined(ax2, ax3)
+        assert not get_x_axis(ax1).joined(ax1, ax2)
+        assert not get_x_axis(ax2).joined(ax1, ax2)
+        assert not get_x_axis(ax3).joined(ax1, ax3)
+        assert not get_x_axis(ax3).joined(ax2, ax3)
 
     @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)])
     def test_figure_shape_hist_with_by(self, figsize, hist_df):
         # GH 15079
         axes = hist_df.plot.hist(column="A", by="C", figsize=figsize)
-        self._check_axes_shape(axes, axes_num=3, figsize=figsize)
+        _check_axes_shape(axes, axes_num=3, figsize=figsize)
 
 
-@td.skip_if_no_mpl
-class TestBoxWithBy(TestPlotBase):
+class TestBoxWithBy:
     @pytest.mark.parametrize(
         "by, column, titles, xticklabels",
         [
@@ -360,7 +360,7 @@ def test_box_plot_layout_with_by(self, by, column, layout, axes_num, hist_df):
         axes = _check_plot_works(
             hist_df.plot.box, default_axes=True, column=column, by=by, layout=layout
         )
-        self._check_axes_shape(axes, axes_num=axes_num, layout=layout)
+        _check_axes_shape(axes, axes_num=axes_num, layout=layout)
 
     @pytest.mark.parametrize(
         "msg, by, layout",
@@ -380,4 +380,4 @@ def test_box_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df):
     def test_figure_shape_hist_with_by(self, figsize, hist_df):
         # GH 15079
         axes = hist_df.plot.box(column="A", by="C", figsize=figsize)
-        self._check_axes_shape(axes, axes_num=1, figsize=figsize)
+        _check_axes_shape(axes, axes_num=1, figsize=figsize)
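
In the share-axis tests above, get_x_axis and get_y_axis become plain imports instead of TestPlotBase methods. A minimal sketch of what those helpers need to return for the .joined(ax1, ax2) assertions to hold (assuming they expose matplotlib's shared-axes grouper via the public accessors; the real helpers may reach into private Axes state instead):

    def get_x_axis(ax):
        # Grouper tracking which Axes share this Axes' x-axis;
        # Grouper.joined(a, b) reports whether a and b share a group.
        return ax.get_shared_x_axes()

    def get_y_axis(ax):
        return ax.get_shared_y_axes()
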
diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py
index c087d3be293e7..c0ad8e0c9608d 100644
--- a/pandas/tests/plotting/test_backend.py
+++ b/pandas/tests/plotting/test_backend.py
@@ -7,8 +7,12 @@
 
 import pandas
 
-dummy_backend = types.ModuleType("pandas_dummy_backend")
-setattr(dummy_backend, "plot", lambda *args, **kwargs: "used_dummy")
+
+@pytest.fixture
+def dummy_backend():
+    db = types.ModuleType("pandas_dummy_backend")
+    setattr(db, "plot", lambda *args, **kwargs: "used_dummy")
+    return db
 
 
 @pytest.fixture
@@ -26,7 +30,7 @@ def test_backend_is_not_module():
     assert pandas.options.plotting.backend == "matplotlib"
 
 
-def test_backend_is_correct(monkeypatch, restore_backend):
+def test_backend_is_correct(monkeypatch, restore_backend, dummy_backend):
     monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
 
     pandas.set_option("plotting.backend", "pandas_dummy_backend")
@@ -36,7 +40,7 @@ def test_backend_is_correct(monkeypatch, restore_backend):
     )
 
 
-def test_backend_can_be_set_in_plot_call(monkeypatch, restore_backend):
+def test_backend_can_be_set_in_plot_call(monkeypatch, restore_backend, dummy_backend):
     monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
     df = pandas.DataFrame([1, 2, 3])
 
@@ -44,7 +48,7 @@ def test_backend_can_be_set_in_plot_call(monkeypatch, restore_backend):
     assert df.plot(backend="pandas_dummy_backend") == "used_dummy"
 
 
-def test_register_entrypoint(restore_backend, tmp_path, monkeypatch):
+def test_register_entrypoint(restore_backend, tmp_path, monkeypatch, dummy_backend):
     monkeypatch.syspath_prepend(tmp_path)
     monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
 
@@ -86,7 +90,7 @@ def test_no_matplotlib_ok():
         pandas.plotting._core._get_plot_backend("matplotlib")
 
 
-def test_extra_kinds_ok(monkeypatch, restore_backend):
+def test_extra_kinds_ok(monkeypatch, restore_backend, dummy_backend):
     # https://github.com/pandas-dev/pandas/pull/28647
     monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
     pandas.set_option("plotting.backend", "pandas_dummy_backend")
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 9bbc8f42b6704..81947706f3fe1 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -6,8 +6,6 @@
 import numpy as np
 import pytest
 
-import pandas.util._test_decorators as td
-
 from pandas import (
     DataFrame,
     MultiIndex,
@@ -18,15 +16,19 @@
 )
 import pandas._testing as tm
 from pandas.tests.plotting.common import (
-    TestPlotBase,
+    _check_axes_shape,
+    _check_box_return_type,
     _check_plot_works,
+    _check_ticks_props,
+    _check_visible,
 )
 
 from pandas.io.formats.printing import pprint_thing
 
+mpl = pytest.importorskip("matplotlib")
+
 
-@td.skip_if_no_mpl
-class TestDataFramePlots(TestPlotBase):
+class TestDataFramePlots:
     def test_stacked_boxplot_set_axis(self):
         # GH2980
         import matplotlib.pyplot as plt
@@ -82,18 +84,18 @@ def test_boxplot_legacy2(self):
 
         # When ax is supplied and required number of axes is 1,
         # passed ax should be used:
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         axes = df.boxplot("Col1", by="X", ax=ax)
         ax_axes = ax.axes
         assert ax_axes is axes
 
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
         ax_axes = ax.axes
         assert ax_axes is axes["A"]
 
         # Multiple columns with an ax argument should use same figure
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         with tm.assert_produces_warning(UserWarning):
             axes = df.boxplot(
                 column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
@@ -102,7 +104,7 @@ def test_boxplot_legacy2(self):
 
         # When by is None, check that all relevant lines are present in the
         # dict
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         d = df.boxplot(ax=ax, return_type="dict")
         lines = list(itertools.chain.from_iterable(d.values()))
         assert len(ax.get_lines()) == len(lines)
@@ -110,7 +112,7 @@ def test_boxplot_legacy2(self):
     def test_boxplot_return_type_none(self, hist_df):
         # GH 12216; return_type=None & by=None -> axes
         result = hist_df.boxplot()
-        assert isinstance(result, self.plt.Axes)
+        assert isinstance(result, mpl.pyplot.Axes)
 
     def test_boxplot_return_type_legacy(self):
         # API change in https://github.com/pandas-dev/pandas/pull/7096
@@ -125,19 +127,19 @@ def test_boxplot_return_type_legacy(self):
             df.boxplot(return_type="NOT_A_TYPE")
 
         result = df.boxplot()
-        self._check_box_return_type(result, "axes")
+        _check_box_return_type(result, "axes")
 
         with tm.assert_produces_warning(False):
             result = df.boxplot(return_type="dict")
-        self._check_box_return_type(result, "dict")
+        _check_box_return_type(result, "dict")
 
         with tm.assert_produces_warning(False):
             result = df.boxplot(return_type="axes")
-        self._check_box_return_type(result, "axes")
+        _check_box_return_type(result, "axes")
 
         with tm.assert_produces_warning(False):
             result = df.boxplot(return_type="both")
-        self._check_box_return_type(result, "both")
+        _check_box_return_type(result, "both")
 
     def test_boxplot_axis_limits(self, hist_df):
         def _check_ax_limits(col, ax):
@@ -178,9 +180,7 @@ def test_figsize(self):
 
     def test_fontsize(self):
         df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
-        self._check_ticks_props(
-            df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
-        )
+        _check_ticks_props(df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16)
 
     def test_boxplot_numeric_data(self):
         # GH 22799
@@ -317,24 +317,23 @@ def test_boxplot_group_xlabel_ylabel(self, vert):
         for subplot in ax:
             assert subplot.get_xlabel() == xlabel
             assert subplot.get_ylabel() == ylabel
-        self.plt.close()
+        mpl.pyplot.close()
 
         ax = df.boxplot(by="group", vert=vert)
         for subplot in ax:
             target_label = subplot.get_xlabel() if vert else subplot.get_ylabel()
             assert target_label == pprint_thing(["group"])
-        self.plt.close()
+        mpl.pyplot.close()
 
 
-@td.skip_if_no_mpl
-class TestDataFrameGroupByPlots(TestPlotBase):
+class TestDataFrameGroupByPlots:
     def test_boxplot_legacy1(self, hist_df):
         grouped = hist_df.groupby(by="gender")
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             axes = _check_plot_works(grouped.boxplot, return_type="axes")
-        self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
+        _check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
         axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
-        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+        _check_axes_shape(axes, axes_num=1, layout=(1, 1))
 
     @pytest.mark.slow
     def test_boxplot_legacy2(self):
@@ -343,10 +342,10 @@ def test_boxplot_legacy2(self):
         grouped = df.groupby(level=1)
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             axes = _check_plot_works(grouped.boxplot, return_type="axes")
-        self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
+        _check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
 
         axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
-        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+        _check_axes_shape(axes, axes_num=1, layout=(1, 1))
 
     def test_boxplot_legacy3(self):
         tuples = zip(string.ascii_letters[:10], range(10))
@@ -356,9 +355,9 @@ def test_boxplot_legacy3(self):
             grouped = df.unstack(level=1).groupby(level=0, axis=1)
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             axes = _check_plot_works(grouped.boxplot, return_type="axes")
-        self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
+        _check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
         axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
-        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+        _check_axes_shape(axes, axes_num=1, layout=(1, 1))
 
     def test_grouped_plot_fignums(self):
         n = 10
@@ -369,12 +368,12 @@ def test_grouped_plot_fignums(self):
         gb = df.groupby("gender")
 
         res = gb.plot()
-        assert len(self.plt.get_fignums()) == 2
+        assert len(mpl.pyplot.get_fignums()) == 2
         assert len(res) == 2
         tm.close()
 
         res = gb.boxplot(return_type="axes")
-        assert len(self.plt.get_fignums()) == 1
+        assert len(mpl.pyplot.get_fignums()) == 1
         assert len(res) == 2
         tm.close()
 
@@ -389,42 +388,55 @@ def test_grouped_box_return_type(self, hist_df):
         # old style: return_type=None
         result = df.boxplot(by="gender")
         assert isinstance(result, np.ndarray)
-        self._check_box_return_type(
+        _check_box_return_type(
             result, None, expected_keys=["height", "weight", "category"]
         )
 
+    @pytest.mark.slow
+    def test_grouped_box_return_type_groupby(self, hist_df):
+        df = hist_df
         # now for groupby
         result = df.groupby("gender").boxplot(return_type="dict")
-        self._check_box_return_type(result, "dict", expected_keys=["Male", "Female"])
+        _check_box_return_type(result, "dict", expected_keys=["Male", "Female"])
 
-        columns2 = "X B C D A G Y N Q O".split()
-        df2 = DataFrame(np.random.randn(50, 10), columns=columns2)
-        categories2 = "A B C D E F G H I J".split()
-        df2["category"] = categories2 * 5
+    @pytest.mark.slow
+    @pytest.mark.parametrize("return_type", ["dict", "axes", "both"])
+    def test_grouped_box_return_type_arg(self, hist_df, return_type):
+        df = hist_df
 
-        for t in ["dict", "axes", "both"]:
-            returned = df.groupby("classroom").boxplot(return_type=t)
-            self._check_box_return_type(returned, t, expected_keys=["A", "B", "C"])
+        returned = df.groupby("classroom").boxplot(return_type=return_type)
+        _check_box_return_type(returned, return_type, expected_keys=["A", "B", "C"])
 
-            returned = df.boxplot(by="classroom", return_type=t)
-            self._check_box_return_type(
-                returned, t, expected_keys=["height", "weight", "category"]
-            )
+        returned = df.boxplot(by="classroom", return_type=return_type)
+        _check_box_return_type(
+            returned, return_type, expected_keys=["height", "weight", "category"]
+        )
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize("return_type", ["dict", "axes", "both"])
+    def test_grouped_box_return_type_arg_duplicate_cats(self, return_type):
+        columns2 = "X B C D A".split()
+        df2 = DataFrame(np.random.randn(6, 5), columns=columns2)
+        categories2 = "A B".split()
+        df2["category"] = categories2 * 3
 
-            returned = df2.groupby("category").boxplot(return_type=t)
-            self._check_box_return_type(returned, t, expected_keys=categories2)
+        returned = df2.groupby("category").boxplot(return_type=return_type)
+        _check_box_return_type(returned, return_type, expected_keys=categories2)
 
-            returned = df2.boxplot(by="category", return_type=t)
-            self._check_box_return_type(returned, t, expected_keys=columns2)
+        returned = df2.boxplot(by="category", return_type=return_type)
+        _check_box_return_type(returned, return_type, expected_keys=columns2)
 
     @pytest.mark.slow
-    def test_grouped_box_layout(self, hist_df):
+    def test_grouped_box_layout_too_small(self, hist_df):
         df = hist_df
 
         msg = "Layout of 1x1 must be larger than required size 2"
         with pytest.raises(ValueError, match=msg):
             df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))
 
+    @pytest.mark.slow
+    def test_grouped_box_layout_needs_by(self, hist_df):
+        df = hist_df
         msg = "The 'layout' keyword is not supported when 'by' is None"
         with pytest.raises(ValueError, match=msg):
             df.boxplot(
@@ -433,79 +445,84 @@ def test_grouped_box_layout(self, hist_df):
                 return_type="dict",
             )
 
+    @pytest.mark.slow
+    def test_grouped_box_layout_positive_layout(self, hist_df):
+        df = hist_df
         msg = "At least one dimension of layout must be positive"
         with pytest.raises(ValueError, match=msg):
             df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))
 
-        # _check_plot_works adds an ax so catch warning. see GH #13188
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            _check_plot_works(
-                df.groupby("gender").boxplot, column="height", return_type="dict"
-            )
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))
-
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            _check_plot_works(
-                df.groupby("category").boxplot, column="height", return_type="dict"
-            )
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
-
-        # GH 6769
+    @pytest.mark.slow
+    @pytest.mark.parametrize(
+        "gb_key, axes_num, rows",
+        [["gender", 2, 1], ["category", 4, 2], ["classroom", 3, 2]],
+    )
+    def test_grouped_box_layout_positive_layout_axes(
+        self, hist_df, gb_key, axes_num, rows
+    ):
+        df = hist_df
+        # _check_plot_works adds an ax, so catch the warning. See GH#13188 and GH#6769
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             _check_plot_works(
-                df.groupby("classroom").boxplot, column="height", return_type="dict"
+                df.groupby(gb_key).boxplot, column="height", return_type="dict"
             )
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
+        _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=axes_num, layout=(rows, 2))
 
+    @pytest.mark.slow
+    @pytest.mark.parametrize(
+        "col, visible", [["height", False], ["weight", True], ["category", True]]
+    )
+    def test_grouped_box_layout_visible(self, hist_df, col, visible):
+        df = hist_df
         # GH 5897
         axes = df.boxplot(
             column=["height", "weight", "category"], by="gender", return_type="axes"
         )
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
-        for ax in [axes["height"]]:
-            self._check_visible(ax.get_xticklabels(), visible=False)
-            self._check_visible([ax.xaxis.get_label()], visible=False)
-        for ax in [axes["weight"], axes["category"]]:
-            self._check_visible(ax.get_xticklabels())
-            self._check_visible([ax.xaxis.get_label()])
+        _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2))
+        ax = axes[col]
+        _check_visible(ax.get_xticklabels(), visible=visible)
+        _check_visible([ax.xaxis.get_label()], visible=visible)
 
+    @pytest.mark.slow
+    def test_grouped_box_layout_shape(self, hist_df):
+        df = hist_df
         df.groupby("classroom").boxplot(
             column=["height", "weight", "category"], return_type="dict"
         )
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
+        _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2))
 
+    @pytest.mark.slow
+    @pytest.mark.parametrize("cols", [2, -1])
+    def test_grouped_box_layout_works(self, hist_df, cols):
+        df = hist_df
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             _check_plot_works(
                 df.groupby("category").boxplot,
                 column="height",
-                layout=(3, 2),
-                return_type="dict",
-            )
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            _check_plot_works(
-                df.groupby("category").boxplot,
-                column="height",
-                layout=(3, -1),
+                layout=(3, cols),
                 return_type="dict",
             )
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
-
-        df.boxplot(column=["height", "weight", "category"], by="gender", layout=(4, 1))
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))
+        _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(3, 2))
 
-        df.boxplot(column=["height", "weight", "category"], by="gender", layout=(-1, 1))
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))
-
-        df.groupby("classroom").boxplot(
-            column=["height", "weight", "category"], layout=(1, 4), return_type="dict"
+    @pytest.mark.slow
+    @pytest.mark.parametrize("rows, res", [[4, 4], [-1, 3]])
+    def test_grouped_box_layout_axes_shape_rows(self, hist_df, rows, res):
+        df = hist_df
+        df.boxplot(
+            column=["height", "weight", "category"], by="gender", layout=(rows, 1)
         )
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
+        _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(res, 1))
 
+    @pytest.mark.slow
+    @pytest.mark.parametrize("cols, res", [[4, 4], [-1, 3]])
+    def test_grouped_box_layout_axes_shape_cols_groupby(self, hist_df, cols, res):
+        df = hist_df
         df.groupby("classroom").boxplot(
-            column=["height", "weight", "category"], layout=(1, -1), return_type="dict"
+            column=["height", "weight", "category"],
+            layout=(1, cols),
+            return_type="dict",
         )
-        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))
+        _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(1, res))
 
     @pytest.mark.slow
     def test_grouped_box_multiple_axes(self, hist_df):
@@ -518,11 +535,11 @@ def test_grouped_box_multiple_axes(self, hist_df):
         # location should be changed if other test is added
         # which has earlier alphabetical order
         with tm.assert_produces_warning(UserWarning):
-            fig, axes = self.plt.subplots(2, 2)
+            fig, axes = mpl.pyplot.subplots(2, 2)
             df.groupby("category").boxplot(column="height", return_type="axes", ax=axes)
-            self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
+            _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(2, 2))
 
-        fig, axes = self.plt.subplots(2, 3)
+        fig, axes = mpl.pyplot.subplots(2, 3)
         with tm.assert_produces_warning(UserWarning):
             returned = df.boxplot(
                 column=["height", "weight", "category"],
@@ -531,7 +548,7 @@ def test_grouped_box_multiple_axes(self, hist_df):
                 ax=axes[0],
             )
         returned = np.array(list(returned.values))
-        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
+        _check_axes_shape(returned, axes_num=3, layout=(1, 3))
         tm.assert_numpy_array_equal(returned, axes[0])
         assert returned[0].figure is fig
 
@@ -541,20 +558,20 @@ def test_grouped_box_multiple_axes(self, hist_df):
                 column=["height", "weight", "category"], return_type="axes", ax=axes[1]
             )
         returned = np.array(list(returned.values))
-        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
+        _check_axes_shape(returned, axes_num=3, layout=(1, 3))
         tm.assert_numpy_array_equal(returned, axes[1])
         assert returned[0].figure is fig
 
         msg = "The number of passed axes must be 3, the same as the output plot"
         with pytest.raises(ValueError, match=msg):
-            fig, axes = self.plt.subplots(2, 3)
+            fig, axes = mpl.pyplot.subplots(2, 3)
             # pass different number of axes from required
             with tm.assert_produces_warning(UserWarning):
                 axes = df.groupby("classroom").boxplot(ax=axes)
 
     def test_fontsize(self):
         df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
-        self._check_ticks_props(
+        _check_ticks_props(
             df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16
         )
 
diff --git a/pandas/tests/plotting/test_common.py b/pandas/tests/plotting/test_common.py
index faf8278675566..20daf59356248 100644
--- a/pandas/tests/plotting/test_common.py
+++ b/pandas/tests/plotting/test_common.py
@@ -1,17 +1,16 @@
 import pytest
 
-import pandas.util._test_decorators as td
-
 from pandas import DataFrame
 from pandas.tests.plotting.common import (
-    TestPlotBase,
     _check_plot_works,
+    _check_ticks_props,
     _gen_two_subplots,
 )
 
+plt = pytest.importorskip("matplotlib.pyplot")
+
 
-@td.skip_if_no_mpl
-class TestCommon(TestPlotBase):
+class TestCommon:
     def test__check_ticks_props(self):
         # GH 34768
         df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]})
@@ -19,16 +18,16 @@ def test__check_ticks_props(self):
         ax.yaxis.set_tick_params(rotation=30)
         msg = "expected 0.00000 but got "
         with pytest.raises(AssertionError, match=msg):
-            self._check_ticks_props(ax, xrot=0)
+            _check_ticks_props(ax, xrot=0)
         with pytest.raises(AssertionError, match=msg):
-            self._check_ticks_props(ax, xlabelsize=0)
+            _check_ticks_props(ax, xlabelsize=0)
         with pytest.raises(AssertionError, match=msg):
-            self._check_ticks_props(ax, yrot=0)
+            _check_ticks_props(ax, yrot=0)
         with pytest.raises(AssertionError, match=msg):
-            self._check_ticks_props(ax, ylabelsize=0)
+            _check_ticks_props(ax, ylabelsize=0)
 
     def test__gen_two_subplots_with_ax(self):
-        fig = self.plt.gcf()
+        fig = plt.gcf()
         gen = _gen_two_subplots(f=lambda **kwargs: None, fig=fig, ax="test")
         # On the first yield, no subplot should be added since ax was passed
         next(gen)
@@ -42,7 +41,7 @@ def test__gen_two_subplots_with_ax(self):
         assert subplot_geometry == [2, 1, 2]
 
     def test_colorbar_layout(self):
-        fig = self.plt.figure()
+        fig = plt.figure()
 
         axes = fig.subplot_mosaic(
             """
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index dacc0cbea7c9e..dda71328d4e6c 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -37,13 +37,14 @@
     period_range,
 )
 from pandas.core.indexes.timedeltas import timedelta_range
-from pandas.tests.plotting.common import TestPlotBase
+from pandas.tests.plotting.common import _check_ticks_props
 
 from pandas.tseries.offsets import WeekOfMonth
 
+mpl = pytest.importorskip("matplotlib")
 
-@td.skip_if_no_mpl
-class TestTSPlot(TestPlotBase):
+
+class TestTSPlot:
     @pytest.mark.filterwarnings("ignore::UserWarning")
     def test_ts_plot_with_tz(self, tz_aware_fixture):
         # GH2877, GH17173, GH31205, GH31580
@@ -60,7 +61,7 @@ def test_ts_plot_with_tz(self, tz_aware_fixture):
     def test_fontsize_set_correctly(self):
         # For issue #8765
         df = DataFrame(np.random.randn(10, 9), index=range(10))
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         df.plot(fontsize=2, ax=ax)
         for label in ax.get_xticklabels() + ax.get_yticklabels():
             assert label.get_fontsize() == 2
@@ -95,10 +96,10 @@ def test_nonnumeric_exclude(self):
         idx = date_range("1/1/1987", freq="A", periods=3)
         df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx)
 
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         df.plot(ax=ax)  # it works
         assert len(ax.get_lines()) == 1  # B was plotted
-        self.plt.close(fig)
+        mpl.pyplot.close(fig)
 
         msg = "no numeric data to plot"
         with pytest.raises(TypeError, match=msg):
@@ -108,7 +109,7 @@ def test_nonnumeric_exclude(self):
     def test_tsplot_period(self, freq):
         idx = period_range("12/31/1999", freq=freq, periods=100)
         ser = Series(np.random.randn(len(idx)), idx)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         _check_plot_works(ser.plot, ax=ax)
 
     @pytest.mark.parametrize(
@@ -117,12 +118,12 @@ def test_tsplot_period(self, freq):
     def test_tsplot_datetime(self, freq):
         idx = date_range("12/31/1999", freq=freq, periods=100)
         ser = Series(np.random.randn(len(idx)), idx)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         _check_plot_works(ser.plot, ax=ax)
 
     def test_tsplot(self):
         ts = tm.makeTimeSeries()
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ts.plot(style="k", ax=ax)
         color = (0.0, 0.0, 0.0, 1)
         assert color == ax.get_lines()[0].get_color()
@@ -143,7 +144,7 @@ def test_both_style_and_color(self):
 
     @pytest.mark.parametrize("freq", ["ms", "us"])
     def test_high_freq(self, freq):
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         rng = date_range("1/1/2012", periods=100, freq=freq)
         ser = Series(np.random.randn(len(rng)), rng)
         _check_plot_works(ser.plot, ax=ax)
@@ -164,7 +165,7 @@ def check_format_of_first_point(ax, expected_string):
             assert expected_string == ax.format_coord(first_x, first_y)
 
         annual = Series(1, index=date_range("2014-01-01", periods=3, freq="A-DEC"))
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         annual.plot(ax=ax)
         check_format_of_first_point(ax, "t = 2014  y = 1.000000")
 
@@ -239,7 +240,7 @@ def test_line_plot_inferred_freq(self, freq):
         _check_plot_works(ser.plot)
 
     def test_fake_inferred_business(self):
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         rng = date_range("2001-1-1", "2001-1-10")
         ts = Series(range(len(rng)), index=rng)
         ts = concat([ts[:3], ts[5:]])
@@ -266,7 +267,7 @@ def test_uhf(self):
         idx = date_range("2012-6-22 21:59:51.960928", freq="L", periods=500)
         df = DataFrame(np.random.randn(len(idx), 2), index=idx)
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         df.plot(ax=ax)
         axis = ax.get_xaxis()
 
@@ -283,14 +284,14 @@ def test_irreg_hf(self):
         df = DataFrame(np.random.randn(len(idx), 2), index=idx)
 
         irreg = df.iloc[[0, 1, 3, 4]]
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         irreg.plot(ax=ax)
         diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
 
         sec = 1.0 / 24 / 60 / 60
         assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         df2 = df.copy()
         df2.index = df.index.astype(object)
         df2.plot(ax=ax)
@@ -301,7 +302,7 @@ def test_irregular_datetime64_repr_bug(self):
         ser = tm.makeTimeSeries()
         ser = ser.iloc[[0, 1, 2, 7]]
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
 
         ret = ser.plot(ax=ax)
         assert ret is not None
@@ -311,7 +312,7 @@ def test_irregular_datetime64_repr_bug(self):
 
     def test_business_freq(self):
         bts = tm.makePeriodSeries()
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         bts.plot(ax=ax)
         assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal
         idx = ax.get_lines()[0].get_xdata()
@@ -320,7 +321,7 @@ def test_business_freq(self):
     def test_business_freq_convert(self):
         bts = tm.makeTimeSeries(300).asfreq("BM")
         ts = bts.to_period("M")
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         bts.plot(ax=ax)
         assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal
         idx = ax.get_lines()[0].get_xdata()
@@ -330,7 +331,7 @@ def test_freq_with_no_period_alias(self):
         # GH34487
         freq = WeekOfMonth()
         bts = tm.makeTimeSeries(5).asfreq(freq)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         bts.plot(ax=ax)
 
         idx = ax.get_lines()[0].get_xdata()
@@ -342,14 +343,14 @@ def test_nonzero_base(self):
         # GH2571
         idx = date_range("2012-12-20", periods=24, freq="H") + timedelta(minutes=30)
         df = DataFrame(np.arange(24), index=idx)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         df.plot(ax=ax)
         rs = ax.get_lines()[0].get_xdata()
         assert not Index(rs).is_normalized
 
     def test_dataframe(self):
         bts = DataFrame({"a": tm.makeTimeSeries()})
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         bts.plot(ax=ax)
         idx = ax.get_lines()[0].get_xdata()
         tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))
@@ -376,14 +377,14 @@ def _test(ax):
             assert int(result[0]) == expected[0].ordinal
             assert int(result[1]) == expected[1].ordinal
             fig = ax.get_figure()
-            self.plt.close(fig)
+            mpl.pyplot.close(fig)
 
         ser = tm.makeTimeSeries()
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ser.plot(ax=ax)
         _test(ax)
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         df = DataFrame({"a": ser, "b": ser + 1})
         df.plot(ax=ax)
         _test(ax)
@@ -413,7 +414,7 @@ def test_finder_daily(self):
         for n in day_lst:
             rng = bdate_range("1999-1-1", periods=n)
             ser = Series(np.random.randn(len(rng)), rng)
-            _, ax = self.plt.subplots()
+            _, ax = mpl.pyplot.subplots()
             ser.plot(ax=ax)
             xaxis = ax.get_xaxis()
             rs1.append(xaxis.get_majorticklocs()[0])
@@ -421,7 +422,7 @@ def test_finder_daily(self):
             vmin, vmax = ax.get_xlim()
             ax.set_xlim(vmin + 0.9, vmax)
             rs2.append(xaxis.get_majorticklocs()[0])
-            self.plt.close(ax.get_figure())
+            mpl.pyplot.close(ax.get_figure())
 
         assert rs1 == xpl1
         assert rs2 == xpl2
@@ -435,7 +436,7 @@ def test_finder_quarterly(self):
         for n in yrs:
             rng = period_range("1987Q2", periods=int(n * 4), freq="Q")
             ser = Series(np.random.randn(len(rng)), rng)
-            _, ax = self.plt.subplots()
+            _, ax = mpl.pyplot.subplots()
             ser.plot(ax=ax)
             xaxis = ax.get_xaxis()
             rs1.append(xaxis.get_majorticklocs()[0])
@@ -443,7 +444,7 @@ def test_finder_quarterly(self):
             (vmin, vmax) = ax.get_xlim()
             ax.set_xlim(vmin + 0.9, vmax)
             rs2.append(xaxis.get_majorticklocs()[0])
-            self.plt.close(ax.get_figure())
+            mpl.pyplot.close(ax.get_figure())
 
         assert rs1 == xpl1
         assert rs2 == xpl2
@@ -457,7 +458,7 @@ def test_finder_monthly(self):
         for n in yrs:
             rng = period_range("1987Q2", periods=int(n * 12), freq="M")
             ser = Series(np.random.randn(len(rng)), rng)
-            _, ax = self.plt.subplots()
+            _, ax = mpl.pyplot.subplots()
             ser.plot(ax=ax)
             xaxis = ax.get_xaxis()
             rs1.append(xaxis.get_majorticklocs()[0])
@@ -465,7 +466,7 @@ def test_finder_monthly(self):
             vmin, vmax = ax.get_xlim()
             ax.set_xlim(vmin + 0.9, vmax)
             rs2.append(xaxis.get_majorticklocs()[0])
-            self.plt.close(ax.get_figure())
+            mpl.pyplot.close(ax.get_figure())
 
         assert rs1 == xpl1
         assert rs2 == xpl2
@@ -473,7 +474,7 @@ def test_finder_monthly(self):
     def test_finder_monthly_long(self):
         rng = period_range("1988Q1", periods=24 * 12, freq="M")
         ser = Series(np.random.randn(len(rng)), rng)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ser.plot(ax=ax)
         xaxis = ax.get_xaxis()
         rs = xaxis.get_majorticklocs()[0]
@@ -487,11 +488,11 @@ def test_finder_annual(self):
         for nyears in [5, 10, 19, 49, 99, 199, 599, 1001]:
             rng = period_range("1987", periods=nyears, freq="A")
             ser = Series(np.random.randn(len(rng)), rng)
-            _, ax = self.plt.subplots()
+            _, ax = mpl.pyplot.subplots()
             ser.plot(ax=ax)
             xaxis = ax.get_xaxis()
             rs.append(xaxis.get_majorticklocs()[0])
-            self.plt.close(ax.get_figure())
+            mpl.pyplot.close(ax.get_figure())
 
         assert rs == xp
 
@@ -500,7 +501,7 @@ def test_finder_minutely(self):
         nminutes = 50 * 24 * 60
         rng = date_range("1/1/1999", freq="Min", periods=nminutes)
         ser = Series(np.random.randn(len(rng)), rng)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ser.plot(ax=ax)
         xaxis = ax.get_xaxis()
         rs = xaxis.get_majorticklocs()[0]
@@ -512,7 +513,7 @@ def test_finder_hourly(self):
         nhours = 23
         rng = date_range("1/1/1999", freq="H", periods=nhours)
         ser = Series(np.random.randn(len(rng)), rng)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ser.plot(ax=ax)
         xaxis = ax.get_xaxis()
         rs = xaxis.get_majorticklocs()[0]
@@ -523,7 +524,7 @@ def test_finder_hourly(self):
     def test_gaps(self):
         ts = tm.makeTimeSeries()
         ts.iloc[5:25] = np.nan
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ts.plot(ax=ax)
         lines = ax.get_lines()
         assert len(lines) == 1
@@ -535,13 +536,13 @@ def test_gaps(self):
         assert isinstance(data, np.ma.core.MaskedArray)
         mask = data.mask
         assert mask[5:25, 1].all()
-        self.plt.close(ax.get_figure())
+        mpl.pyplot.close(ax.get_figure())
 
         # irregular
         ts = tm.makeTimeSeries()
         ts = ts.iloc[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
         ts.iloc[2:5] = np.nan
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot(ax=ax)
         lines = ax.get_lines()
         assert len(lines) == 1
@@ -553,13 +554,13 @@ def test_gaps(self):
         assert isinstance(data, np.ma.core.MaskedArray)
         mask = data.mask
         assert mask[2:5, 1].all()
-        self.plt.close(ax.get_figure())
+        mpl.pyplot.close(ax.get_figure())
 
         # non-ts
         idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
         ser = Series(np.random.randn(len(idx)), idx)
         ser.iloc[2:5] = np.nan
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ser.plot(ax=ax)
         lines = ax.get_lines()
         assert len(lines) == 1
@@ -574,7 +575,7 @@ def test_gaps(self):
     def test_gap_upsample(self):
         low = tm.makeTimeSeries()
         low.iloc[5:25] = np.nan
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         low.plot(ax=ax)
 
         idxh = date_range(low.index[0], low.index[-1], freq="12h")
@@ -595,7 +596,7 @@ def test_gap_upsample(self):
     def test_secondary_y(self):
         ser = Series(np.random.randn(10))
         ser2 = Series(np.random.randn(10))
-        fig, _ = self.plt.subplots()
+        fig, _ = mpl.pyplot.subplots()
         ax = ser.plot(secondary_y=True)
         assert hasattr(ax, "left_ax")
         assert not hasattr(ax, "right_ax")
@@ -605,12 +606,12 @@ def test_secondary_y(self):
         tm.assert_series_equal(ser, xp)
         assert ax.get_yaxis().get_ticks_position() == "right"
         assert not axes[0].get_yaxis().get_visible()
-        self.plt.close(fig)
+        mpl.pyplot.close(fig)
 
-        _, ax2 = self.plt.subplots()
+        _, ax2 = mpl.pyplot.subplots()
         ser2.plot(ax=ax2)
         assert ax2.get_yaxis().get_ticks_position() == "left"
-        self.plt.close(ax2.get_figure())
+        mpl.pyplot.close(ax2.get_figure())
 
         ax = ser2.plot()
         ax2 = ser.plot(secondary_y=True)
@@ -624,7 +625,7 @@ def test_secondary_y_ts(self):
         idx = date_range("1/1/2000", periods=10)
         ser = Series(np.random.randn(10), idx)
         ser2 = Series(np.random.randn(10), idx)
-        fig, _ = self.plt.subplots()
+        fig, _ = mpl.pyplot.subplots()
         ax = ser.plot(secondary_y=True)
         assert hasattr(ax, "left_ax")
         assert not hasattr(ax, "right_ax")
@@ -634,12 +635,12 @@ def test_secondary_y_ts(self):
         tm.assert_series_equal(ser, xp)
         assert ax.get_yaxis().get_ticks_position() == "right"
         assert not axes[0].get_yaxis().get_visible()
-        self.plt.close(fig)
+        mpl.pyplot.close(fig)
 
-        _, ax2 = self.plt.subplots()
+        _, ax2 = mpl.pyplot.subplots()
         ser2.plot(ax=ax2)
         assert ax2.get_yaxis().get_ticks_position() == "left"
-        self.plt.close(ax2.get_figure())
+        mpl.pyplot.close(ax2.get_figure())
 
         ax = ser2.plot()
         ax2 = ser.plot(secondary_y=True)
@@ -648,7 +649,7 @@ def test_secondary_y_ts(self):
     @td.skip_if_no_scipy
     def test_secondary_kde(self):
         ser = Series(np.random.randn(10))
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         ax = ser.plot(secondary_y=True, kind="density", ax=ax)
         assert hasattr(ax, "left_ax")
         assert not hasattr(ax, "right_ax")
@@ -657,7 +658,7 @@ def test_secondary_kde(self):
 
     def test_secondary_bar(self):
         ser = Series(np.random.randn(10))
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         ser.plot(secondary_y=True, kind="bar", ax=ax)
         axes = fig.get_axes()
         assert axes[1].get_yaxis().get_ticks_position() == "right"
@@ -682,7 +683,7 @@ def test_mixed_freq_regular_first(self):
         s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15]]
 
         # it works!
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         s1.plot(ax=ax)
 
         ax2 = s2.plot(style="g", ax=ax)
@@ -701,7 +702,7 @@ def test_mixed_freq_regular_first(self):
     def test_mixed_freq_irregular_first(self):
         s1 = tm.makeTimeSeries()
         s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15]]
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         s2.plot(style="g", ax=ax)
         s1.plot(ax=ax)
         assert not hasattr(ax, "freq")
@@ -715,7 +716,7 @@ def test_mixed_freq_regular_first_df(self):
         # GH 9852
         s1 = tm.makeTimeSeries().to_frame()
         s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         s1.plot(ax=ax)
         ax2 = s2.plot(style="g", ax=ax)
         lines = ax2.get_lines()
@@ -732,7 +733,7 @@ def test_mixed_freq_irregular_first_df(self):
         # GH 9852
         s1 = tm.makeTimeSeries().to_frame()
         s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         s2.plot(style="g", ax=ax)
         s1.plot(ax=ax)
         assert not hasattr(ax, "freq")
@@ -747,7 +748,7 @@ def test_mixed_freq_hf_first(self):
         idxl = date_range("1/1/1999", periods=12, freq="M")
         high = Series(np.random.randn(len(idxh)), idxh)
         low = Series(np.random.randn(len(idxl)), idxl)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         high.plot(ax=ax)
         low.plot(ax=ax)
         for line in ax.get_lines():
@@ -760,7 +761,7 @@ def test_mixed_freq_alignment(self):
         ts = Series(ts_data, index=ts_ind)
         ts2 = ts.asfreq("T").interpolate()
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot(ax=ax)
         ts2.plot(style="r", ax=ax)
 
@@ -771,20 +772,20 @@ def test_mixed_freq_lf_first(self):
         idxl = date_range("1/1/1999", periods=12, freq="M")
         high = Series(np.random.randn(len(idxh)), idxh)
         low = Series(np.random.randn(len(idxl)), idxl)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         low.plot(legend=True, ax=ax)
         high.plot(legend=True, ax=ax)
         for line in ax.get_lines():
             assert PeriodIndex(data=line.get_xdata()).freq == "D"
         leg = ax.get_legend()
         assert len(leg.texts) == 2
-        self.plt.close(ax.get_figure())
+        mpl.pyplot.close(ax.get_figure())
 
         idxh = date_range("1/1/1999", periods=240, freq="T")
         idxl = date_range("1/1/1999", periods=4, freq="H")
         high = Series(np.random.randn(len(idxh)), idxh)
         low = Series(np.random.randn(len(idxl)), idxl)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         low.plot(ax=ax)
         high.plot(ax=ax)
         for line in ax.get_lines():
@@ -795,7 +796,7 @@ def test_mixed_freq_irreg_period(self):
         irreg = ts.iloc[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
         rng = period_range("1/3/2000", periods=30, freq="B")
         ps = Series(np.random.randn(len(rng)), rng)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         irreg.plot(ax=ax)
         ps.plot(ax=ax)
 
@@ -806,7 +807,7 @@ def test_mixed_freq_shared_ax(self):
         s1 = Series(range(len(idx1)), idx1)
         s2 = Series(range(len(idx2)), idx2)
 
-        fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)
+        fig, (ax1, ax2) = mpl.pyplot.subplots(nrows=2, sharex=True)
         s1.plot(ax=ax1)
         s2.plot(ax=ax2)
 
@@ -815,7 +816,7 @@ def test_mixed_freq_shared_ax(self):
         assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0]
 
         # using twinx
-        fig, ax1 = self.plt.subplots()
+        fig, ax1 = mpl.pyplot.subplots()
         ax2 = ax1.twinx()
         s1.plot(ax=ax1)
         s2.plot(ax=ax2)
@@ -832,7 +833,7 @@ def test_mixed_freq_shared_ax(self):
         #         ax2.lines[0].get_xydata()[0, 0])
 
     def test_nat_handling(self):
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
 
         dti = DatetimeIndex(["2015-01-01", NaT, "2015-01-03"])
         s = Series(range(len(dti)), dti)
@@ -847,7 +848,7 @@ def test_to_weekly_resampling(self):
         idxl = date_range("1/1/1999", periods=12, freq="M")
         high = Series(np.random.randn(len(idxh)), idxh)
         low = Series(np.random.randn(len(idxl)), idxl)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         high.plot(ax=ax)
         low.plot(ax=ax)
         for line in ax.get_lines():
@@ -858,7 +859,7 @@ def test_from_weekly_resampling(self):
         idxl = date_range("1/1/1999", periods=12, freq="M")
         high = Series(np.random.randn(len(idxh)), idxh)
         low = Series(np.random.randn(len(idxl)), idxl)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         low.plot(ax=ax)
         high.plot(ax=ax)
 
@@ -884,7 +885,7 @@ def test_from_resampling_area_line_mixed(self):
 
         # low to high
         for kind1, kind2 in [("line", "area"), ("area", "line")]:
-            _, ax = self.plt.subplots()
+            _, ax = mpl.pyplot.subplots()
             low.plot(kind=kind1, stacked=True, ax=ax)
             high.plot(kind=kind2, stacked=True, ax=ax)
 
@@ -927,7 +928,7 @@ def test_from_resampling_area_line_mixed(self):
 
         # high to low
         for kind1, kind2 in [("line", "area"), ("area", "line")]:
-            _, ax = self.plt.subplots()
+            _, ax = mpl.pyplot.subplots()
             high.plot(kind=kind1, stacked=True, ax=ax)
             low.plot(kind=kind2, stacked=True, ax=ax)
 
@@ -974,7 +975,7 @@ def test_mixed_freq_second_millisecond(self):
         high = Series(np.random.randn(len(idxh)), idxh)
         low = Series(np.random.randn(len(idxl)), idxl)
         # high to low
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         high.plot(ax=ax)
         low.plot(ax=ax)
         assert len(ax.get_lines()) == 2
@@ -983,7 +984,7 @@ def test_mixed_freq_second_millisecond(self):
         tm.close()
 
         # low to high
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         low.plot(ax=ax)
         high.plot(ax=ax)
         assert len(ax.get_lines()) == 2
@@ -1000,7 +1001,7 @@ def test_irreg_dtypes(self):
         idx = date_range("1/1/2000", periods=10)
         idx = idx[[0, 2, 5, 9]].astype(object)
         df = DataFrame(np.random.randn(len(idx), 3), idx)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         _check_plot_works(df.plot, ax=ax)
 
     def test_time(self):
@@ -1010,7 +1011,7 @@ def test_time(self):
         df = DataFrame(
             {"a": np.random.randn(len(ts)), "b": np.random.randn(len(ts))}, index=ts
         )
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         df.plot(ax=ax)
 
         # verify tick labels
@@ -1034,7 +1035,7 @@ def test_time_change_xlim(self):
         df = DataFrame(
             {"a": np.random.randn(len(ts)), "b": np.random.randn(len(ts))}, index=ts
         )
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         df.plot(ax=ax)
 
         # verify tick labels
@@ -1075,7 +1076,7 @@ def test_time_musec(self):
         df = DataFrame(
             {"a": np.random.randn(len(ts)), "b": np.random.randn(len(ts))}, index=ts
         )
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         ax = df.plot(ax=ax)
 
         # verify tick labels
@@ -1104,7 +1105,7 @@ def test_secondary_upsample(self):
         idxl = date_range("1/1/1999", periods=12, freq="M")
         high = Series(np.random.randn(len(idxh)), idxh)
         low = Series(np.random.randn(len(idxl)), idxl)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         low.plot(ax=ax)
         ax = high.plot(secondary_y=True, ax=ax)
         for line in ax.get_lines():
@@ -1115,7 +1116,7 @@ def test_secondary_upsample(self):
             assert PeriodIndex(line.get_xdata()).freq == "D"
 
     def test_secondary_legend(self):
-        fig = self.plt.figure()
+        fig = mpl.pyplot.figure()
         ax = fig.add_subplot(211)
 
         # ts
@@ -1134,9 +1135,9 @@ def test_secondary_legend(self):
 
         # TODO: color cycle problems
         assert len(colors) == 4
-        self.plt.close(fig)
+        mpl.pyplot.close(fig)
 
-        fig = self.plt.figure()
+        fig = mpl.pyplot.figure()
         ax = fig.add_subplot(211)
         df.plot(secondary_y=["A", "C"], mark_right=False, ax=ax)
         leg = ax.get_legend()
@@ -1145,23 +1146,23 @@ def test_secondary_legend(self):
         assert leg.get_texts()[1].get_text() == "B"
         assert leg.get_texts()[2].get_text() == "C"
         assert leg.get_texts()[3].get_text() == "D"
-        self.plt.close(fig)
+        mpl.pyplot.close(fig)
 
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         df.plot(kind="bar", secondary_y=["A"], ax=ax)
         leg = ax.get_legend()
         assert leg.get_texts()[0].get_text() == "A (right)"
         assert leg.get_texts()[1].get_text() == "B"
-        self.plt.close(fig)
+        mpl.pyplot.close(fig)
 
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         df.plot(kind="bar", secondary_y=["A"], mark_right=False, ax=ax)
         leg = ax.get_legend()
         assert leg.get_texts()[0].get_text() == "A"
         assert leg.get_texts()[1].get_text() == "B"
-        self.plt.close(fig)
+        mpl.pyplot.close(fig)
 
-        fig = self.plt.figure()
+        fig = mpl.pyplot.figure()
         ax = fig.add_subplot(211)
         df = tm.makeTimeDataFrame()
         ax = df.plot(secondary_y=["C", "D"], ax=ax)
@@ -1174,11 +1175,11 @@ def test_secondary_legend(self):
 
         # TODO: color cycle problems
         assert len(colors) == 4
-        self.plt.close(fig)
+        mpl.pyplot.close(fig)
 
         # non-ts
         df = tm.makeDataFrame()
-        fig = self.plt.figure()
+        fig = mpl.pyplot.figure()
         ax = fig.add_subplot(211)
         ax = df.plot(secondary_y=["A", "B"], ax=ax)
         leg = ax.get_legend()
@@ -1190,9 +1191,9 @@ def test_secondary_legend(self):
 
         # TODO: color cycle problems
         assert len(colors) == 4
-        self.plt.close()
+        mpl.pyplot.close()
 
-        fig = self.plt.figure()
+        fig = mpl.pyplot.figure()
         ax = fig.add_subplot(211)
         ax = df.plot(secondary_y=["C", "D"], ax=ax)
         leg = ax.get_legend()
@@ -1209,7 +1210,7 @@ def test_secondary_legend(self):
     def test_format_date_axis(self):
         rng = date_range("1/1/2012", periods=12, freq="M")
         df = DataFrame(np.random.randn(len(rng), 3), rng)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df.plot(ax=ax)
         xaxis = ax.get_xaxis()
         for line in xaxis.get_ticklabels():
@@ -1219,7 +1220,7 @@ def test_format_date_axis(self):
     def test_ax_plot(self):
         x = date_range(start="2012-01-02", periods=10, freq="D")
         y = list(range(len(x)))
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         lines = ax.plot(x, y, label="Y")
         tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)
 
@@ -1230,7 +1231,7 @@ def test_mpl_nopandas(self):
 
         kw = {"fmt": "-", "lw": 4}
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax.plot_date([x.toordinal() for x in dates], values1, **kw)
         ax.plot_date([x.toordinal() for x in dates], values2, **kw)
 
@@ -1249,7 +1250,7 @@ def test_irregular_ts_shared_ax_xlim(self):
         ts_irregular = ts.iloc[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
 
         # plot the left section of the irregular series, then the right section
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ts_irregular[:5].plot(ax=ax)
         ts_irregular[5:].plot(ax=ax)
 
@@ -1265,7 +1266,7 @@ def test_secondary_y_non_ts_xlim(self):
         s1 = Series(1, index=index_1)
         s2 = Series(2, index=index_2)
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         s1.plot(ax=ax)
         left_before, right_before = ax.get_xlim()
         s2.plot(secondary_y=True, ax=ax)
@@ -1281,7 +1282,7 @@ def test_secondary_y_regular_ts_xlim(self):
         s1 = Series(1, index=index_1)
         s2 = Series(2, index=index_2)
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         s1.plot(ax=ax)
         left_before, right_before = ax.get_xlim()
         s2.plot(secondary_y=True, ax=ax)
@@ -1295,7 +1296,7 @@ def test_secondary_y_mixed_freq_ts_xlim(self):
         rng = date_range("2000-01-01", periods=10000, freq="min")
         ts = Series(1, index=rng)
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ts.plot(ax=ax)
         left_before, right_before = ax.get_xlim()
         ts.resample("D").mean().plot(secondary_y=True, ax=ax)
@@ -1312,7 +1313,7 @@ def test_secondary_y_irregular_ts_xlim(self):
         ts = tm.makeTimeSeries()[:20]
         ts_irregular = ts.iloc[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ts_irregular[:5].plot(ax=ax)
         # plot higher-x values on secondary axis
         ts_irregular[5:].plot(secondary_y=True, ax=ax)
@@ -1326,7 +1327,7 @@ def test_secondary_y_irregular_ts_xlim(self):
     def test_plot_outofbounds_datetime(self):
         # 2579 - checking this does not raise
         values = [date(1677, 1, 1), date(1677, 1, 2)]
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax.plot(values)
 
         values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
@@ -1337,9 +1338,9 @@ def test_format_timedelta_ticks_narrow(self):
 
         rng = timedelta_range("0", periods=10, freq="ns")
         df = DataFrame(np.random.randn(len(rng), 3), rng)
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         df.plot(fontsize=2, ax=ax)
-        self.plt.draw()
+        mpl.pyplot.draw()
         labels = ax.get_xticklabels()
 
         result_labels = [x.get_text() for x in labels]
@@ -1361,9 +1362,9 @@ def test_format_timedelta_ticks_wide(self):
 
         rng = timedelta_range("0", periods=10, freq="1 d")
         df = DataFrame(np.random.randn(len(rng), 3), rng)
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         ax = df.plot(fontsize=2, ax=ax)
-        self.plt.draw()
+        mpl.pyplot.draw()
         labels = ax.get_xticklabels()
 
         result_labels = [x.get_text() for x in labels]
@@ -1373,19 +1374,19 @@ def test_format_timedelta_ticks_wide(self):
     def test_timedelta_plot(self):
         # test issue #8711
         s = Series(range(5), timedelta_range("1day", periods=5))
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         _check_plot_works(s.plot, ax=ax)
 
         # test long period
         index = timedelta_range("1 day 2 hr 30 min 10 s", periods=10, freq="1 d")
         s = Series(np.random.randn(len(index)), index)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         _check_plot_works(s.plot, ax=ax)
 
         # test short period
         index = timedelta_range("1 day 2 hr 30 min 10 s", periods=10, freq="1 ns")
         s = Series(np.random.randn(len(index)), index)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         _check_plot_works(s.plot, ax=ax)
 
     def test_hist(self):
@@ -1394,7 +1395,7 @@ def test_hist(self):
         x = rng
         w1 = np.arange(0, 1, 0.1)
         w2 = np.arange(0, 1, 0.1)[::-1]
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax.hist([x, x], weights=[w1, w2])
 
     def test_overlapping_datetime(self):
@@ -1418,7 +1419,7 @@ def test_overlapping_datetime(self):
 
         # plot first series, then add the second series to those axes,
         # then try adding the first series again
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         s1.plot(ax=ax)
         s2.plot(ax=ax)
         s1.plot(ax=ax)
@@ -1440,9 +1441,9 @@ def test_matplotlib_scatter_datetime64(self):
         # https://github.com/matplotlib/matplotlib/issues/11391
         df = DataFrame(np.random.RandomState(0).rand(10, 2), columns=["x", "y"])
         df["time"] = date_range("2018-01-01", periods=10, freq="D")
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         ax.scatter(x="time", y="y", data=df)
-        self.plt.draw()
+        mpl.pyplot.draw()
         label = ax.get_xticklabels()[0]
         expected = "2018-01-01"
         assert label.get_text() == expected
@@ -1453,25 +1454,25 @@ def test_check_xticks_rot(self):
         x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-03"])
         df = DataFrame({"x": x, "y": [1, 2, 3]})
         axes = df.plot(x="x", y="y")
-        self._check_ticks_props(axes, xrot=0)
+        _check_ticks_props(axes, xrot=0)
 
         # irregular time series
         x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"])
         df = DataFrame({"x": x, "y": [1, 2, 3]})
         axes = df.plot(x="x", y="y")
-        self._check_ticks_props(axes, xrot=30)
+        _check_ticks_props(axes, xrot=30)
 
         # use timeseries index or not
         axes = df.set_index("x").plot(y="y", use_index=True)
-        self._check_ticks_props(axes, xrot=30)
+        _check_ticks_props(axes, xrot=30)
         axes = df.set_index("x").plot(y="y", use_index=False)
-        self._check_ticks_props(axes, xrot=0)
+        _check_ticks_props(axes, xrot=0)
 
         # separate subplots
         axes = df.plot(x="x", y="y", subplots=True, sharex=True)
-        self._check_ticks_props(axes, xrot=30)
+        _check_ticks_props(axes, xrot=30)
         axes = df.plot(x="x", y="y", subplots=True, sharex=False)
-        self._check_ticks_props(axes, xrot=0)
+        _check_ticks_props(axes, xrot=0)
 
 
 def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py
index 8cde3062d09f9..43e35e94164c3 100644
--- a/pandas/tests/plotting/test_groupby.py
+++ b/pandas/tests/plotting/test_groupby.py
@@ -4,19 +4,21 @@
 import numpy as np
 import pytest
 
-import pandas.util._test_decorators as td
-
 from pandas import (
     DataFrame,
     Index,
     Series,
 )
 import pandas._testing as tm
-from pandas.tests.plotting.common import TestPlotBase
+from pandas.tests.plotting.common import (
+    _check_axes_shape,
+    _check_legend_labels,
+)
+
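+# skip every test in this module when matplotlib is not installed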
+pytest.importorskip("matplotlib")
 
 
-@td.skip_if_no_mpl
-class TestDataFrameGroupByPlots(TestPlotBase):
+class TestDataFrameGroupByPlots:
     def test_series_groupby_plotting_nominally_works(self):
         n = 10
         weight = Series(np.random.normal(166, 20, size=n))
@@ -80,11 +82,9 @@ def test_groupby_hist_frame_with_legend(self, column, expected_axes_num):
         g = df.groupby("c")
 
         for axes in g.hist(legend=True, column=column):
-            self._check_axes_shape(
-                axes, axes_num=expected_axes_num, layout=expected_layout
-            )
+            _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)
             for ax, expected_label in zip(axes[0], expected_labels):
-                self._check_legend_labels(ax, expected_label)
+                _check_legend_labels(ax, expected_label)
 
     @pytest.mark.parametrize("column", [None, "b"])
     def test_groupby_hist_frame_with_legend_raises(self, column):
@@ -103,8 +103,8 @@ def test_groupby_hist_series_with_legend(self):
         g = df.groupby("c")
 
         for ax in g["a"].hist(legend=True):
-            self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
-            self._check_legend_labels(ax, ["1", "2"])
+            _check_axes_shape(ax, axes_num=1, layout=(1, 1))
+            _check_legend_labels(ax, ["1", "2"])
 
     def test_groupby_hist_series_with_legend_raises(self):
         # GH 6279 - SeriesGroupBy histogram with legend and label raises
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 3392c309e0329..a033baa1a5f52 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -14,18 +14,27 @@
 )
 import pandas._testing as tm
 from pandas.tests.plotting.common import (
-    TestPlotBase,
+    _check_ax_scales,
+    _check_axes_shape,
+    _check_colors,
+    _check_legend_labels,
+    _check_patches_all_filled,
     _check_plot_works,
+    _check_text_labels,
+    _check_ticks_props,
+    get_x_axis,
+    get_y_axis,
 )
 
+mpl = pytest.importorskip("matplotlib")
+
 
 @pytest.fixture
 def ts():
     return tm.makeTimeSeries(name="ts")
 
 
-@td.skip_if_no_mpl
-class TestSeriesPlots(TestPlotBase):
+class TestSeriesPlots:
     def test_hist_legacy(self, ts):
         _check_plot_works(ts.hist)
         _check_plot_works(ts.hist, grid=False)
@@ -36,13 +45,13 @@ def test_hist_legacy(self, ts):
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             _check_plot_works(ts.hist, by=ts.index.month, bins=5)
 
-        fig, ax = self.plt.subplots(1, 1)
+        fig, ax = mpl.pyplot.subplots(1, 1)
         _check_plot_works(ts.hist, ax=ax, default_axes=True)
         _check_plot_works(ts.hist, ax=ax, figure=fig, default_axes=True)
         _check_plot_works(ts.hist, figure=fig, default_axes=True)
         tm.close()
 
-        fig, (ax1, ax2) = self.plt.subplots(1, 2)
+        fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2)
         _check_plot_works(ts.hist, figure=fig, ax=ax1, default_axes=True)
         _check_plot_works(ts.hist, figure=fig, ax=ax2, default_axes=True)
 
@@ -68,42 +77,33 @@ def test_hist_layout(self, hist_df):
             df.height.hist(layout=[1, 1])
 
     @pytest.mark.slow
-    def test_hist_layout_with_by(self, hist_df):
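+    # parameters: grouping column, requested layout, expected number of axes, resolved layout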
+    @pytest.mark.parametrize(
+        "by, layout, axes_num, res_layout",
+        [
+            ["gender", (2, 1), 2, (2, 1)],
+            ["gender", (3, -1), 2, (3, 1)],
+            ["category", (4, 1), 4, (4, 1)],
+            ["category", (2, -1), 4, (2, 2)],
+            ["category", (3, -1), 4, (3, 2)],
+            ["category", (-1, 4), 4, (1, 4)],
+            ["classroom", (2, 2), 3, (2, 2)],
+        ],
+    )
+    def test_hist_layout_with_by(self, hist_df, by, layout, axes_num, res_layout):
         df = hist_df
 
         # _check_plot_works adds an `ax` kwarg to the method call
         # so we get a warning about an axis being cleared, even
         # though we don't explicitly pass one, see GH #13188
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
-        self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
-
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1))
-        self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
-
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1))
-        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
-
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1))
-        self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
-
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1))
-        self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
-
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4))
-        self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
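+            # getattr maps the parametrized column name (e.g. "gender") to the grouping Series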
+            axes = _check_plot_works(df.height.hist, by=getattr(df, by), layout=layout)
+        _check_axes_shape(axes, axes_num=axes_num, layout=res_layout)
 
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2))
-        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+    def test_hist_layout_with_by_shape(self, hist_df):
+        df = hist_df
 
         axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
-        self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
+        _check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
 
     def test_hist_no_overlap(self):
         from matplotlib.pyplot import (
@@ -124,7 +124,7 @@ def test_hist_no_overlap(self):
     def test_hist_by_no_extra_plots(self, hist_df):
         df = hist_df
         df.height.hist(by=df.gender)
-        assert len(self.plt.get_fignums()) == 1
+        assert len(mpl.pyplot.get_fignums()) == 1
 
     def test_plot_fails_when_ax_differs_from_figure(self, ts):
         from pylab import figure
@@ -149,7 +149,7 @@ def test_histtype_argument(self, histtype, expected):
         # GH23992 Verify functioning of histtype argument
         ser = Series(np.random.randint(1, 10))
         ax = ser.hist(histtype=histtype)
-        self._check_patches_all_filled(ax, filled=expected)
+        _check_patches_all_filled(ax, filled=expected)
 
     @pytest.mark.parametrize(
         "by, expected_axes_num, expected_layout", [(None, 1, (1, 1)), ("b", 2, (1, 2))]
@@ -162,8 +162,8 @@ def test_hist_with_legend(self, by, expected_axes_num, expected_layout):
 
         # Use default_axes=True when plotting method generate subplots itself
         axes = _check_plot_works(s.hist, default_axes=True, legend=True, by=by)
-        self._check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)
-        self._check_legend_labels(axes, "a")
+        _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)
+        _check_legend_labels(axes, "a")
 
     @pytest.mark.parametrize("by", [None, "b"])
     def test_hist_with_legend_raises(self, by):
@@ -176,143 +176,154 @@ def test_hist_with_legend_raises(self, by):
             s.hist(legend=True, by=by, label="c")
 
     def test_hist_kwargs(self, ts):
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.hist(bins=5, ax=ax)
         assert len(ax.patches) == 5
-        self._check_text_labels(ax.yaxis.get_label(), "Frequency")
+        _check_text_labels(ax.yaxis.get_label(), "Frequency")
         tm.close()
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.hist(orientation="horizontal", ax=ax)
-        self._check_text_labels(ax.xaxis.get_label(), "Frequency")
+        _check_text_labels(ax.xaxis.get_label(), "Frequency")
         tm.close()
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.hist(align="left", stacked=True, ax=ax)
         tm.close()
 
     @pytest.mark.xfail(reason="Api changed in 3.6.0")
     @td.skip_if_no_scipy
     def test_hist_kde(self, ts):
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.hist(logy=True, ax=ax)
-        self._check_ax_scales(ax, yaxis="log")
+        _check_ax_scales(ax, yaxis="log")
         xlabels = ax.get_xticklabels()
         # ticks are values, thus ticklabels are blank
-        self._check_text_labels(xlabels, [""] * len(xlabels))
+        _check_text_labels(xlabels, [""] * len(xlabels))
         ylabels = ax.get_yticklabels()
-        self._check_text_labels(ylabels, [""] * len(ylabels))
+        _check_text_labels(ylabels, [""] * len(ylabels))
 
         _check_plot_works(ts.plot.kde)
         _check_plot_works(ts.plot.density)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.kde(logy=True, ax=ax)
-        self._check_ax_scales(ax, yaxis="log")
+        _check_ax_scales(ax, yaxis="log")
         xlabels = ax.get_xticklabels()
-        self._check_text_labels(xlabels, [""] * len(xlabels))
+        _check_text_labels(xlabels, [""] * len(xlabels))
         ylabels = ax.get_yticklabels()
-        self._check_text_labels(ylabels, [""] * len(ylabels))
+        _check_text_labels(ylabels, [""] * len(ylabels))
 
     @td.skip_if_no_scipy
     def test_hist_kde_color(self, ts):
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.hist(logy=True, bins=10, color="b", ax=ax)
-        self._check_ax_scales(ax, yaxis="log")
+        _check_ax_scales(ax, yaxis="log")
         assert len(ax.patches) == 10
-        self._check_colors(ax.patches, facecolors=["b"] * 10)
+        _check_colors(ax.patches, facecolors=["b"] * 10)
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.kde(logy=True, color="r", ax=ax)
-        self._check_ax_scales(ax, yaxis="log")
+        _check_ax_scales(ax, yaxis="log")
         lines = ax.get_lines()
         assert len(lines) == 1
-        self._check_colors(lines, ["r"])
+        _check_colors(lines, ["r"])
 
 
-@td.skip_if_no_mpl
-class TestDataFramePlots(TestPlotBase):
+class TestDataFramePlots:
     @pytest.mark.slow
     def test_hist_df_legacy(self, hist_df):
-        from matplotlib.patches import Rectangle
-
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             _check_plot_works(hist_df.hist)
 
+    @pytest.mark.slow
+    def test_hist_df_legacy_layout(self):
         # make sure layout is handled
-        df = DataFrame(np.random.randn(100, 2))
+        df = DataFrame(np.random.randn(10, 2))
         df[2] = to_datetime(
             np.random.randint(
                 812419200000000000,
                 819331200000000000,
-                size=100,
+                size=10,
                 dtype=np.int64,
             )
         )
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             axes = _check_plot_works(df.hist, grid=False)
-        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+        _check_axes_shape(axes, axes_num=3, layout=(2, 2))
         assert not axes[1, 1].get_visible()
 
         _check_plot_works(df[[2]].hist)
-        df = DataFrame(np.random.randn(100, 1))
+
+    @pytest.mark.slow
+    def test_hist_df_legacy_layout2(self):
+        df = DataFrame(np.random.randn(10, 1))
         _check_plot_works(df.hist)
 
+    @pytest.mark.slow
+    def test_hist_df_legacy_layout3(self):
         # make sure layout is handled
-        df = DataFrame(np.random.randn(100, 5))
+        df = DataFrame(np.random.randn(10, 5))
         df[5] = to_datetime(
             np.random.randint(
                 812419200000000000,
                 819331200000000000,
-                size=100,
+                size=10,
                 dtype=np.int64,
             )
         )
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             axes = _check_plot_works(df.hist, layout=(4, 2))
-        self._check_axes_shape(axes, axes_num=6, layout=(4, 2))
+        _check_axes_shape(axes, axes_num=6, layout=(4, 2))
 
+    @pytest.mark.slow
+    @pytest.mark.parametrize(
+        "kwargs", [{"sharex": True, "sharey": True}, {"figsize": (8, 10)}, {"bins": 5}]
+    )
+    def test_hist_df_legacy_layout_kwargs(self, kwargs):
+        df = DataFrame(np.random.randn(10, 5))
+        df[5] = to_datetime(
+            np.random.randint(
+                812419200000000000,
+                819331200000000000,
+                size=10,
+                dtype=np.int64,
+            )
+        )
         # make sure sharex and sharey are handled
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            _check_plot_works(df.hist, sharex=True, sharey=True)
-
         # handle figsize arg
-        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            _check_plot_works(df.hist, figsize=(8, 10))
-
         # check bins argument
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
-            _check_plot_works(df.hist, bins=5)
+            _check_plot_works(df.hist, **kwargs)
 
+    @pytest.mark.slow
+    def test_hist_df_legacy_layout_labelsize_rot(self, frame_or_series):
         # make sure xlabelsize and xrot are handled
-        ser = df[0]
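+        # frame_or_series is parametrized over DataFrame and Series, so both are exercised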
+        obj = frame_or_series(range(10))
         xf, yf = 20, 18
         xrot, yrot = 30, 40
-        axes = ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
-        self._check_ticks_props(
-            axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot
-        )
+        axes = obj.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
+        _check_ticks_props(axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
 
-        xf, yf = 20, 18
-        xrot, yrot = 30, 40
-        axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
-        self._check_ticks_props(
-            axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot
-        )
-
-        tm.close()
+    @pytest.mark.slow
+    def test_hist_df_legacy_rectangles(self):
+        from matplotlib.patches import Rectangle
 
+        ser = Series(range(10))
         ax = ser.hist(cumulative=True, bins=4, density=True)
         # height of last bin (index 5) must be 1.0
         rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
         tm.assert_almost_equal(rects[-1].get_height(), 1.0)
 
-        tm.close()
+    @pytest.mark.slow
+    def test_hist_df_legacy_scale(self):
+        ser = Series(range(10))
         ax = ser.hist(log=True)
         # scale of y must be 'log'
-        self._check_ax_scales(ax, yaxis="log")
-
-        tm.close()
+        _check_ax_scales(ax, yaxis="log")
 
+    @pytest.mark.slow
+    def test_hist_df_legacy_external_error(self):
+        ser = Series(range(10))
         # propagate attr exception from matplotlib.Axes.hist
         with tm.external_error_raised(AttributeError):
             ser.hist(foo="bar")
@@ -368,7 +379,7 @@ def test_hist_layout(self):
         for layout_test in layout_to_expected_size:
             axes = df.hist(layout=layout_test["layout"])
             expected = layout_test["expected_size"]
-            self._check_axes_shape(axes, axes_num=3, layout=expected)
+            _check_axes_shape(axes, axes_num=3, layout=expected)
 
         # layout too small for all 4 plots
         msg = "Layout of 1x1 must be larger than required size 3"
@@ -396,7 +407,7 @@ def test_tight_layout(self):
         )
         # Use default_axes=True when plotting method generate subplots itself
         _check_plot_works(df.hist, default_axes=True)
-        self.plt.tight_layout()
+        mpl.pyplot.tight_layout()
 
         tm.close()
 
@@ -417,7 +428,7 @@ def test_hist_subplot_xrot(self):
             bins=5,
             xrot=0,
         )
-        self._check_ticks_props(axes, xrot=0)
+        _check_ticks_props(axes, xrot=0)
 
     @pytest.mark.parametrize(
         "column, expected",
@@ -461,7 +472,7 @@ def test_histtype_argument(self, histtype, expected):
         # GH23992 Verify functioning of histtype argument
         df = DataFrame(np.random.randint(1, 10, size=(100, 2)), columns=["a", "b"])
         ax = df.hist(histtype=histtype)
-        self._check_patches_all_filled(ax, filled=expected)
+        _check_patches_all_filled(ax, filled=expected)
 
     @pytest.mark.parametrize("by", [None, "c"])
     @pytest.mark.parametrize("column", [None, "b"])
@@ -485,11 +496,11 @@ def test_hist_with_legend(self, by, column):
             column=column,
         )
 
-        self._check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)
+        _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)
         if by is None and column is None:
             axes = axes[0]
         for expected_label, ax in zip(expected_labels, axes):
-            self._check_legend_labels(ax, expected_label)
+            _check_legend_labels(ax, expected_label)
 
     @pytest.mark.parametrize("by", [None, "c"])
     @pytest.mark.parametrize("column", [None, "b"])
@@ -503,7 +514,7 @@ def test_hist_with_legend_raises(self, by, column):
 
     def test_hist_df_kwargs(self):
         df = DataFrame(np.random.randn(10, 2))
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df.plot.hist(bins=5, ax=ax)
         assert len(ax.patches) == 10
 
@@ -513,11 +524,11 @@ def test_hist_df_with_nonnumerics(self):
             np.random.RandomState(42).randn(10, 4), columns=["A", "B", "C", "D"]
         )
         df["E"] = ["x", "y"] * 5
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df.plot.hist(bins=5, ax=ax)
         assert len(ax.patches) == 20
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df.plot.hist(ax=ax)  # bins=10
         assert len(ax.patches) == 40
 
@@ -526,35 +537,35 @@ def test_hist_secondary_legend(self):
         df = DataFrame(np.random.randn(30, 4), columns=list("abcd"))
 
         # primary -> secondary
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df["a"].plot.hist(legend=True, ax=ax)
         df["b"].plot.hist(ax=ax, legend=True, secondary_y=True)
         # both legends are drawn on left ax
         # left and right axis must be visible
-        self._check_legend_labels(ax, labels=["a", "b (right)"])
+        _check_legend_labels(ax, labels=["a", "b (right)"])
         assert ax.get_yaxis().get_visible()
         assert ax.right_ax.get_yaxis().get_visible()
         tm.close()
 
         # secondary -> secondary
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df["a"].plot.hist(legend=True, secondary_y=True, ax=ax)
         df["b"].plot.hist(ax=ax, legend=True, secondary_y=True)
         # both legends are drawn on left ax
         # left axis must be invisible, right axis must be visible
-        self._check_legend_labels(ax.left_ax, labels=["a (right)", "b (right)"])
+        _check_legend_labels(ax.left_ax, labels=["a (right)", "b (right)"])
         assert not ax.left_ax.get_yaxis().get_visible()
         assert ax.get_yaxis().get_visible()
         tm.close()
 
         # secondary -> primary
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df["a"].plot.hist(legend=True, secondary_y=True, ax=ax)
         # right axes is returned
         df["b"].plot.hist(ax=ax, legend=True)
         # both legends are drawn on left ax
         # left and right axis must be visible
-        self._check_legend_labels(ax.left_ax, labels=["a (right)", "b"])
+        _check_legend_labels(ax.left_ax, labels=["a (right)", "b"])
         assert ax.left_ax.get_yaxis().get_visible()
         assert ax.get_yaxis().get_visible()
         tm.close()
@@ -572,11 +583,11 @@ def test_hist_with_nans_and_weights(self):
 
         from matplotlib.patches import Rectangle
 
-        _, ax0 = self.plt.subplots()
+        _, ax0 = mpl.pyplot.subplots()
         df.plot.hist(ax=ax0, weights=weights)
         rects = [x for x in ax0.get_children() if isinstance(x, Rectangle)]
         heights = [rect.get_height() for rect in rects]
-        _, ax1 = self.plt.subplots()
+        _, ax1 = mpl.pyplot.subplots()
         no_nan_df.plot.hist(ax=ax1, weights=no_nan_weights)
         no_nan_rects = [x for x in ax1.get_children() if isinstance(x, Rectangle)]
         no_nan_heights = [rect.get_height() for rect in no_nan_rects]
@@ -586,12 +597,11 @@ def test_hist_with_nans_and_weights(self):
 
         msg = "weights must have the same shape as data, or be a single column"
         with pytest.raises(ValueError, match=msg):
-            _, ax2 = self.plt.subplots()
+            _, ax2 = mpl.pyplot.subplots()
             no_nan_df.plot.hist(ax=ax2, weights=idxerror_weights)
 
 
-@td.skip_if_no_mpl
-class TestDataFrameGroupByPlots(TestPlotBase):
+class TestDataFrameGroupByPlots:
     def test_grouped_hist_legacy(self):
         from matplotlib.patches import Rectangle
 
@@ -610,17 +620,17 @@ def test_grouped_hist_legacy(self):
         df["D"] = ["X"] * 500
 
         axes = _grouped_hist(df.A, by=df.C)
-        self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
+        _check_axes_shape(axes, axes_num=4, layout=(2, 2))
 
         tm.close()
         axes = df.hist(by=df.C)
-        self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
+        _check_axes_shape(axes, axes_num=4, layout=(2, 2))
 
         tm.close()
         # group by a key with a single value
         axes = df.hist(by="D", rot=30)
-        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
-        self._check_ticks_props(axes, xrot=30)
+        _check_axes_shape(axes, axes_num=1, layout=(1, 1))
+        _check_ticks_props(axes, xrot=30)
 
         tm.close()
         # make sure kwargs to hist are handled
@@ -643,14 +653,12 @@ def test_grouped_hist_legacy(self):
             rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
             height = rects[-1].get_height()
             tm.assert_almost_equal(height, 1.0)
-        self._check_ticks_props(
-            axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot
-        )
+        _check_ticks_props(axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
 
         tm.close()
         axes = _grouped_hist(df.A, by=df.C, log=True)
         # scale of y must be 'log'
-        self._check_ax_scales(axes, yaxis="log")
+        _check_ax_scales(axes, yaxis="log")
 
         tm.close()
         # propagate attr exception from matplotlib.Axes.hist
@@ -670,7 +678,7 @@ def test_grouped_hist_legacy2(self):
         gb = df_int.groupby("gender")
         axes = gb.hist()
         assert len(axes) == 2
-        assert len(self.plt.get_fignums()) == 2
+        assert len(mpl.pyplot.get_fignums()) == 2
         tm.close()
 
     @pytest.mark.slow
@@ -692,22 +700,22 @@ def test_grouped_hist_layout(self, hist_df):
             axes = _check_plot_works(
                 df.hist, column="height", by=df.gender, layout=(2, 1)
             )
-        self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
+        _check_axes_shape(axes, axes_num=2, layout=(2, 1))
 
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             axes = _check_plot_works(
                 df.hist, column="height", by=df.gender, layout=(2, -1)
             )
-        self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
+        _check_axes_shape(axes, axes_num=2, layout=(2, 1))
 
         axes = df.hist(column="height", by=df.category, layout=(4, 1))
-        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
+        _check_axes_shape(axes, axes_num=4, layout=(4, 1))
 
         axes = df.hist(column="height", by=df.category, layout=(-1, 1))
-        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
+        _check_axes_shape(axes, axes_num=4, layout=(4, 1))
 
         axes = df.hist(column="height", by=df.category, layout=(4, 2), figsize=(12, 8))
-        self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 8))
+        _check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 8))
         tm.close()
 
         # GH 6769
@@ -715,34 +723,34 @@ def test_grouped_hist_layout(self, hist_df):
             axes = _check_plot_works(
                 df.hist, column="height", by="classroom", layout=(2, 2)
             )
-        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+        _check_axes_shape(axes, axes_num=3, layout=(2, 2))
 
         # without column
         with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
             axes = _check_plot_works(df.hist, by="classroom")
-        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+        _check_axes_shape(axes, axes_num=3, layout=(2, 2))
 
         axes = df.hist(by="gender", layout=(3, 5))
-        self._check_axes_shape(axes, axes_num=2, layout=(3, 5))
+        _check_axes_shape(axes, axes_num=2, layout=(3, 5))
 
         axes = df.hist(column=["height", "weight", "category"])
-        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+        _check_axes_shape(axes, axes_num=3, layout=(2, 2))
 
     def test_grouped_hist_multiple_axes(self, hist_df):
         # GH 6970, GH 7069
         df = hist_df
 
-        fig, axes = self.plt.subplots(2, 3)
+        fig, axes = mpl.pyplot.subplots(2, 3)
         returned = df.hist(column=["height", "weight", "category"], ax=axes[0])
-        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
+        _check_axes_shape(returned, axes_num=3, layout=(1, 3))
         tm.assert_numpy_array_equal(returned, axes[0])
         assert returned[0].figure is fig
         returned = df.hist(by="classroom", ax=axes[1])
-        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
+        _check_axes_shape(returned, axes_num=3, layout=(1, 3))
         tm.assert_numpy_array_equal(returned, axes[1])
         assert returned[0].figure is fig
 
-        fig, axes = self.plt.subplots(2, 3)
+        fig, axes = mpl.pyplot.subplots(2, 3)
         # pass a different number of axes than required
         msg = "The number of passed axes must be 1, the same as the output plot"
         with pytest.raises(ValueError, match=msg):
@@ -754,35 +762,35 @@ def test_axis_share_x(self, hist_df):
         ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True)
 
         # share x
-        assert self.get_x_axis(ax1).joined(ax1, ax2)
-        assert self.get_x_axis(ax2).joined(ax1, ax2)
+        assert get_x_axis(ax1).joined(ax1, ax2)
+        assert get_x_axis(ax2).joined(ax1, ax2)
 
         # don't share y
-        assert not self.get_y_axis(ax1).joined(ax1, ax2)
-        assert not self.get_y_axis(ax2).joined(ax1, ax2)
+        assert not get_y_axis(ax1).joined(ax1, ax2)
+        assert not get_y_axis(ax2).joined(ax1, ax2)
 
     def test_axis_share_y(self, hist_df):
         df = hist_df
         ax1, ax2 = df.hist(column="height", by=df.gender, sharey=True)
 
         # share y
-        assert self.get_y_axis(ax1).joined(ax1, ax2)
-        assert self.get_y_axis(ax2).joined(ax1, ax2)
+        assert get_y_axis(ax1).joined(ax1, ax2)
+        assert get_y_axis(ax2).joined(ax1, ax2)
 
         # don't share x
-        assert not self.get_x_axis(ax1).joined(ax1, ax2)
-        assert not self.get_x_axis(ax2).joined(ax1, ax2)
+        assert not get_x_axis(ax1).joined(ax1, ax2)
+        assert not get_x_axis(ax2).joined(ax1, ax2)
 
     def test_axis_share_xy(self, hist_df):
         df = hist_df
         ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True, sharey=True)
 
         # share both x and y
-        assert self.get_x_axis(ax1).joined(ax1, ax2)
-        assert self.get_x_axis(ax2).joined(ax1, ax2)
+        assert get_x_axis(ax1).joined(ax1, ax2)
+        assert get_x_axis(ax2).joined(ax1, ax2)
 
-        assert self.get_y_axis(ax1).joined(ax1, ax2)
-        assert self.get_y_axis(ax2).joined(ax1, ax2)
+        assert get_y_axis(ax1).joined(ax1, ax2)
+        assert get_y_axis(ax2).joined(ax1, ax2)
 
     @pytest.mark.parametrize(
         "histtype, expected",
@@ -797,4 +805,4 @@ def test_histtype_argument(self, histtype, expected):
         # GH23992 Verify functioning of histtype argument
         df = DataFrame(np.random.randint(1, 10, size=(100, 2)), columns=["a", "b"])
         ax = df.hist(by="a", histtype=histtype)
-        self._check_patches_all_filled(ax, filled=expected)
+        _check_patches_all_filled(ax, filled=expected)
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index a89956d1c14c8..e8797266fcbbe 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -15,10 +15,15 @@
 )
 import pandas._testing as tm
 from pandas.tests.plotting.common import (
-    TestPlotBase,
+    _check_colors,
+    _check_legend_labels,
     _check_plot_works,
+    _check_text_labels,
+    _check_ticks_props,
 )
 
+mpl = pytest.importorskip("matplotlib")
+
 
 @td.skip_if_mpl
 def test_import_error_message():
@@ -63,8 +68,7 @@ def test_get_accessor_args():
     assert len(kwargs) == 24
 
 
-@td.skip_if_no_mpl
-class TestSeriesPlots(TestPlotBase):
+class TestSeriesPlots:
     def test_autocorrelation_plot(self):
         from pandas.plotting import autocorrelation_plot
 
@@ -75,7 +79,7 @@ def test_autocorrelation_plot(self):
             _check_plot_works(autocorrelation_plot, series=ser.values)
 
             ax = autocorrelation_plot(ser, label="Test")
-        self._check_legend_labels(ax, labels=["Test"])
+        _check_legend_labels(ax, labels=["Test"])
 
     @pytest.mark.parametrize("kwargs", [{}, {"lag": 5}])
     def test_lag_plot(self, kwargs):
@@ -91,8 +95,7 @@ def test_bootstrap_plot(self):
         _check_plot_works(bootstrap_plot, series=ser, size=10)
 
 
-@td.skip_if_no_mpl
-class TestDataFramePlots(TestPlotBase):
+class TestDataFramePlots:
     @td.skip_if_no_scipy
     @pytest.mark.parametrize("pass_axis", [False, True])
     def test_scatter_matrix_axis(self, pass_axis):
@@ -100,7 +103,7 @@ def test_scatter_matrix_axis(self, pass_axis):
 
         ax = None
         if pass_axis:
-            _, ax = self.plt.subplots(3, 3)
+            _, ax = mpl.pyplot.subplots(3, 3)
 
         df = DataFrame(np.random.RandomState(42).randn(100, 3))
 
@@ -116,8 +119,8 @@ def test_scatter_matrix_axis(self, pass_axis):
 
         # GH 5662
         expected = ["-2", "0", "2"]
-        self._check_text_labels(axes0_labels, expected)
-        self._check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
+        _check_text_labels(axes0_labels, expected)
+        _check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
 
         df[0] = (df[0] - 2) / 3
 
@@ -131,8 +134,8 @@ def test_scatter_matrix_axis(self, pass_axis):
             )
         axes0_labels = axes[0][0].yaxis.get_majorticklabels()
         expected = ["-1.0", "-0.5", "0.0"]
-        self._check_text_labels(axes0_labels, expected)
-        self._check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
+        _check_text_labels(axes0_labels, expected)
+        _check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
 
     @pytest.mark.slow
     def test_andrews_curves(self, iris):
@@ -149,25 +152,19 @@ def test_andrews_curves(self, iris):
         ax = _check_plot_works(
             andrews_curves, frame=df, class_column="Name", color=rgba
         )
-        self._check_colors(
-            ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10]
-        )
+        _check_colors(ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10])
 
         cnames = ["dodgerblue", "aquamarine", "seagreen"]
         ax = _check_plot_works(
             andrews_curves, frame=df, class_column="Name", color=cnames
         )
-        self._check_colors(
-            ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10]
-        )
+        _check_colors(ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10])
 
         ax = _check_plot_works(
             andrews_curves, frame=df, class_column="Name", colormap=cm.jet
         )
         cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
-        self._check_colors(
-            ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]
-        )
+        _check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10])
 
         length = 10
         df = DataFrame(
@@ -185,31 +182,25 @@ def test_andrews_curves(self, iris):
         ax = _check_plot_works(
             andrews_curves, frame=df, class_column="Name", color=rgba
         )
-        self._check_colors(
-            ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10]
-        )
+        _check_colors(ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10])
 
         cnames = ["dodgerblue", "aquamarine", "seagreen"]
         ax = _check_plot_works(
             andrews_curves, frame=df, class_column="Name", color=cnames
         )
-        self._check_colors(
-            ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10]
-        )
+        _check_colors(ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10])
 
         ax = _check_plot_works(
             andrews_curves, frame=df, class_column="Name", colormap=cm.jet
         )
         cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
-        self._check_colors(
-            ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]
-        )
+        _check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10])
 
         colors = ["b", "g", "r"]
         df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors})
         ax = andrews_curves(df, "Name", color=colors)
         handles, labels = ax.get_legend_handles_labels()
-        self._check_colors(handles, linecolors=colors)
+        _check_colors(handles, linecolors=colors)
 
     @pytest.mark.slow
     def test_parallel_coordinates(self, iris):
@@ -227,25 +218,19 @@ def test_parallel_coordinates(self, iris):
         ax = _check_plot_works(
             parallel_coordinates, frame=df, class_column="Name", color=rgba
         )
-        self._check_colors(
-            ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10]
-        )
+        _check_colors(ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10])
 
         cnames = ["dodgerblue", "aquamarine", "seagreen"]
         ax = _check_plot_works(
             parallel_coordinates, frame=df, class_column="Name", color=cnames
         )
-        self._check_colors(
-            ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10]
-        )
+        _check_colors(ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10])
 
         ax = _check_plot_works(
             parallel_coordinates, frame=df, class_column="Name", colormap=cm.jet
         )
         cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
-        self._check_colors(
-            ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]
-        )
+        _check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10])
 
         ax = _check_plot_works(
             parallel_coordinates, frame=df, class_column="Name", axvlines=False
@@ -256,7 +241,7 @@ def test_parallel_coordinates(self, iris):
         df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors})
         ax = parallel_coordinates(df, "Name", color=colors)
         handles, labels = ax.get_legend_handles_labels()
-        self._check_colors(handles, linecolors=colors)
+        _check_colors(handles, linecolors=colors)
 
     # not sure if this is indicative of a problem
     @pytest.mark.filterwarnings("ignore:Attempting to set:UserWarning")
@@ -299,17 +284,17 @@ def test_radviz(self, iris):
         ax = _check_plot_works(radviz, frame=df, class_column="Name", color=rgba)
         # skip Circle drawn as ticks
         patches = [p for p in ax.patches[:20] if p.get_label() != ""]
-        self._check_colors(patches[:10], facecolors=rgba, mapping=df["Name"][:10])
+        _check_colors(patches[:10], facecolors=rgba, mapping=df["Name"][:10])
 
         cnames = ["dodgerblue", "aquamarine", "seagreen"]
         _check_plot_works(radviz, frame=df, class_column="Name", color=cnames)
         patches = [p for p in ax.patches[:20] if p.get_label() != ""]
-        self._check_colors(patches, facecolors=cnames, mapping=df["Name"][:10])
+        _check_colors(patches, facecolors=cnames, mapping=df["Name"][:10])
 
         _check_plot_works(radviz, frame=df, class_column="Name", colormap=cm.jet)
         cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
         patches = [p for p in ax.patches[:20] if p.get_label() != ""]
-        self._check_colors(patches, facecolors=cmaps, mapping=df["Name"][:10])
+        _check_colors(patches, facecolors=cmaps, mapping=df["Name"][:10])
 
         colors = [[0.0, 0.0, 1.0, 1.0], [0.0, 0.5, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0]]
         df = DataFrame(
@@ -317,7 +302,7 @@ def test_radviz(self, iris):
         )
         ax = radviz(df, "Name", color=colors)
         handles, labels = ax.get_legend_handles_labels()
-        self._check_colors(handles, facecolors=colors)
+        _check_colors(handles, facecolors=colors)
 
     def test_subplot_titles(self, iris):
         df = iris.drop("Name", axis=1).head()
@@ -478,7 +463,7 @@ def test_has_externally_shared_axis_x_axis(self):
         # Test _has_externally_shared_axis() works for x-axis
         func = plotting._matplotlib.tools._has_externally_shared_axis
 
-        fig = self.plt.figure()
+        fig = mpl.pyplot.figure()
         plots = fig.subplots(2, 4)
 
         # Create *externally* shared axes for first and third columns
@@ -503,7 +488,7 @@ def test_has_externally_shared_axis_y_axis(self):
         # Test _has_externally_shared_axis() works for y-axis
         func = plotting._matplotlib.tools._has_externally_shared_axis
 
-        fig = self.plt.figure()
+        fig = mpl.pyplot.figure()
         plots = fig.subplots(4, 2)
 
         # Create *externally* shared axes for first and third rows
@@ -529,7 +514,7 @@ def test_has_externally_shared_axis_invalid_compare_axis(self):
         # passed an invalid value as compare_axis parameter
         func = plotting._matplotlib.tools._has_externally_shared_axis
 
-        fig = self.plt.figure()
+        fig = mpl.pyplot.figure()
         plots = fig.subplots(4, 2)
 
         # Create arbitrary axes
@@ -546,7 +531,7 @@ def test_externally_shared_axes(self):
         df = DataFrame({"a": np.random.randn(1000), "b": np.random.randn(1000)})
 
         # Create figure
-        fig = self.plt.figure()
+        fig = mpl.pyplot.figure()
         plots = fig.subplots(2, 3)
 
         # Create *externally* shared axes
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 755a1811c1356..c2dffdb6f7e47 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -18,10 +18,21 @@
 )
 import pandas._testing as tm
 from pandas.tests.plotting.common import (
-    TestPlotBase,
+    _check_ax_scales,
+    _check_axes_shape,
+    _check_colors,
+    _check_grid_settings,
+    _check_has_errorbars,
+    _check_legend_labels,
     _check_plot_works,
+    _check_text_labels,
+    _check_ticks_props,
+    _unpack_cycler,
+    get_y_axis,
 )
 
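+# matplotlib is required by every test in this module; importorskip skips the whole file if it is unavailable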
+mpl = pytest.importorskip("matplotlib")
+
 
 @pytest.fixture
 def ts():
@@ -38,23 +49,22 @@ def iseries():
     return tm.makePeriodSeries(name="iseries")
 
 
-@td.skip_if_no_mpl
-class TestSeriesPlots(TestPlotBase):
+class TestSeriesPlots:
     @pytest.mark.slow
     def test_plot(self, ts):
         _check_plot_works(ts.plot, label="foo")
         _check_plot_works(ts.plot, use_index=False)
         axes = _check_plot_works(ts.plot, rot=0)
-        self._check_ticks_props(axes, xrot=0)
+        _check_ticks_props(axes, xrot=0)
 
         ax = _check_plot_works(ts.plot, style=".", logy=True)
-        self._check_ax_scales(ax, yaxis="log")
+        _check_ax_scales(ax, yaxis="log")
 
         ax = _check_plot_works(ts.plot, style=".", logx=True)
-        self._check_ax_scales(ax, xaxis="log")
+        _check_ax_scales(ax, xaxis="log")
 
         ax = _check_plot_works(ts.plot, style=".", loglog=True)
-        self._check_ax_scales(ax, xaxis="log", yaxis="log")
+        _check_ax_scales(ax, xaxis="log", yaxis="log")
 
         _check_plot_works(ts[:10].plot.bar)
         _check_plot_works(ts.plot.area, stacked=False)
@@ -81,35 +91,35 @@ def test_plot_series_barh(self, series):
 
     def test_plot_series_bar_ax(self):
         ax = _check_plot_works(Series(np.random.randn(10)).plot.bar, color="black")
-        self._check_colors([ax.patches[0]], facecolors=["black"])
+        _check_colors([ax.patches[0]], facecolors=["black"])
 
     def test_plot_6951(self, ts):
         # GH 6951
         ax = _check_plot_works(ts.plot, subplots=True)
-        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
+        _check_axes_shape(ax, axes_num=1, layout=(1, 1))
 
         ax = _check_plot_works(ts.plot, subplots=True, layout=(-1, 1))
-        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
+        _check_axes_shape(ax, axes_num=1, layout=(1, 1))
         ax = _check_plot_works(ts.plot, subplots=True, layout=(1, -1))
-        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
+        _check_axes_shape(ax, axes_num=1, layout=(1, 1))
 
     def test_plot_figsize_and_title(self, series):
         # figsize and title
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = series.plot(title="Test", figsize=(16, 8), ax=ax)
-        self._check_text_labels(ax.title, "Test")
-        self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
+        _check_text_labels(ax.title, "Test")
+        _check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
 
     def test_dont_modify_rcParams(self):
         # GH 8242
         key = "axes.prop_cycle"
-        colors = self.plt.rcParams[key]
-        _, ax = self.plt.subplots()
+        colors = mpl.pyplot.rcParams[key]
+        _, ax = mpl.pyplot.subplots()
         Series([1, 2, 3]).plot(ax=ax)
-        assert colors == self.plt.rcParams[key]
+        assert colors == mpl.pyplot.rcParams[key]
 
     def test_ts_line_lim(self, ts):
-        fig, ax = self.plt.subplots()
+        fig, ax = mpl.pyplot.subplots()
         ax = ts.plot(ax=ax)
         xmin, xmax = ax.get_xlim()
         lines = ax.get_lines()
@@ -124,81 +134,81 @@ def test_ts_line_lim(self, ts):
         assert xmax >= lines[0].get_data(orig=False)[0][-1]
 
     def test_ts_area_lim(self, ts):
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.area(stacked=False, ax=ax)
         xmin, xmax = ax.get_xlim()
         line = ax.get_lines()[0].get_data(orig=False)[0]
         assert xmin <= line[0]
         assert xmax >= line[-1]
-        self._check_ticks_props(ax, xrot=0)
+        _check_ticks_props(ax, xrot=0)
         tm.close()
 
         # GH 7471
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.area(stacked=False, x_compat=True, ax=ax)
         xmin, xmax = ax.get_xlim()
         line = ax.get_lines()[0].get_data(orig=False)[0]
         assert xmin <= line[0]
         assert xmax >= line[-1]
-        self._check_ticks_props(ax, xrot=30)
+        _check_ticks_props(ax, xrot=30)
         tm.close()
 
         tz_ts = ts.copy()
         tz_ts.index = tz_ts.tz_localize("GMT").tz_convert("CET")
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax)
         xmin, xmax = ax.get_xlim()
         line = ax.get_lines()[0].get_data(orig=False)[0]
         assert xmin <= line[0]
         assert xmax >= line[-1]
-        self._check_ticks_props(ax, xrot=0)
+        _check_ticks_props(ax, xrot=0)
         tm.close()
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax)
         xmin, xmax = ax.get_xlim()
         line = ax.get_lines()[0].get_data(orig=False)[0]
         assert xmin <= line[0]
         assert xmax >= line[-1]
-        self._check_ticks_props(ax, xrot=0)
+        _check_ticks_props(ax, xrot=0)
 
     def test_area_sharey_dont_overwrite(self, ts):
         # GH37942
-        fig, (ax1, ax2) = self.plt.subplots(1, 2, sharey=True)
+        fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2, sharey=True)
 
         abs(ts).plot(ax=ax1, kind="area")
         abs(ts).plot(ax=ax2, kind="area")
 
-        assert self.get_y_axis(ax1).joined(ax1, ax2)
-        assert self.get_y_axis(ax2).joined(ax1, ax2)
+        assert get_y_axis(ax1).joined(ax1, ax2)
+        assert get_y_axis(ax2).joined(ax1, ax2)
 
     def test_label(self):
         s = Series([1, 2])
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = s.plot(label="LABEL", legend=True, ax=ax)
-        self._check_legend_labels(ax, labels=["LABEL"])
-        self.plt.close()
-        _, ax = self.plt.subplots()
+        _check_legend_labels(ax, labels=["LABEL"])
+        mpl.pyplot.close()
+        _, ax = mpl.pyplot.subplots()
         ax = s.plot(legend=True, ax=ax)
-        self._check_legend_labels(ax, labels=[""])
-        self.plt.close()
+        _check_legend_labels(ax, labels=[""])
+        mpl.pyplot.close()
         # get name from index
         s.name = "NAME"
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = s.plot(legend=True, ax=ax)
-        self._check_legend_labels(ax, labels=["NAME"])
-        self.plt.close()
+        _check_legend_labels(ax, labels=["NAME"])
+        mpl.pyplot.close()
         # override the default
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = s.plot(legend=True, label="LABEL", ax=ax)
-        self._check_legend_labels(ax, labels=["LABEL"])
-        self.plt.close()
+        _check_legend_labels(ax, labels=["LABEL"])
+        mpl.pyplot.close()
         # Add label info, but don't draw
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = s.plot(legend=False, label="LABEL", ax=ax)
         assert ax.get_legend() is None  # Hasn't been drawn
         ax.legend()  # draw it
-        self._check_legend_labels(ax, labels=["LABEL"])
+        _check_legend_labels(ax, labels=["LABEL"])
 
     def test_boolean(self):
         # GH 23719
@@ -231,11 +241,11 @@ def test_line_area_nan_series(self, index):
     def test_line_use_index_false(self):
         s = Series([1, 2, 3], index=["a", "b", "c"])
         s.index.name = "The Index"
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = s.plot(use_index=False, ax=ax)
         label = ax.get_xlabel()
         assert label == ""
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax2 = s.plot.bar(use_index=False, ax=ax)
         label2 = ax2.get_xlabel()
         assert label2 == ""
@@ -248,12 +258,12 @@ def test_line_use_index_false(self):
     def test_bar_log(self):
         expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4])
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = Series([200, 500]).plot.bar(log=True, ax=ax)
         tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
         tm.close()
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = Series([200, 500]).plot.barh(log=True, ax=ax)
         tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
         tm.close()
@@ -261,7 +271,7 @@ def test_bar_log(self):
         # GH 9905
         expected = np.array([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1])
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind="bar", ax=ax)
         ymin = 0.0007943282347242822
         ymax = 0.12589254117941673
@@ -271,7 +281,7 @@ def test_bar_log(self):
         tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
         tm.close()
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind="barh", ax=ax)
         res = ax.get_xlim()
         tm.assert_almost_equal(res[0], ymin)
@@ -280,9 +290,9 @@ def test_bar_log(self):
 
     def test_bar_ignore_index(self):
         df = Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df.plot.bar(use_index=False, ax=ax)
-        self._check_text_labels(ax.get_xticklabels(), ["0", "1", "2", "3"])
+        _check_text_labels(ax.get_xticklabels(), ["0", "1", "2", "3"])
 
     def test_bar_user_colors(self):
         s = Series([1, 2, 3, 4])
@@ -299,13 +309,13 @@ def test_bar_user_colors(self):
     def test_rotation(self):
         df = DataFrame(np.random.randn(5, 5))
         # Default rot 0
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         axes = df.plot(ax=ax)
-        self._check_ticks_props(axes, xrot=0)
+        _check_ticks_props(axes, xrot=0)
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         axes = df.plot(rot=30, ax=ax)
-        self._check_ticks_props(axes, xrot=30)
+        _check_ticks_props(axes, xrot=30)
 
     def test_irregular_datetime(self):
         from pandas.plotting._matplotlib.converter import DatetimeConverter
@@ -313,19 +323,19 @@ def test_irregular_datetime(self):
         rng = date_range("1/1/2000", "3/1/2000")
         rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
         ser = Series(np.random.randn(len(rng)), rng)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ser.plot(ax=ax)
         xp = DatetimeConverter.convert(datetime(1999, 1, 1), "", ax)
         ax.set_xlim("1/1/1999", "1/1/2001")
         assert xp == ax.get_xlim()[0]
-        self._check_ticks_props(ax, xrot=30)
+        _check_ticks_props(ax, xrot=30)
 
     def test_unsorted_index_xlim(self):
         ser = Series(
             [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0],
             index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0],
         )
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ser.plot(ax=ax)
         xmin, xmax = ax.get_xlim()
         lines = ax.get_lines()
@@ -339,26 +349,26 @@ def test_pie_series(self):
             np.random.randint(1, 5), index=["a", "b", "c", "d", "e"], name="YLABEL"
         )
         ax = _check_plot_works(series.plot.pie)
-        self._check_text_labels(ax.texts, series.index)
+        _check_text_labels(ax.texts, series.index)
         assert ax.get_ylabel() == "YLABEL"
 
         # without wedge labels
         ax = _check_plot_works(series.plot.pie, labels=None)
-        self._check_text_labels(ax.texts, [""] * 5)
+        _check_text_labels(ax.texts, [""] * 5)
 
         # with less colors than elements
         color_args = ["r", "g", "b"]
         ax = _check_plot_works(series.plot.pie, colors=color_args)
 
         color_expected = ["r", "g", "b", "r", "g"]
-        self._check_colors(ax.patches, facecolors=color_expected)
+        _check_colors(ax.patches, facecolors=color_expected)
 
         # with labels and colors
         labels = ["A", "B", "C", "D", "E"]
         color_args = ["r", "g", "b", "c", "m"]
         ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args)
-        self._check_text_labels(ax.texts, labels)
-        self._check_colors(ax.patches, facecolors=color_args)
+        _check_text_labels(ax.texts, labels)
+        _check_colors(ax.patches, facecolors=color_args)
 
         # with autopct and fontsize
         ax = _check_plot_works(
@@ -366,7 +376,7 @@ def test_pie_series(self):
         )
         pcts = [f"{s*100:.2f}" for s in series.values / series.sum()]
         expected_texts = list(chain.from_iterable(zip(series.index, pcts)))
-        self._check_text_labels(ax.texts, expected_texts)
+        _check_text_labels(ax.texts, expected_texts)
         for t in ax.texts:
             assert t.get_fontsize() == 7
 
@@ -378,11 +388,11 @@ def test_pie_series(self):
         # includes nan
         series = Series([1, 2, np.nan, 4], index=["a", "b", "c", "d"], name="YLABEL")
         ax = _check_plot_works(series.plot.pie)
-        self._check_text_labels(ax.texts, ["a", "b", "", "d"])
+        _check_text_labels(ax.texts, ["a", "b", "", "d"])
 
     def test_pie_nan(self):
         s = Series([1, np.nan, 1, 1])
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = s.plot.pie(legend=True, ax=ax)
         expected = ["0", "", "2", "3"]
         result = [x.get_text() for x in ax.texts]
@@ -394,59 +404,59 @@ def test_df_series_secondary_legend(self):
         s = Series(np.random.randn(30), name="x")
 
         # primary -> secondary (without passing ax)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df.plot(ax=ax)
         s.plot(legend=True, secondary_y=True, ax=ax)
         # both legends are drawn on left ax
         # left and right axis must be visible
-        self._check_legend_labels(ax, labels=["a", "b", "c", "x (right)"])
+        _check_legend_labels(ax, labels=["a", "b", "c", "x (right)"])
         assert ax.get_yaxis().get_visible()
         assert ax.right_ax.get_yaxis().get_visible()
         tm.close()
 
         # primary -> secondary (with passing ax)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df.plot(ax=ax)
         s.plot(ax=ax, legend=True, secondary_y=True)
         # both legends are drawn on left ax
         # left and right axis must be visible
-        self._check_legend_labels(ax, labels=["a", "b", "c", "x (right)"])
+        _check_legend_labels(ax, labels=["a", "b", "c", "x (right)"])
         assert ax.get_yaxis().get_visible()
         assert ax.right_ax.get_yaxis().get_visible()
         tm.close()
 
         # secondary -> secondary (without passing ax)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df.plot(secondary_y=True, ax=ax)
         s.plot(legend=True, secondary_y=True, ax=ax)
         # both legends are drawn on left ax
         # left axis must be invisible and right axis must be visible
         expected = ["a (right)", "b (right)", "c (right)", "x (right)"]
-        self._check_legend_labels(ax.left_ax, labels=expected)
+        _check_legend_labels(ax.left_ax, labels=expected)
         assert not ax.left_ax.get_yaxis().get_visible()
         assert ax.get_yaxis().get_visible()
         tm.close()
 
         # secondary -> secondary (with passing ax)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df.plot(secondary_y=True, ax=ax)
         s.plot(ax=ax, legend=True, secondary_y=True)
         # both legends are drawn on left ax
         # left axis must be invisible and right axis must be visible
         expected = ["a (right)", "b (right)", "c (right)", "x (right)"]
-        self._check_legend_labels(ax.left_ax, expected)
+        _check_legend_labels(ax.left_ax, expected)
         assert not ax.left_ax.get_yaxis().get_visible()
         assert ax.get_yaxis().get_visible()
         tm.close()
 
         # secondary -> secondary (with passing ax)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = df.plot(secondary_y=True, mark_right=False, ax=ax)
         s.plot(ax=ax, legend=True, secondary_y=True)
         # both legends are drawn on left ax
         # left axis must be invisible and right axis must be visible
         expected = ["a", "b", "c", "x (right)"]
-        self._check_legend_labels(ax.left_ax, expected)
+        _check_legend_labels(ax.left_ax, expected)
         assert not ax.left_ax.get_yaxis().get_visible()
         assert ax.get_yaxis().get_visible()
         tm.close()
@@ -468,7 +478,7 @@ def test_secondary_logy(self, input_logy, expected_scale):
 
     def test_plot_fails_with_dupe_color_and_style(self):
         x = Series(np.random.randn(2))
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         msg = (
             "Cannot pass 'style' string with a color symbol and 'color' keyword "
             "argument. Please use one or the other or pass 'style' without a color "
@@ -485,10 +495,10 @@ def test_kde_kwargs(self, ts):
         _check_plot_works(ts.plot.kde, bw_method=None, ind=np.int_(20))
         _check_plot_works(ts.plot.kde, bw_method=0.5, ind=sample_points)
         _check_plot_works(ts.plot.density, bw_method=0.5, ind=sample_points)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax)
-        self._check_ax_scales(ax, yaxis="log")
-        self._check_text_labels(ax.yaxis.get_label(), "Density")
+        _check_ax_scales(ax, yaxis="log")
+        _check_text_labels(ax.yaxis.get_label(), "Density")
 
     @td.skip_if_no_scipy
     def test_kde_missing_vals(self):
@@ -501,13 +511,13 @@ def test_kde_missing_vals(self):
 
     @pytest.mark.xfail(reason="Api changed in 3.6.0")
     def test_boxplot_series(self, ts):
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ts.plot.box(logy=True, ax=ax)
-        self._check_ax_scales(ax, yaxis="log")
+        _check_ax_scales(ax, yaxis="log")
         xlabels = ax.get_xticklabels()
-        self._check_text_labels(xlabels, [ts.name])
+        _check_text_labels(xlabels, [ts.name])
         ylabels = ax.get_yticklabels()
-        self._check_text_labels(ylabels, [""] * len(ylabels))
+        _check_text_labels(ylabels, [""] * len(ylabels))
 
     @td.skip_if_no_scipy
     @pytest.mark.parametrize(
@@ -516,17 +526,17 @@ def test_boxplot_series(self, ts):
     )
     def test_kind_both_ways(self, kind):
         s = Series(range(3))
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         s.plot(kind=kind, ax=ax)
-        self.plt.close()
-        _, ax = self.plt.subplots()
+        mpl.pyplot.close()
+        _, ax = mpl.pyplot.subplots()
         getattr(s.plot, kind)()
-        self.plt.close()
+        mpl.pyplot.close()
 
     @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
     def test_invalid_plot_data(self, kind):
         s = Series(list("abcd"))
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         msg = "no numeric data to plot"
         with pytest.raises(TypeError, match=msg):
             s.plot(kind=kind, ax=ax)
@@ -540,7 +550,7 @@ def test_valid_object_plot(self, kind):
     @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
     def test_partially_invalid_plot_data(self, kind):
         s = Series(["a", "b", 1.0, 2])
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         msg = "no numeric data to plot"
         with pytest.raises(TypeError, match=msg):
             s.plot(kind=kind, ax=ax)
@@ -589,18 +599,18 @@ def test_errorbar_plot(self):
         kinds = ["line", "bar"]
         for kind in kinds:
             ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
-            self._check_has_errorbars(ax, xerr=0, yerr=1)
+            _check_has_errorbars(ax, xerr=0, yerr=1)
             ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
-            self._check_has_errorbars(ax, xerr=0, yerr=1)
+            _check_has_errorbars(ax, xerr=0, yerr=1)
             ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
-            self._check_has_errorbars(ax, xerr=0, yerr=1)
+            _check_has_errorbars(ax, xerr=0, yerr=1)
             ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
-            self._check_has_errorbars(ax, xerr=0, yerr=1)
+            _check_has_errorbars(ax, xerr=0, yerr=1)
             ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
-            self._check_has_errorbars(ax, xerr=1, yerr=1)
+            _check_has_errorbars(ax, xerr=1, yerr=1)
 
         ax = _check_plot_works(s.plot, xerr=s_err)
-        self._check_has_errorbars(ax, xerr=1, yerr=0)
+        _check_has_errorbars(ax, xerr=1, yerr=0)
 
         # test time series plotting
         ix = date_range("1/1/2000", "1/1/2001", freq="M")
@@ -609,9 +619,9 @@ def test_errorbar_plot(self):
         td_err = DataFrame(np.abs(np.random.randn(12, 2)), index=ix, columns=["x", "y"])
 
         ax = _check_plot_works(ts.plot, yerr=ts_err)
-        self._check_has_errorbars(ax, xerr=0, yerr=1)
+        _check_has_errorbars(ax, xerr=0, yerr=1)
         ax = _check_plot_works(ts.plot, yerr=td_err)
-        self._check_has_errorbars(ax, xerr=0, yerr=1)
+        _check_has_errorbars(ax, xerr=0, yerr=1)
 
         # check incorrect lengths and types
         with tm.external_error_raised(ValueError):
@@ -630,7 +640,7 @@ def test_table(self, series):
     @td.skip_if_no_scipy
     def test_series_grid_settings(self):
         # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
-        self._check_grid_settings(
+        _check_grid_settings(
             Series([1, 2, 3]),
             plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds,
         )
@@ -686,39 +696,39 @@ def test_standard_colors_all(self):
 
     def test_series_plot_color_kwargs(self):
         # GH1890
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = Series(np.arange(12) + 1).plot(color="green", ax=ax)
-        self._check_colors(ax.get_lines(), linecolors=["green"])
+        _check_colors(ax.get_lines(), linecolors=["green"])
 
     def test_time_series_plot_color_kwargs(self):
         # #1890
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = Series(np.arange(12) + 1, index=date_range("1/1/2000", periods=12)).plot(
             color="green", ax=ax
         )
-        self._check_colors(ax.get_lines(), linecolors=["green"])
+        _check_colors(ax.get_lines(), linecolors=["green"])
 
     def test_time_series_plot_color_with_empty_kwargs(self):
         import matplotlib as mpl
 
-        def_colors = self._unpack_cycler(mpl.rcParams)
+        def_colors = _unpack_cycler(mpl.rcParams)
         index = date_range("1/1/2000", periods=12)
         s = Series(np.arange(1, 13), index=index)
 
         ncolors = 3
 
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         for i in range(ncolors):
             ax = s.plot(ax=ax)
-        self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
+        _check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
 
     def test_xticklabels(self):
         # GH11529
         s = Series(np.arange(10), index=[f"P{i:02d}" for i in range(10)])
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = s.plot(xticks=[0, 3, 5, 9], ax=ax)
         exp = [f"P{i:02d}" for i in [0, 3, 5, 9]]
-        self._check_text_labels(ax.get_xticklabels(), exp)
+        _check_text_labels(ax.get_xticklabels(), exp)
 
     def test_xtick_barPlot(self):
         # GH28172
@@ -749,12 +759,12 @@ def test_custom_business_day_freq(self):
     )
     def test_plot_accessor_updates_on_inplace(self):
         ser = Series([1, 2, 3, 4])
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         ax = ser.plot(ax=ax)
         before = ax.xaxis.get_ticklocs()
 
         ser.drop([0, 1], inplace=True)
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         after = ax.xaxis.get_ticklocs()
         tm.assert_numpy_array_equal(before, after)
 
@@ -763,7 +773,7 @@ def test_plot_xlim_for_series(self, kind):
         # test if xlim is also correctly plotted in Series for line and area
         # GH 27686
         s = Series([2, 3])
-        _, ax = self.plt.subplots()
+        _, ax = mpl.pyplot.subplots()
         s.plot(kind=kind, ax=ax)
         xlims = ax.get_xlim()
 
@@ -853,5 +863,5 @@ def test_series_none_color(self):
         # GH51953
         series = Series([1, 2, 3])
         ax = series.plot(color=None)
-        expected = self._unpack_cycler(self.plt.rcParams)[:1]
-        self._check_colors(ax.get_lines(), linecolors=expected)
+        expected = _unpack_cycler(mpl.pyplot.rcParams)[:1]
+        _check_colors(ax.get_lines(), linecolors=expected)
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 4ea3c75cb684a..83b9a83c0a6a2 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -47,12 +47,9 @@ def get_objs():
     return objs
 
 
-objs = get_objs()
-
-
 class TestReductions:
     @pytest.mark.parametrize("opname", ["max", "min"])
-    @pytest.mark.parametrize("obj", objs)
+    @pytest.mark.parametrize("obj", get_objs())
     def test_ops(self, opname, obj):
         result = getattr(obj, opname)()
         if not isinstance(obj, PeriodIndex):
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 9514ccd24c1ca..b82afab49954d 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -80,11 +80,11 @@ def test_asfreq_fill_value(series, create_index):
 
 @all_ts
 def test_resample_interpolate(frame):
-    # # 12925
+    # GH#12925
     df = frame
-    tm.assert_frame_equal(
-        df.resample("1T").asfreq().interpolate(), df.resample("1T").interpolate()
-    )
+    result = df.resample("1T").asfreq().interpolate()
+    expected = df.resample("1T").interpolate()
+    tm.assert_frame_equal(result, expected)
 
 
 def test_raises_on_non_datetimelike_index():
@@ -95,7 +95,7 @@ def test_raises_on_non_datetimelike_index():
         "but got an instance of 'RangeIndex'"
     )
     with pytest.raises(TypeError, match=msg):
-        xp.resample("A").mean()
+        xp.resample("A")
 
 
 @all_ts
@@ -132,13 +132,17 @@ def test_resample_empty_series(freq, empty_series_dti, resample_method, request)
 
 
 @all_ts
-@pytest.mark.parametrize("freq", ["M", "D", "H"])
-def test_resample_nat_index_series(request, freq, series, resample_method):
+@pytest.mark.parametrize(
+    "freq",
+    [
+        pytest.param("M", marks=pytest.mark.xfail(reason="Don't know why this fails")),
+        "D",
+        "H",
+    ],
+)
+def test_resample_nat_index_series(freq, series, resample_method):
     # GH39227
 
-    if freq == "M":
-        request.node.add_marker(pytest.mark.xfail(reason="Don't know why this fails"))
-
     ser = series.copy()
     ser.index = PeriodIndex([NaT] * len(ser), freq=freq)
     rs = ser.resample(freq)
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 389db2d36474d..0f52f2d1c65ee 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1968,3 +1968,19 @@ def test_long_rule_non_nano():
     )
     expected = Series([1.0, 3.0, 6.5, 4.0, 3.0, 6.5, 4.0, 3.0, 6.5], index=expected_idx)
     tm.assert_series_equal(result, expected)
+
+
+def test_resample_empty_series_with_tz():
+    # GH#53664
+    df = DataFrame({"ts": [], "values": []}).astype(
+        {"ts": "datetime64[ns, Atlantic/Faroe]"}
+    )
+    result = df.resample("2MS", on="ts", closed="left", label="left", origin="start")[
+        "values"
+    ].sum()
+
+    expected_idx = DatetimeIndex(
+        [], freq="2MS", name="ts", dtype="datetime64[ns, Atlantic/Faroe]"
+    )
+    expected = Series([], index=expected_idx, name="values", dtype="float64")
+    tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index d7b8f0c8053da..dbd28868b81b1 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -1,4 +1,5 @@
 from datetime import datetime
+import re
 
 import numpy as np
 import pytest
@@ -15,38 +16,43 @@
 import pandas._testing as tm
 from pandas.core.indexes.datetimes import date_range
 
-dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="Min")
 
-test_series = Series(np.random.rand(len(dti)), dti)
-_test_frame = DataFrame({"A": test_series, "B": test_series, "C": np.arange(len(dti))})
+@pytest.fixture
+def dti():
+    return date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="Min")
+
+
+@pytest.fixture
+def _test_series(dti):
+    return Series(np.random.rand(len(dti)), dti)
 
 
 @pytest.fixture
-def test_frame():
-    return _test_frame.copy()
+def test_frame(dti, _test_series):
+    return DataFrame({"A": _test_series, "B": _test_series, "C": np.arange(len(dti))})
 
 
-def test_str():
-    r = test_series.resample("H")
+def test_str(_test_series):
+    r = _test_series.resample("H")
     assert (
         "DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, "
         "label=left, convention=start, origin=start_day]" in str(r)
     )
 
-    r = test_series.resample("H", origin="2000-01-01")
+    r = _test_series.resample("H", origin="2000-01-01")
     assert (
         "DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, "
         "label=left, convention=start, origin=2000-01-01 00:00:00]" in str(r)
     )
 
 
-def test_api():
-    r = test_series.resample("H")
+def test_api(_test_series):
+    r = _test_series.resample("H")
     result = r.mean()
     assert isinstance(result, Series)
     assert len(result) == 217
 
-    r = test_series.to_frame().resample("H")
+    r = _test_series.to_frame().resample("H")
     result = r.mean()
     assert isinstance(result, DataFrame)
     assert len(result) == 217
@@ -115,11 +121,11 @@ def test_resample_group_keys():
     tm.assert_frame_equal(result, expected)
 
 
-def test_pipe(test_frame):
+def test_pipe(test_frame, _test_series):
     # GH17905
 
     # series
-    r = test_series.resample("H")
+    r = _test_series.resample("H")
     expected = r.max() - r.mean()
     result = r.pipe(lambda x: x.max() - x.mean())
     tm.assert_series_equal(result, expected)
@@ -186,7 +192,8 @@ def tests_raises_on_nuisance(test_frame):
     tm.assert_frame_equal(result, expected)
 
     expected = r[["A", "B", "C"]].mean()
-    with pytest.raises(TypeError, match="Could not convert"):
+    msg = re.escape("agg function failed [how->mean,dtype->object]")
+    with pytest.raises(TypeError, match=msg):
         r.mean()
     result = r.mean(numeric_only=True)
     tm.assert_frame_equal(result, expected)
@@ -259,9 +266,9 @@ def test_combined_up_downsampling_of_irregular():
     tm.assert_series_equal(result, expected)
 
 
-def test_transform_series():
-    r = test_series.resample("20min")
-    expected = test_series.groupby(pd.Grouper(freq="20min")).transform("mean")
+def test_transform_series(_test_series):
+    r = _test_series.resample("20min")
+    expected = _test_series.groupby(pd.Grouper(freq="20min")).transform("mean")
     result = r.transform("mean")
     tm.assert_series_equal(result, expected)
 
@@ -317,17 +324,17 @@ def test_fillna():
     ],
     ids=["resample", "groupby"],
 )
-def test_apply_without_aggregation(func):
+def test_apply_without_aggregation(func, _test_series):
     # both resample and groupby should work w/o aggregation
-    t = func(test_series)
+    t = func(_test_series)
     result = t.apply(lambda x: x)
-    tm.assert_series_equal(result, test_series)
+    tm.assert_series_equal(result, _test_series)
 
 
-def test_apply_without_aggregation2():
-    grouped = test_series.to_frame(name="foo").resample("20min", group_keys=False)
+def test_apply_without_aggregation2(_test_series):
+    grouped = _test_series.to_frame(name="foo").resample("20min", group_keys=False)
     result = grouped["foo"].apply(lambda x: x)
-    tm.assert_series_equal(result, test_series.rename("foo"))
+    tm.assert_series_equal(result, _test_series.rename("foo"))
 
 
 def test_agg_consistency():
@@ -886,8 +893,13 @@ def test_frame_downsample_method(method, numeric_only, expected_data):
 
     func = getattr(resampled, method)
     if isinstance(expected_data, str):
-        klass = TypeError if method in ("var", "mean", "median", "prod") else ValueError
-        with pytest.raises(klass, match=expected_data):
+        if method in ("var", "mean", "median", "prod"):
+            klass = TypeError
+            msg = re.escape(f"agg function failed [how->{method},dtype->object]")
+        else:
+            klass = ValueError
+            msg = expected_data
+        with pytest.raises(klass, match=msg):
             _ = func(**kwargs)
     else:
         result = func(**kwargs)
@@ -933,7 +945,8 @@ def test_series_downsample_method(method, numeric_only, expected_data):
         with pytest.raises(TypeError, match=msg):
             func(**kwargs)
     elif method == "prod":
-        with pytest.raises(TypeError, match="can't multiply sequence by non-int"):
+        msg = re.escape("agg function failed [how->prod,dtype->object]")
+        with pytest.raises(TypeError, match=msg):
             func(**kwargs)
     else:
         result = func(**kwargs)
@@ -1002,13 +1015,13 @@ def test_df_axis_param_depr():
         df.resample("M", axis=0)
 
 
-def test_series_axis_param_depr():
+def test_series_axis_param_depr(_test_series):
     warning_msg = (
         "The 'axis' keyword in Series.resample is "
         "deprecated and will be removed in a future version."
     )
     with tm.assert_produces_warning(FutureWarning, match=warning_msg):
-        test_series.resample("H", axis=0)
+        _test_series.resample("H", axis=0)
 
 
 def test_resample_empty():
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 1682edb42915d..df14a5bc374c6 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -17,10 +17,13 @@
 import pandas._testing as tm
 from pandas.core.indexes.datetimes import date_range
 
-test_frame = DataFrame(
-    {"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)},
-    index=date_range("1/1/2000", freq="s", periods=40),
-)
+
+@pytest.fixture
+def test_frame():
+    return DataFrame(
+        {"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)},
+        index=date_range("1/1/2000", freq="s", periods=40),
+    )
 
 
 @async_mark()
@@ -85,7 +88,7 @@ def f_1(x):
     tm.assert_frame_equal(result, expected)
 
 
-def test_getitem():
+def test_getitem(test_frame):
     g = test_frame.groupby("A")
 
     expected = g.B.apply(lambda x: x.resample("2s").mean())
@@ -217,7 +220,7 @@ def test_nearest():
         "ohlc",
     ],
 )
-def test_methods(f):
+def test_methods(f, test_frame):
     g = test_frame.groupby("A")
     r = g.resample("2s")
 
@@ -226,7 +229,7 @@ def test_methods(f):
     tm.assert_equal(result, expected)
 
 
-def test_methods_nunique():
+def test_methods_nunique(test_frame):
     # series only
     g = test_frame.groupby("A")
     r = g.resample("2s")
@@ -236,7 +239,7 @@ def test_methods_nunique():
 
 
 @pytest.mark.parametrize("f", ["std", "var"])
-def test_methods_std_var(f):
+def test_methods_std_var(f, test_frame):
     g = test_frame.groupby("A")
     r = g.resample("2s")
     result = getattr(r, f)(ddof=1)
@@ -244,7 +247,7 @@ def test_methods_std_var(f):
     tm.assert_frame_equal(result, expected)
 
 
-def test_apply():
+def test_apply(test_frame):
     g = test_frame.groupby("A")
     r = g.resample("2s")
 
@@ -342,7 +345,7 @@ def test_resample_groupby_with_label():
     tm.assert_frame_equal(result, expected)
 
 
-def test_consistency_with_window():
+def test_consistency_with_window(test_frame):
     # consistent return values with window
     df = test_frame
     expected = Index([1, 2, 3], name="A")
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index debfb48c2b39c..a5fb48f801522 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -14,10 +14,13 @@
 from pandas.core.groupby.grouper import Grouper
 from pandas.core.indexes.datetimes import date_range
 
-test_series = Series(np.random.randn(1000), index=date_range("1/1/2000", periods=1000))
 
+@pytest.fixture
+def test_series():
+    return Series(np.random.randn(1000), index=date_range("1/1/2000", periods=1000))
 
-def test_apply():
+
+def test_apply(test_series):
     grouper = Grouper(freq="A", label="right", closed="right")
 
     grouped = test_series.groupby(grouper)
@@ -33,7 +36,7 @@ def f(x):
     tm.assert_series_equal(applied, expected)
 
 
-def test_count():
+def test_count(test_series):
     test_series[::3] = np.nan
 
     expected = test_series.groupby(lambda x: x.year).count()
@@ -48,7 +51,7 @@ def test_count():
     tm.assert_series_equal(result, expected)
 
 
-def test_numpy_reduction():
+def test_numpy_reduction(test_series):
     result = test_series.resample("A", closed="right").prod()
 
     expected = test_series.groupby(lambda x: x.year).agg(np.prod)
diff --git a/pandas/tests/resample/test_timedelta.py b/pandas/tests/resample/test_timedelta.py
index 79e024ef84c90..d87e6ca2f69b9 100644
--- a/pandas/tests/resample/test_timedelta.py
+++ b/pandas/tests/resample/test_timedelta.py
@@ -48,17 +48,17 @@ def test_resample_as_freq_with_subperiod():
 def test_resample_with_timedeltas():
     expected = DataFrame({"A": np.arange(1480)})
     expected = expected.groupby(expected.index // 30).sum()
-    expected.index = timedelta_range("0 days", freq="30T", periods=50)
+    expected.index = timedelta_range("0 days", freq="30min", periods=50)
 
     df = DataFrame(
-        {"A": np.arange(1480)}, index=pd.to_timedelta(np.arange(1480), unit="T")
+        {"A": np.arange(1480)}, index=pd.to_timedelta(np.arange(1480), unit="min")
     )
-    result = df.resample("30T").sum()
+    result = df.resample("30min").sum()
 
     tm.assert_frame_equal(result, expected)
 
     s = df["A"]
-    result = s.resample("30T").sum()
+    result = s.resample("30min").sum()
     tm.assert_series_equal(result, expected["A"])
 
 
diff --git a/pandas/tests/reshape/concat/test_append_common.py b/pandas/tests/reshape/concat/test_append_common.py
index 2d84de8145111..948545320a31a 100644
--- a/pandas/tests/reshape/concat/test_append_common.py
+++ b/pandas/tests/reshape/concat/test_append_common.py
@@ -10,37 +10,46 @@
 )
 import pandas._testing as tm
 
-dt_data = [
-    pd.Timestamp("2011-01-01"),
-    pd.Timestamp("2011-01-02"),
-    pd.Timestamp("2011-01-03"),
-]
-tz_data = [
-    pd.Timestamp("2011-01-01", tz="US/Eastern"),
-    pd.Timestamp("2011-01-02", tz="US/Eastern"),
-    pd.Timestamp("2011-01-03", tz="US/Eastern"),
-]
-td_data = [
-    pd.Timedelta("1 days"),
-    pd.Timedelta("2 days"),
-    pd.Timedelta("3 days"),
-]
-period_data = [
-    pd.Period("2011-01", freq="M"),
-    pd.Period("2011-02", freq="M"),
-    pd.Period("2011-03", freq="M"),
-]
-data_dict = {
-    "bool": [True, False, True],
-    "int64": [1, 2, 3],
-    "float64": [1.1, np.nan, 3.3],
-    "category": Categorical(["X", "Y", "Z"]),
-    "object": ["a", "b", "c"],
-    "datetime64[ns]": dt_data,
-    "datetime64[ns, US/Eastern]": tz_data,
-    "timedelta64[ns]": td_data,
-    "period[M]": period_data,
-}
+
+@pytest.fixture(
+    params=list(
+        {
+            "bool": [True, False, True],
+            "int64": [1, 2, 3],
+            "float64": [1.1, np.nan, 3.3],
+            "category": Categorical(["X", "Y", "Z"]),
+            "object": ["a", "b", "c"],
+            "datetime64[ns]": [
+                pd.Timestamp("2011-01-01"),
+                pd.Timestamp("2011-01-02"),
+                pd.Timestamp("2011-01-03"),
+            ],
+            "datetime64[ns, US/Eastern]": [
+                pd.Timestamp("2011-01-01", tz="US/Eastern"),
+                pd.Timestamp("2011-01-02", tz="US/Eastern"),
+                pd.Timestamp("2011-01-03", tz="US/Eastern"),
+            ],
+            "timedelta64[ns]": [
+                pd.Timedelta("1 days"),
+                pd.Timedelta("2 days"),
+                pd.Timedelta("3 days"),
+            ],
+            "period[M]": [
+                pd.Period("2011-01", freq="M"),
+                pd.Period("2011-02", freq="M"),
+                pd.Period("2011-03", freq="M"),
+            ],
+        }.items()
+    )
+)
+def item(request):
+    key, data = request.param
+    return key, data
+
+
+@pytest.fixture
+def item2(item):
+    return item
 
 
 class TestConcatAppendCommon:
@@ -48,13 +57,6 @@ class TestConcatAppendCommon:
     Test common dtype coercion rules between concat and append.
     """
 
-    @pytest.fixture(params=sorted(data_dict.keys()))
-    def item(self, request):
-        key = request.param
-        return key, data_dict[key]
-
-    item2 = item
-
     def test_dtypes(self, item, index_or_series):
         # to confirm test case covers intended dtypes
         typ, vals = item
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index d5b0ad6b2d56d..ffdff75e53cf7 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -1,3 +1,5 @@
+import re
+
 import numpy as np
 import pytest
 
@@ -567,7 +569,8 @@ def test_mixed_type_join_with_suffix(self):
         df.insert(5, "dt", "foo")
 
         grouped = df.groupby("id")
-        with pytest.raises(TypeError, match="Could not convert"):
+        msg = re.escape("agg function failed [how->mean,dtype->object]")
+        with pytest.raises(TypeError, match=msg):
             grouped.mean()
         mn = grouped.mean(numeric_only=True)
         cn = grouped.count()
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index b40f0f7a45263..b6fcb27faf146 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -4,6 +4,7 @@
     timedelta,
 )
 from itertools import product
+import re
 
 import numpy as np
 import pytest
@@ -923,7 +924,8 @@ def test_no_col(self, data):
 
         # to help with a buglet
         data.columns = [k * 2 for k in data.columns]
-        with pytest.raises(TypeError, match="Could not convert"):
+        msg = re.escape("agg function failed [how->mean,dtype->object]")
+        with pytest.raises(TypeError, match=msg):
             data.pivot_table(index=["AA", "BB"], margins=True, aggfunc=np.mean)
         table = data.drop(columns="CC").pivot_table(
             index=["AA", "BB"], margins=True, aggfunc=np.mean
@@ -932,7 +934,7 @@ def test_no_col(self, data):
             totals = table.loc[("All", ""), value_col]
             assert totals == data[value_col].mean()
 
-        with pytest.raises(TypeError, match="Could not convert"):
+        with pytest.raises(TypeError, match=msg):
             data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean")
         table = data.drop(columns="CC").pivot_table(
             index=["AA", "BB"], margins=True, aggfunc="mean"
@@ -995,7 +997,8 @@ def test_margin_with_only_columns_defined(
             }
         )
         if aggfunc != "sum":
-            with pytest.raises(TypeError, match="Could not convert"):
+            msg = re.escape("agg function failed [how->mean,dtype->object]")
+            with pytest.raises(TypeError, match=msg):
                 df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc)
         if "B" not in columns:
             df = df.drop(columns="B")
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 8296201345d2f..6f163b7ecd89d 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -491,7 +491,7 @@ def test_nat_arithmetic_ndarray(dtype, op, out_dtype):
 
 def test_nat_pinned_docstrings():
     # see gh-17327
-    assert NaT.ctime.__doc__ == datetime.ctime.__doc__
+    assert NaT.ctime.__doc__ == Timestamp.ctime.__doc__
 
 
 def test_to_numpy_alias():
diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py
index ad9dd408fbeaf..7bd9e5fc5e293 100644
--- a/pandas/tests/scalar/timedelta/test_constructors.py
+++ b/pandas/tests/scalar/timedelta/test_constructors.py
@@ -289,7 +289,6 @@ def test_overflow_on_construction():
 @pytest.mark.parametrize(
     "val, unit",
     [
-        (3508, "M"),
         (15251, "W"),  # 1
         (106752, "D"),  # change from previous:
         (2562048, "h"),  # 0 hours
@@ -333,7 +332,6 @@ def test_construction_out_of_bounds_td64ns(val, unit):
 @pytest.mark.parametrize(
     "val, unit",
     [
-        (3508 * 10**9, "M"),
         (15251 * 10**9, "W"),
         (106752 * 10**9, "D"),
         (2562048 * 10**9, "h"),
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 4cb77b2f1d065..722a68a1dce71 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -492,11 +492,9 @@ def test_nat_converters(self):
                 "minute",
                 "min",
                 "minutes",
-                "t",
                 "Minute",
                 "Min",
                 "Minutes",
-                "T",
             ]
         ]
         + [
@@ -520,13 +518,11 @@ def test_nat_converters(self):
                 "millisecond",
                 "milli",
                 "millis",
-                "l",
                 "MS",
                 "Milliseconds",
                 "Millisecond",
                 "Milli",
                 "Millis",
-                "L",
             ]
         ]
         + [
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index afb4dd7422114..aa326f3a43bd5 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -1124,9 +1124,27 @@ def test_negative_dates():
     # https://github.com/pandas-dev/pandas/issues/50787
     ts = Timestamp("-2000-01-01")
     msg = (
-        "^strftime not yet supported on Timestamps which are outside the range of "
+        " not yet supported on Timestamps which are outside the range of "
         "Python's standard library. For now, please call the components you need "
         r"\(such as `.year` and `.month`\) and construct your string from there.$"
     )
-    with pytest.raises(NotImplementedError, match=msg):
+    func = "^strftime"
+    with pytest.raises(NotImplementedError, match=func + msg):
         ts.strftime("%Y")
+
+    msg = (
+        " not yet supported on Timestamps which "
+        "are outside the range of Python's standard library. "
+    )
+    func = "^date"
+    with pytest.raises(NotImplementedError, match=func + msg):
+        ts.date()
+    func = "^isocalendar"
+    with pytest.raises(NotImplementedError, match=func + msg):
+        ts.isocalendar()
+    func = "^timetuple"
+    with pytest.raises(NotImplementedError, match=func + msg):
+        ts.timetuple()
+    func = "^toordinal"
+    with pytest.raises(NotImplementedError, match=func + msg):
+        ts.toordinal()
diff --git a/pandas/tests/series/indexing/test_take.py b/pandas/tests/series/indexing/test_take.py
index 91b44e9f65320..43fbae8908966 100644
--- a/pandas/tests/series/indexing/test_take.py
+++ b/pandas/tests/series/indexing/test_take.py
@@ -5,6 +5,15 @@
 import pandas._testing as tm
 
 
+def test_take_validate_axis():
+    # GH#51022
+    ser = Series([-1, 5, 6, 2, 4])
+
+    msg = "No axis named foo for object type Series"
+    with pytest.raises(ValueError, match=msg):
+        ser.take([1, 2], axis="foo")
+
+
 def test_take():
     ser = Series([-1, 5, 6, 2, 4])
 
diff --git a/pandas/tests/series/methods/test_argsort.py b/pandas/tests/series/methods/test_argsort.py
index 1fbc9ed787e11..e1d64795e235d 100644
--- a/pandas/tests/series/methods/test_argsort.py
+++ b/pandas/tests/series/methods/test_argsort.py
@@ -10,10 +10,11 @@
 
 
 class TestSeriesArgsort:
-    def _check_accum_op(self, name, ser, check_dtype=True):
-        func = getattr(np, name)
+    def test_argsort_numpy(self, datetime_series):
+        ser = datetime_series
+        func = np.argsort
         tm.assert_numpy_array_equal(
-            func(ser).values, func(np.array(ser)), check_dtype=check_dtype
+            func(ser).values, func(np.array(ser)), check_dtype=False
         )
 
         # with missing values
@@ -26,7 +27,6 @@ def _check_accum_op(self, name, ser, check_dtype=True):
         tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
 
     def test_argsort(self, datetime_series):
-        self._check_accum_op("argsort", datetime_series, check_dtype=False)
         argsorted = datetime_series.argsort()
         assert issubclass(argsorted.dtype.type, np.integer)
 
diff --git a/pandas/tests/series/methods/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py
index d91cd6a43daea..f2ac5f1086625 100644
--- a/pandas/tests/series/methods/test_convert_dtypes.py
+++ b/pandas/tests/series/methods/test_convert_dtypes.py
@@ -12,149 +12,162 @@
 # this default. Those overrides are defined as a dict with (keyword, val) as
 # dictionary key. In case of multiple items, the last override takes precedence.
 
-test_cases = [
-    (
-        # data
-        [1, 2, 3],
-        # original dtype
-        np.dtype("int32"),
-        # default expected dtype
-        "Int32",
-        # exceptions on expected dtype
-        {("convert_integer", False): np.dtype("int32")},
-    ),
-    (
-        [1, 2, 3],
-        np.dtype("int64"),
-        "Int64",
-        {("convert_integer", False): np.dtype("int64")},
-    ),
-    (
-        ["x", "y", "z"],
-        np.dtype("O"),
-        pd.StringDtype(),
-        {("convert_string", False): np.dtype("O")},
-    ),
-    (
-        [True, False, np.nan],
-        np.dtype("O"),
-        pd.BooleanDtype(),
-        {("convert_boolean", False): np.dtype("O")},
-    ),
-    (
-        ["h", "i", np.nan],
-        np.dtype("O"),
-        pd.StringDtype(),
-        {("convert_string", False): np.dtype("O")},
-    ),
-    (  # GH32117
-        ["h", "i", 1],
-        np.dtype("O"),
-        np.dtype("O"),
-        {},
-    ),
-    (
-        [10, np.nan, 20],
-        np.dtype("float"),
-        "Int64",
-        {
-            ("convert_integer", False, "convert_floating", True): "Float64",
-            ("convert_integer", False, "convert_floating", False): np.dtype("float"),
-        },
-    ),
-    (
-        [np.nan, 100.5, 200],
-        np.dtype("float"),
-        "Float64",
-        {("convert_floating", False): np.dtype("float")},
-    ),
-    (
-        [3, 4, 5],
-        "Int8",
-        "Int8",
-        {},
-    ),
-    (
-        [[1, 2], [3, 4], [5]],
-        None,
-        np.dtype("O"),
-        {},
-    ),
-    (
-        [4, 5, 6],
-        np.dtype("uint32"),
-        "UInt32",
-        {("convert_integer", False): np.dtype("uint32")},
-    ),
-    (
-        [-10, 12, 13],
-        np.dtype("i1"),
-        "Int8",
-        {("convert_integer", False): np.dtype("i1")},
-    ),
-    (
-        [1.2, 1.3],
-        np.dtype("float32"),
-        "Float32",
-        {("convert_floating", False): np.dtype("float32")},
-    ),
-    (
-        [1, 2.0],
-        object,
-        "Int64",
-        {
-            ("convert_integer", False): "Float64",
-            ("convert_integer", False, "convert_floating", False): np.dtype("float"),
-            ("infer_objects", False): np.dtype("object"),
-        },
-    ),
-    (
-        [1, 2.5],
-        object,
-        "Float64",
-        {
-            ("convert_floating", False): np.dtype("float"),
-            ("infer_objects", False): np.dtype("object"),
-        },
-    ),
-    (["a", "b"], pd.CategoricalDtype(), pd.CategoricalDtype(), {}),
-    (
-        pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
-        pd.DatetimeTZDtype(tz="UTC"),
-        pd.DatetimeTZDtype(tz="UTC"),
-        {},
-    ),
-    (
-        pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
-        "datetime64[ns]",
-        np.dtype("datetime64[ns]"),
-        {},
-    ),
-    (
-        pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
-        object,
-        np.dtype("datetime64[ns]"),
-        {("infer_objects", False): np.dtype("object")},
-    ),
-    (pd.period_range("1/1/2011", freq="M", periods=3), None, pd.PeriodDtype("M"), {}),
-    (
-        pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),
-        None,
-        pd.IntervalDtype("int64", "right"),
-        {},
-    ),
-]
+
+@pytest.fixture(
+    params=[
+        (
+            # data
+            [1, 2, 3],
+            # original dtype
+            np.dtype("int32"),
+            # default expected dtype
+            "Int32",
+            # exceptions on expected dtype
+            {("convert_integer", False): np.dtype("int32")},
+        ),
+        (
+            [1, 2, 3],
+            np.dtype("int64"),
+            "Int64",
+            {("convert_integer", False): np.dtype("int64")},
+        ),
+        (
+            ["x", "y", "z"],
+            np.dtype("O"),
+            pd.StringDtype(),
+            {("convert_string", False): np.dtype("O")},
+        ),
+        (
+            [True, False, np.nan],
+            np.dtype("O"),
+            pd.BooleanDtype(),
+            {("convert_boolean", False): np.dtype("O")},
+        ),
+        (
+            ["h", "i", np.nan],
+            np.dtype("O"),
+            pd.StringDtype(),
+            {("convert_string", False): np.dtype("O")},
+        ),
+        (  # GH32117
+            ["h", "i", 1],
+            np.dtype("O"),
+            np.dtype("O"),
+            {},
+        ),
+        (
+            [10, np.nan, 20],
+            np.dtype("float"),
+            "Int64",
+            {
+                ("convert_integer", False, "convert_floating", True): "Float64",
+                ("convert_integer", False, "convert_floating", False): np.dtype(
+                    "float"
+                ),
+            },
+        ),
+        (
+            [np.nan, 100.5, 200],
+            np.dtype("float"),
+            "Float64",
+            {("convert_floating", False): np.dtype("float")},
+        ),
+        (
+            [3, 4, 5],
+            "Int8",
+            "Int8",
+            {},
+        ),
+        (
+            [[1, 2], [3, 4], [5]],
+            None,
+            np.dtype("O"),
+            {},
+        ),
+        (
+            [4, 5, 6],
+            np.dtype("uint32"),
+            "UInt32",
+            {("convert_integer", False): np.dtype("uint32")},
+        ),
+        (
+            [-10, 12, 13],
+            np.dtype("i1"),
+            "Int8",
+            {("convert_integer", False): np.dtype("i1")},
+        ),
+        (
+            [1.2, 1.3],
+            np.dtype("float32"),
+            "Float32",
+            {("convert_floating", False): np.dtype("float32")},
+        ),
+        (
+            [1, 2.0],
+            object,
+            "Int64",
+            {
+                ("convert_integer", False): "Float64",
+                ("convert_integer", False, "convert_floating", False): np.dtype(
+                    "float"
+                ),
+                ("infer_objects", False): np.dtype("object"),
+            },
+        ),
+        (
+            [1, 2.5],
+            object,
+            "Float64",
+            {
+                ("convert_floating", False): np.dtype("float"),
+                ("infer_objects", False): np.dtype("object"),
+            },
+        ),
+        (["a", "b"], pd.CategoricalDtype(), pd.CategoricalDtype(), {}),
+        (
+            pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
+            pd.DatetimeTZDtype(tz="UTC"),
+            pd.DatetimeTZDtype(tz="UTC"),
+            {},
+        ),
+        (
+            pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
+            "datetime64[ns]",
+            np.dtype("datetime64[ns]"),
+            {},
+        ),
+        (
+            pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
+            object,
+            np.dtype("datetime64[ns]"),
+            {("infer_objects", False): np.dtype("object")},
+        ),
+        (
+            pd.period_range("1/1/2011", freq="M", periods=3),
+            None,
+            pd.PeriodDtype("M"),
+            {},
+        ),
+        (
+            pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),
+            None,
+            pd.IntervalDtype("int64", "right"),
+            {},
+        ),
+    ]
+)
+def test_cases(request):
+    return request.param
 
 
 class TestSeriesConvertDtypes:
-    @pytest.mark.parametrize(
-        "data, maindtype, expected_default, expected_other",
-        test_cases,
-    )
     @pytest.mark.parametrize("params", product(*[(True, False)] * 5))
     def test_convert_dtypes(
-        self, data, maindtype, params, expected_default, expected_other
+        self,
+        test_cases,
+        params,
     ):
+        data, maindtype, expected_default, expected_other = test_cases
         if (
             hasattr(data, "dtype")
             and data.dtype == "M8[ns]"
@@ -240,3 +253,11 @@ def test_convert_dtype_object_with_na_float(self, infer_objects, dtype):
         result = ser.convert_dtypes(infer_objects=infer_objects)
         expected = pd.Series([1.5, pd.NA], dtype=dtype)
         tm.assert_series_equal(result, expected)
+
+    def test_convert_dtypes_pyarrow_to_np_nullable(self):
+        # GH 53648
+        pytest.importorskip("pyarrow")
+        ser = pd.Series(range(2), dtype="int32[pyarrow]")
+        result = ser.convert_dtypes(dtype_backend="numpy_nullable")
+        expected = pd.Series(range(2), dtype="Int32")
+        tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_describe.py b/pandas/tests/series/methods/test_describe.py
index 0062d45cc68a0..7f706fc54897e 100644
--- a/pandas/tests/series/methods/test_describe.py
+++ b/pandas/tests/series/methods/test_describe.py
@@ -1,7 +1,7 @@
 import numpy as np
 import pytest
 
-from pandas.compat import is_numpy_dev
+from pandas.compat.numpy import np_version_gte1p25
 
 from pandas.core.dtypes.common import (
     is_complex_dtype,
@@ -166,7 +166,7 @@ def test_numeric_result_dtype(self, any_numeric_dtype):
             dtype = "complex128" if is_complex_dtype(any_numeric_dtype) else None
 
         ser = Series([0, 1], dtype=any_numeric_dtype)
-        if dtype == "complex128" and is_numpy_dev:
+        if dtype == "complex128" and np_version_gte1p25:
             with pytest.raises(
                 TypeError, match=r"^a must be an array of real numbers$"
             ):
diff --git a/pandas/tests/series/methods/test_equals.py b/pandas/tests/series/methods/test_equals.py
index 9278d1b51e1aa..b94723b7cbddf 100644
--- a/pandas/tests/series/methods/test_equals.py
+++ b/pandas/tests/series/methods/test_equals.py
@@ -5,7 +5,7 @@
 import pytest
 
 from pandas._libs.missing import is_matching_na
-from pandas.compat import is_numpy_dev
+from pandas.compat.numpy import np_version_gte1p25
 
 from pandas.core.dtypes.common import is_float
 
@@ -51,7 +51,7 @@ def test_equals_list_array(val):
 
     cm = (
         tm.assert_produces_warning(FutureWarning, check_stacklevel=False)
-        if isinstance(val, str) and not is_numpy_dev
+        if isinstance(val, str) and not np_version_gte1p25
         else nullcontext()
     )
     with cm:
diff --git a/pandas/tests/series/methods/test_explode.py b/pandas/tests/series/methods/test_explode.py
index 886152326cf3e..c8a9eb6f89fde 100644
--- a/pandas/tests/series/methods/test_explode.py
+++ b/pandas/tests/series/methods/test_explode.py
@@ -141,3 +141,25 @@ def test_explode_scalars_can_ignore_index():
     result = s.explode(ignore_index=True)
     expected = pd.Series([1, 2, 3])
     tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("ignore_index", [True, False])
+def test_explode_pyarrow_list_type(ignore_index):
+    # GH 53602
+    pa = pytest.importorskip("pyarrow")
+
+    data = [
+        [None, None],
+        [1],
+        [],
+        [2, 3],
+        None,
+    ]
+    ser = pd.Series(data, dtype=pd.ArrowDtype(pa.list_(pa.int64())))
+    result = ser.explode(ignore_index=ignore_index)
+    expected = pd.Series(
+        data=[None, None, 1, None, 2, 3, None],
+        index=None if ignore_index else [0, 0, 1, 2, 3, 3, 4],
+        dtype=pd.ArrowDtype(pa.int64()),
+    )
+    tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py
index 6f4c4ba4dd69d..8c4c5524ac3be 100644
--- a/pandas/tests/series/methods/test_interpolate.py
+++ b/pandas/tests/series/methods/test_interpolate.py
@@ -347,6 +347,8 @@ def test_interp_invalid_method(self, invalid_method):
         s = Series([1, 3, np.nan, 12, np.nan, 25])
 
         msg = f"method must be one of.* Got '{invalid_method}' instead"
+        if invalid_method is None:
+            msg = "'method' should be a string, not None"
         with pytest.raises(ValueError, match=msg):
             s.interpolate(method=invalid_method)
 
@@ -360,8 +362,10 @@ def test_interp_invalid_method_and_value(self):
         ser = Series([1, 3, np.nan, 12, np.nan, 25])
 
         msg = "Cannot pass both fill_value and method"
+        msg2 = "Series.interpolate with method=pad"
         with pytest.raises(ValueError, match=msg):
-            ser.interpolate(fill_value=3, method="pad")
+            with tm.assert_produces_warning(FutureWarning, match=msg2):
+                ser.interpolate(fill_value=3, method="pad")
 
     def test_interp_limit_forward(self):
         s = Series([1, 3, np.nan, np.nan, np.nan, 11])
@@ -470,8 +474,10 @@ def test_interp_limit_direction_raises(self, method, limit_direction, expected):
         s = Series([1, 2, 3])
 
         msg = f"`limit_direction` must be '{expected}' for method `{method}`"
+        msg2 = "Series.interpolate with method="
         with pytest.raises(ValueError, match=msg):
-            s.interpolate(method=method, limit_direction=limit_direction)
+            with tm.assert_produces_warning(FutureWarning, match=msg2):
+                s.interpolate(method=method, limit_direction=limit_direction)
 
     @pytest.mark.parametrize(
         "data, expected_data, kwargs",
@@ -513,7 +519,9 @@ def test_interp_limit_area_with_pad(self, data, expected_data, kwargs):
 
         s = Series(data)
         expected = Series(expected_data)
-        result = s.interpolate(**kwargs)
+        msg = "Series.interpolate with method=pad"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = s.interpolate(**kwargs)
         tm.assert_series_equal(result, expected)
 
     @pytest.mark.parametrize(
@@ -546,7 +554,9 @@ def test_interp_limit_area_with_backfill(self, data, expected_data, kwargs):
 
         s = Series(data)
         expected = Series(expected_data)
-        result = s.interpolate(**kwargs)
+        msg = "Series.interpolate with method=bfill"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = s.interpolate(**kwargs)
         tm.assert_series_equal(result, expected)
 
     def test_interp_limit_direction(self):
@@ -642,7 +652,15 @@ def test_interp_datetime64(self, method, tz_naive_fixture):
         df = Series(
             [1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture)
         )
-        result = df.interpolate(method=method)
+        warn = None if method == "nearest" else FutureWarning
+        msg = "Series.interpolate with method=pad is deprecated"
+        with tm.assert_produces_warning(warn, match=msg):
+            result = df.interpolate(method=method)
+        if warn is not None:
+            # check the "use ffill instead" is equivalent
+            alt = df.ffill()
+            tm.assert_series_equal(result, alt)
+
         expected = Series(
             [1.0, 1.0, 3.0],
             index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture),
@@ -654,7 +672,13 @@ def test_interp_pad_datetime64tz_values(self):
         dti = date_range("2015-04-05", periods=3, tz="US/Central")
         ser = Series(dti)
         ser[1] = pd.NaT
-        result = ser.interpolate(method="pad")
+
+        msg = "Series.interpolate with method=pad is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = ser.interpolate(method="pad")
+        # check the "use ffill instead" is equivalent
+        alt = ser.ffill()
+        tm.assert_series_equal(result, alt)
 
         expected = Series(dti)
         expected[1] = expected[0]
@@ -821,3 +845,11 @@ def test_interpolate_unsorted_index(self, ascending, expected_values):
         result = ts.sort_index(ascending=ascending).interpolate(method="index")
         expected = Series(data=expected_values, index=expected_values, dtype=float)
         tm.assert_series_equal(result, expected)
+
+    def test_interpolate_asfreq_raises(self):
+        ser = Series(["a", None, "b"], dtype=object)
+        msg2 = "Series.interpolate with object dtype"
+        msg = "Invalid fill method"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(FutureWarning, match=msg2):
+                ser.interpolate(method="asfreq")
diff --git a/pandas/tests/series/methods/test_pct_change.py b/pandas/tests/series/methods/test_pct_change.py
index 475d729b6ce78..38a42062b275e 100644
--- a/pandas/tests/series/methods/test_pct_change.py
+++ b/pandas/tests/series/methods/test_pct_change.py
@@ -10,14 +10,21 @@
 
 class TestSeriesPctChange:
     def test_pct_change(self, datetime_series):
-        rs = datetime_series.pct_change(fill_method=None)
+        msg = (
+            "The 'fill_method' and 'limit' keywords in "
+            "Series.pct_change are deprecated"
+        )
+
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs = datetime_series.pct_change(fill_method=None)
         tm.assert_series_equal(rs, datetime_series / datetime_series.shift(1) - 1)
 
         rs = datetime_series.pct_change(2)
         filled = datetime_series.ffill()
         tm.assert_series_equal(rs, filled / filled.shift(2) - 1)
 
-        rs = datetime_series.pct_change(fill_method="bfill", limit=1)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs = datetime_series.pct_change(fill_method="bfill", limit=1)
         filled = datetime_series.bfill(limit=1)
         tm.assert_series_equal(rs, filled / filled.shift(1) - 1)
 
@@ -40,7 +47,10 @@ def test_pct_change_with_duplicate_axis(self):
     def test_pct_change_shift_over_nas(self):
         s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
 
-        chg = s.pct_change()
+        msg = "The default fill_method='pad' in Series.pct_change is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            chg = s.pct_change()
+
         expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
         tm.assert_series_equal(chg, expected)
 
@@ -58,18 +68,31 @@ def test_pct_change_shift_over_nas(self):
     def test_pct_change_periods_freq(
         self, freq, periods, fill_method, limit, datetime_series
     ):
-        # GH#7292
-        rs_freq = datetime_series.pct_change(
-            freq=freq, fill_method=fill_method, limit=limit
-        )
-        rs_periods = datetime_series.pct_change(
-            periods, fill_method=fill_method, limit=limit
+        msg = (
+            "The 'fill_method' and 'limit' keywords in "
+            "Series.pct_change are deprecated"
         )
+
+        # GH#7292
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs_freq = datetime_series.pct_change(
+                freq=freq, fill_method=fill_method, limit=limit
+            )
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs_periods = datetime_series.pct_change(
+                periods, fill_method=fill_method, limit=limit
+            )
         tm.assert_series_equal(rs_freq, rs_periods)
 
         empty_ts = Series(index=datetime_series.index, dtype=object)
-        rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
-        rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs_freq = empty_ts.pct_change(
+                freq=freq, fill_method=fill_method, limit=limit
+            )
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs_periods = empty_ts.pct_change(
+                periods, fill_method=fill_method, limit=limit
+            )
         tm.assert_series_equal(rs_freq, rs_periods)
 
 
@@ -77,6 +100,10 @@ def test_pct_change_periods_freq(
 def test_pct_change_with_duplicated_indices(fill_method):
     # GH30463
     s = Series([np.nan, 1, 2, 3, 9, 18], index=["a", "b"] * 3)
-    result = s.pct_change(fill_method=fill_method)
+
+    msg = "The 'fill_method' and 'limit' keywords in Series.pct_change are deprecated"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        result = s.pct_change(fill_method=fill_method)
+
     expected = Series([np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], index=["a", "b"] * 3)
     tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py
index 16fc04d588a52..c801528e6ff97 100644
--- a/pandas/tests/series/methods/test_reindex.py
+++ b/pandas/tests/series/methods/test_reindex.py
@@ -12,6 +12,7 @@
     NaT,
     Period,
     PeriodIndex,
+    RangeIndex,
     Series,
     Timedelta,
     Timestamp,
@@ -131,6 +132,8 @@ def test_reindex_pad():
     expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8])
     tm.assert_series_equal(reindexed, expected)
 
+
+def test_reindex_pad2():
     # GH4604
     s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", "d", "e"])
     new_index = ["a", "g", "c", "f"]
@@ -147,6 +150,8 @@ def test_reindex_pad():
     result = s.reindex(new_index, method="ffill")
     tm.assert_series_equal(result, expected)
 
+
+def test_reindex_inference():
     # inference of new dtype
     s = Series([True, False, False, True], index=list("abcd"))
     new_index = "agc"
@@ -154,6 +159,8 @@ def test_reindex_pad():
     expected = Series([True, True, False], index=list(new_index))
     tm.assert_series_equal(result, expected)
 
+
+def test_reindex_downcasting():
     # GH4618 shifted series downcasting
     s = Series(False, index=range(0, 5))
     result = s.shift(1).bfill()
@@ -422,3 +429,14 @@ def test_reindexing_with_float64_NA_log():
         result_log = np.log(s_reindex)
         expected_log = Series([0, np.NaN, np.NaN], dtype=Float64Dtype())
         tm.assert_series_equal(result_log, expected_log)
+
+
+@pytest.mark.parametrize("dtype", ["timedelta64", "datetime64"])
+def test_reindex_expand_nonnano_nat(dtype):
+    # GH 53497
+    ser = Series(np.array([1], dtype=f"{dtype}[s]"))
+    result = ser.reindex(RangeIndex(2))
+    expected = Series(
+        np.array([1, getattr(np, dtype)("nat", "s")], dtype=f"{dtype}[s]")
+    )
+    tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index 2880e3f3e85db..d3cdae63d26f3 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -131,12 +131,18 @@ def test_replace_gh5319(self):
         # GH 5319
         ser = pd.Series([0, np.nan, 2, 3, 4])
         expected = ser.ffill()
-        result = ser.replace([np.nan])
+        msg = (
+            "Series.replace without 'value' and with non-dict-like "
+            "'to_replace' is deprecated"
+        )
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = ser.replace([np.nan])
         tm.assert_series_equal(result, expected)
 
         ser = pd.Series([0, np.nan, 2, 3, 4])
         expected = ser.ffill()
-        result = ser.replace(np.nan)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = ser.replace(np.nan)
         tm.assert_series_equal(result, expected)
 
     def test_replace_datetime64(self):
@@ -169,11 +175,17 @@ def test_replace_timedelta_td64(self):
 
     def test_replace_with_single_list(self):
         ser = pd.Series([0, 1, 2, 3, 4])
-        result = ser.replace([1, 2, 3])
+        msg2 = (
+            "Series.replace without 'value' and with non-dict-like "
+            "'to_replace' is deprecated"
+        )
+        with tm.assert_produces_warning(FutureWarning, match=msg2):
+            result = ser.replace([1, 2, 3])
         tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
 
         s = ser.copy()
-        return_value = s.replace([1, 2, 3], inplace=True)
+        with tm.assert_produces_warning(FutureWarning, match=msg2):
+            return_value = s.replace([1, 2, 3], inplace=True)
         assert return_value is None
         tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
 
@@ -183,8 +195,10 @@ def test_replace_with_single_list(self):
             r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
             r"\(bfill\)\. Got crash_cymbal"
         )
+        msg3 = "The 'method' keyword in Series.replace is deprecated"
         with pytest.raises(ValueError, match=msg):
-            return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
+            with tm.assert_produces_warning(FutureWarning, match=msg3):
+                return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
             assert return_value is None
         tm.assert_series_equal(s, ser)
 
@@ -450,8 +464,13 @@ def test_replace_invalid_to_replace(self):
             r"Expecting 'to_replace' to be either a scalar, array-like, "
             r"dict or None, got invalid type.*"
         )
+        msg2 = (
+            "Series.replace without 'value' and with non-dict-like "
+            "'to_replace' is deprecated"
+        )
         with pytest.raises(TypeError, match=msg):
-            series.replace(lambda x: x.strip())
+            with tm.assert_produces_warning(FutureWarning, match=msg2):
+                series.replace(lambda x: x.strip())
 
     @pytest.mark.parametrize("frame", [False, True])
     def test_replace_nonbool_regex(self, frame):
@@ -502,19 +521,25 @@ def test_replace_extension_other(self, frame_or_series):
     def _check_replace_with_method(self, ser: pd.Series):
         df = ser.to_frame()
 
-        res = ser.replace(ser[1], method="pad")
+        msg1 = "The 'method' keyword in Series.replace is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg1):
+            res = ser.replace(ser[1], method="pad")
         expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype)
         tm.assert_series_equal(res, expected)
 
-        res_df = df.replace(ser[1], method="pad")
+        msg2 = "The 'method' keyword in DataFrame.replace is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg2):
+            res_df = df.replace(ser[1], method="pad")
         tm.assert_frame_equal(res_df, expected.to_frame())
 
         ser2 = ser.copy()
-        res2 = ser2.replace(ser[1], method="pad", inplace=True)
+        with tm.assert_produces_warning(FutureWarning, match=msg1):
+            res2 = ser2.replace(ser[1], method="pad", inplace=True)
         assert res2 is None
         tm.assert_series_equal(ser2, expected)
 
-        res_df2 = df.replace(ser[1], method="pad", inplace=True)
+        with tm.assert_produces_warning(FutureWarning, match=msg2):
+            res_df2 = df.replace(ser[1], method="pad", inplace=True)
         assert res_df2 is None
         tm.assert_frame_equal(df, expected.to_frame())
 
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 4bf16b6d20d1f..ceb283ca9e9e7 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -2154,3 +2154,18 @@ def test_index_ordered_dict_keys():
         ),
     )
     tm.assert_series_equal(series, expected)
+
+
+@pytest.mark.parametrize(
+    "input_list",
+    [
+        [1, complex("nan"), 2],
+        [1 + 1j, complex("nan"), 2 + 2j],
+    ],
+)
+def test_series_with_complex_nan(input_list):
+    # GH#53627
+    ser = Series(input_list)
+    result = Series(ser.array)
+    assert ser.dtype == "complex128"
+    tm.assert_series_equal(ser, result)
diff --git a/pandas/tests/series/test_ufunc.py b/pandas/tests/series/test_ufunc.py
index ac36103edcdcc..38dea7dc5f8bf 100644
--- a/pandas/tests/series/test_ufunc.py
+++ b/pandas/tests/series/test_ufunc.py
@@ -11,9 +11,16 @@
 import pandas._testing as tm
 from pandas.arrays import SparseArray
 
-BINARY_UFUNCS = [np.add, np.logaddexp]  # dunder op
-SPARSE = [True, False]
-SPARSE_IDS = ["sparse", "dense"]
+
+@pytest.fixture(params=[np.add, np.logaddexp])
+def ufunc(request):
+    # dunder op
+    return request.param
+
+
+@pytest.fixture(params=[True, False], ids=["sparse", "dense"])
+def sparse(request):
+    return request.param
 
 
 @pytest.fixture
@@ -29,7 +36,6 @@ def arrays_for_binary_ufunc():
 
 
 @pytest.mark.parametrize("ufunc", [np.positive, np.floor, np.exp])
-@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
 def test_unary_ufunc(ufunc, sparse):
     # Test that ufunc(pd.Series) == pd.Series(ufunc)
     arr = np.random.randint(0, 10, 10, dtype="int64")
@@ -46,8 +52,6 @@ def test_unary_ufunc(ufunc, sparse):
     tm.assert_series_equal(result, expected)
 
 
-@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
-@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
 @pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
 def test_binary_ufunc_with_array(flip, sparse, ufunc, arrays_for_binary_ufunc):
     # Test that ufunc(pd.Series(a), array) == pd.Series(ufunc(a, b))
@@ -72,8 +76,6 @@ def test_binary_ufunc_with_array(flip, sparse, ufunc, arrays_for_binary_ufunc):
     tm.assert_series_equal(result, expected)
 
 
-@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
-@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
 @pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
 def test_binary_ufunc_with_index(flip, sparse, ufunc, arrays_for_binary_ufunc):
     # Test that
@@ -101,8 +103,6 @@ def test_binary_ufunc_with_index(flip, sparse, ufunc, arrays_for_binary_ufunc):
     tm.assert_series_equal(result, expected)
 
 
-@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
-@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
 @pytest.mark.parametrize("shuffle", [True, False], ids=["unaligned", "aligned"])
 @pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
 def test_binary_ufunc_with_series(
@@ -143,8 +143,6 @@ def test_binary_ufunc_with_series(
     tm.assert_series_equal(result, expected)
 
 
-@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
-@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
 @pytest.mark.parametrize("flip", [True, False])
 def test_binary_ufunc_scalar(ufunc, sparse, flip, arrays_for_binary_ufunc):
     # Test that
@@ -170,7 +168,6 @@ def test_binary_ufunc_scalar(ufunc, sparse, flip, arrays_for_binary_ufunc):
 
 
 @pytest.mark.parametrize("ufunc", [np.divmod])  # TODO: np.modf, np.frexp
-@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
 @pytest.mark.parametrize("shuffle", [True, False])
 @pytest.mark.filterwarnings("ignore:divide by zero:RuntimeWarning")
 def test_multiple_output_binary_ufuncs(ufunc, sparse, shuffle, arrays_for_binary_ufunc):
@@ -203,7 +200,6 @@ def test_multiple_output_binary_ufuncs(ufunc, sparse, shuffle, arrays_for_binary
     tm.assert_series_equal(result[1], pd.Series(expected[1]))
 
 
-@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
 def test_multiple_output_ufunc(sparse, arrays_for_binary_ufunc):
     # Test that the same conditions from unary input apply to multi-output
     # ufuncs
@@ -223,8 +219,6 @@ def test_multiple_output_ufunc(sparse, arrays_for_binary_ufunc):
     tm.assert_series_equal(result[1], pd.Series(expected[1], name="name"))
 
 
-@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
-@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
 def test_binary_ufunc_drops_series_name(ufunc, sparse, arrays_for_binary_ufunc):
     # Drop the names when they differ.
     a1, a2 = arrays_for_binary_ufunc
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 6d09488df06e2..8c26bbd209a6a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -35,6 +35,7 @@
     Series,
     Timedelta,
     Timestamp,
+    cut,
     date_range,
     timedelta_range,
     to_datetime,
@@ -148,7 +149,7 @@ def test_mixed(self):
         exp = Index([3.14, np.inf, "A", "B"])
         tm.assert_index_equal(uniques, exp)
 
-    def test_datelike(self):
+    def test_factorize_datetime64(self):
         # M8
         v1 = Timestamp("20130101 09:00:00.00004")
         v2 = Timestamp("20130101")
@@ -166,6 +167,7 @@ def test_datelike(self):
         exp = DatetimeIndex([v2, v1])
         tm.assert_index_equal(uniques, exp)
 
+    def test_factorize_period(self):
         # period
         v1 = Period("201302", freq="M")
         v2 = Period("201303", freq="M")
@@ -182,6 +184,7 @@ def test_datelike(self):
         tm.assert_numpy_array_equal(codes, exp)
         tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))
 
+    def test_factorize_timedelta(self):
         # GH 5986
         v1 = to_timedelta("1 day 1 min")
         v2 = to_timedelta("1 day")
@@ -536,11 +539,9 @@ def test_objects(self):
         assert isinstance(result, np.ndarray)
 
     def test_object_refcount_bug(self):
-        lst = ["A", "B", "C", "D", "E"]
-        msg = "unique with argument that is not not a Series"
-        with tm.assert_produces_warning(FutureWarning, match=msg):
-            for i in range(1000):
-                len(algos.unique(lst))
+        lst = np.array(["A", "B", "C", "D", "E"], dtype=object)
+        for i in range(1000):
+            len(algos.unique(lst))
 
     def test_on_index_object(self):
         mindex = MultiIndex.from_arrays(
@@ -1175,13 +1176,14 @@ def test_isin_unsigned_dtype(self):
 class TestValueCounts:
     def test_value_counts(self):
         np.random.seed(1234)
-        from pandas.core.reshape.tile import cut
 
         arr = np.random.randn(4)
         factor = cut(arr, 4)
 
         # assert isinstance(factor, n)
-        result = algos.value_counts(factor)
+        msg = "pandas.value_counts is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = algos.value_counts(factor)
         breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
         index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
         expected = Series([1, 1, 1, 1], index=index, name="count")
@@ -1189,13 +1191,16 @@ def test_value_counts(self):
 
     def test_value_counts_bins(self):
         s = [1, 2, 3, 4]
-        result = algos.value_counts(s, bins=1)
+        msg = "pandas.value_counts is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = algos.value_counts(s, bins=1)
         expected = Series(
             [4], index=IntervalIndex.from_tuples([(0.996, 4.0)]), name="count"
         )
         tm.assert_series_equal(result, expected)
 
-        result = algos.value_counts(s, bins=2, sort=False)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = algos.value_counts(s, bins=2, sort=False)
         expected = Series(
             [2, 2],
             index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)]),
@@ -1204,31 +1209,40 @@ def test_value_counts_bins(self):
         tm.assert_series_equal(result, expected)
 
     def test_value_counts_dtypes(self):
-        result = algos.value_counts(np.array([1, 1.0]))
+        msg2 = "pandas.value_counts is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg2):
+            result = algos.value_counts(np.array([1, 1.0]))
         assert len(result) == 1
 
-        result = algos.value_counts(np.array([1, 1.0]), bins=1)
+        with tm.assert_produces_warning(FutureWarning, match=msg2):
+            result = algos.value_counts(np.array([1, 1.0]), bins=1)
         assert len(result) == 1
 
-        result = algos.value_counts(Series([1, 1.0, "1"]))  # object
+        with tm.assert_produces_warning(FutureWarning, match=msg2):
+            result = algos.value_counts(Series([1, 1.0, "1"]))  # object
         assert len(result) == 2
 
         msg = "bins argument only works with numeric data"
         with pytest.raises(TypeError, match=msg):
-            algos.value_counts(np.array(["1", 1], dtype=object), bins=1)
+            with tm.assert_produces_warning(FutureWarning, match=msg2):
+                algos.value_counts(np.array(["1", 1], dtype=object), bins=1)
 
     def test_value_counts_nat(self):
         td = Series([np.timedelta64(10000), NaT], dtype="timedelta64[ns]")
         dt = to_datetime(["NaT", "2014-01-01"])
 
+        msg = "pandas.value_counts is deprecated"
+
         for s in [td, dt]:
-            vc = algos.value_counts(s)
-            vc_with_na = algos.value_counts(s, dropna=False)
+            with tm.assert_produces_warning(FutureWarning, match=msg):
+                vc = algos.value_counts(s)
+                vc_with_na = algos.value_counts(s, dropna=False)
             assert len(vc) == 1
             assert len(vc_with_na) == 2
 
         exp_dt = Series({Timestamp("2014-01-01 00:00:00"): 1}, name="count")
-        tm.assert_series_equal(algos.value_counts(dt), exp_dt)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            tm.assert_series_equal(algos.value_counts(dt), exp_dt)
         # TODO same for (timedelta)
 
     def test_value_counts_datetime_outofbounds(self):
@@ -1252,7 +1266,7 @@ def test_value_counts_datetime_outofbounds(self):
         exp = Series([3, 2, 1], index=exp_index, name="count")
         tm.assert_series_equal(res, exp)
 
-        # GH 12424
+        # GH 12424  # TODO: belongs elsewhere
         res = to_datetime(Series(["2362-01-01", np.nan]), errors="ignore")
         exp = Series(["2362-01-01", np.nan], dtype=object)
         tm.assert_series_equal(res, exp)
@@ -1388,13 +1402,16 @@ def test_value_counts_normalized(self, dtype):
     def test_value_counts_uint64(self):
         arr = np.array([2**63], dtype=np.uint64)
         expected = Series([1], index=[2**63], name="count")
-        result = algos.value_counts(arr)
+        msg = "pandas.value_counts is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = algos.value_counts(arr)
 
         tm.assert_series_equal(result, expected)
 
         arr = np.array([-1, 2**63], dtype=object)
         expected = Series([1, 1], index=[-1, 2**63], name="count")
-        result = algos.value_counts(arr)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = algos.value_counts(arr)
 
         tm.assert_series_equal(result, expected)
 
@@ -1832,261 +1849,11 @@ def test_pad(self):
 def test_is_lexsorted():
     failure = [
         np.array(
-            [
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                3,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                2,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                1,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-                0,
-            ],
+            ([3] * 32) + ([2] * 32) + ([1] * 32) + ([0] * 32),
             dtype="int64",
         ),
         np.array(
-            [
-                30,
-                29,
-                28,
-                27,
-                26,
-                25,
-                24,
-                23,
-                22,
-                21,
-                20,
-                19,
-                18,
-                17,
-                16,
-                15,
-                14,
-                13,
-                12,
-                11,
-                10,
-                9,
-                8,
-                7,
-                6,
-                5,
-                4,
-                3,
-                2,
-                1,
-                0,
-                30,
-                29,
-                28,
-                27,
-                26,
-                25,
-                24,
-                23,
-                22,
-                21,
-                20,
-                19,
-                18,
-                17,
-                16,
-                15,
-                14,
-                13,
-                12,
-                11,
-                10,
-                9,
-                8,
-                7,
-                6,
-                5,
-                4,
-                3,
-                2,
-                1,
-                0,
-                30,
-                29,
-                28,
-                27,
-                26,
-                25,
-                24,
-                23,
-                22,
-                21,
-                20,
-                19,
-                18,
-                17,
-                16,
-                15,
-                14,
-                13,
-                12,
-                11,
-                10,
-                9,
-                8,
-                7,
-                6,
-                5,
-                4,
-                3,
-                2,
-                1,
-                0,
-                30,
-                29,
-                28,
-                27,
-                26,
-                25,
-                24,
-                23,
-                22,
-                21,
-                20,
-                19,
-                18,
-                17,
-                16,
-                15,
-                14,
-                13,
-                12,
-                11,
-                10,
-                9,
-                8,
-                7,
-                6,
-                5,
-                4,
-                3,
-                2,
-                1,
-                0,
-            ],
+            list(range(31))[::-1] * 4,
             dtype="int64",
         ),
     ]
diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py
index cefcf09613de1..47615be32e5b0 100644
--- a/pandas/tests/test_take.py
+++ b/pandas/tests/test_take.py
@@ -1,5 +1,4 @@
 from datetime import datetime
-import re
 
 import numpy as np
 import pytest
@@ -41,9 +40,6 @@ def dtype_fill_out_dtype(request):
 
 
 class TestTake:
-    # Standard incompatible fill error.
-    fill_error = re.compile("Incompatible type for fill_value")
-
     def test_1d_fill_nonna(self, dtype_fill_out_dtype):
         dtype, fill_value, out_dtype = dtype_fill_out_dtype
         data = np.random.randint(0, 2, 4).astype(dtype)
diff --git a/pandas/tests/tseries/offsets/test_custom_business_month.py b/pandas/tests/tseries/offsets/test_custom_business_month.py
index faf0f9810200b..0fff99ff8c025 100644
--- a/pandas/tests/tseries/offsets/test_custom_business_month.py
+++ b/pandas/tests/tseries/offsets/test_custom_business_month.py
@@ -11,7 +11,6 @@
     datetime,
     timedelta,
 )
-from typing import TYPE_CHECKING
 
 import numpy as np
 import pytest
@@ -34,9 +33,6 @@
 from pandas.tseries import offsets
 from pandas.tseries.holiday import USFederalHolidayCalendar
 
-if TYPE_CHECKING:
-    from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
-
 
 @pytest.fixture
 def dt():
@@ -132,7 +128,7 @@ def test_is_on_offset(self, case):
         offset, dt, expected = case
         assert_is_on_offset(offset, dt, expected)
 
-    apply_cases: _ApplyCases = [
+    apply_cases = [
         (
             CBMonthBegin(),
             {
@@ -330,7 +326,7 @@ def test_is_on_offset(self, case):
         offset, dt, expected = case
         assert_is_on_offset(offset, dt, expected)
 
-    apply_cases: _ApplyCases = [
+    apply_cases = [
         (
             CBMonthEnd(),
             {
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index bfc5139c78b91..6df47968bd3bb 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -7,11 +7,6 @@
     datetime,
     timedelta,
 )
-from typing import (
-    Dict,
-    List,
-    Tuple,
-)
 
 import numpy as np
 import pytest
@@ -42,7 +37,6 @@
 from pandas.tseries import offsets
 from pandas.tseries.offsets import (
     FY5253,
-    BaseOffset,
     BDay,
     BMonthEnd,
     BusinessHour,
@@ -61,8 +55,6 @@
     WeekOfMonth,
 )
 
-_ApplyCases = List[Tuple[BaseOffset, Dict[datetime, datetime]]]
-
 _ARITHMETIC_DATE_OFFSET = [
     "years",
     "months",
diff --git a/pandas/tests/tslibs/test_timedeltas.py b/pandas/tests/tslibs/test_timedeltas.py
index 36ca02d32dbbd..2308aa27b60ab 100644
--- a/pandas/tests/tslibs/test_timedeltas.py
+++ b/pandas/tests/tslibs/test_timedeltas.py
@@ -72,6 +72,15 @@ def test_delta_to_nanoseconds_td64_MY_raises():
         delta_to_nanoseconds(td)
 
 
+@pytest.mark.parametrize("unit", ["Y", "M"])
+def test_unsupported_td64_unit_raises(unit):
+    # GH 52806
+    with pytest.raises(
+        ValueError, match=f"cannot construct a Timedelta from a unit {unit}"
+    ):
+        Timedelta(np.timedelta64(1, unit))
+
+
 def test_huge_nanoseconds_overflow():
     # GH 32402
     assert delta_to_nanoseconds(Timedelta(1e10)) == 1e10
diff --git a/pandas/tests/util/test_validate_args.py b/pandas/tests/util/test_validate_args.py
index 77e6b01ba1180..eef0931ec28ef 100644
--- a/pandas/tests/util/test_validate_args.py
+++ b/pandas/tests/util/test_validate_args.py
@@ -2,17 +2,20 @@
 
 from pandas.util._validators import validate_args
 
-_fname = "func"
 
+@pytest.fixture
+def _fname():
+    return "func"
 
-def test_bad_min_fname_arg_count():
+
+def test_bad_min_fname_arg_count(_fname):
     msg = "'max_fname_arg_count' must be non-negative"
 
     with pytest.raises(ValueError, match=msg):
         validate_args(_fname, (None,), -1, "foo")
 
 
-def test_bad_arg_length_max_value_single():
+def test_bad_arg_length_max_value_single(_fname):
     args = (None, None)
     compat_args = ("foo",)
 
@@ -28,7 +31,7 @@ def test_bad_arg_length_max_value_single():
         validate_args(_fname, args, min_fname_arg_count, compat_args)
 
 
-def test_bad_arg_length_max_value_multiple():
+def test_bad_arg_length_max_value_multiple(_fname):
     args = (None, None)
     compat_args = {"foo": None}
 
@@ -45,7 +48,7 @@ def test_bad_arg_length_max_value_multiple():
 
 
 @pytest.mark.parametrize("i", range(1, 3))
-def test_not_all_defaults(i):
+def test_not_all_defaults(i, _fname):
     bad_arg = "foo"
     msg = (
         f"the '{bad_arg}' parameter is not supported "
@@ -59,7 +62,7 @@ def test_not_all_defaults(i):
         validate_args(_fname, arg_vals[:i], 2, compat_args)
 
 
-def test_validation():
+def test_validation(_fname):
     # No exceptions should be raised.
     validate_args(_fname, (None,), 2, {"out": None})
 
diff --git a/pandas/tests/util/test_validate_args_and_kwargs.py b/pandas/tests/util/test_validate_args_and_kwargs.py
index 54d94d2194909..215026d648471 100644
--- a/pandas/tests/util/test_validate_args_and_kwargs.py
+++ b/pandas/tests/util/test_validate_args_and_kwargs.py
@@ -2,10 +2,13 @@
 
 from pandas.util._validators import validate_args_and_kwargs
 
-_fname = "func"
 
+@pytest.fixture
+def _fname():
+    return "func"
 
-def test_invalid_total_length_max_length_one():
+
+def test_invalid_total_length_max_length_one(_fname):
     compat_args = ("foo",)
     kwargs = {"foo": "FOO"}
     args = ("FoO", "BaZ")
@@ -23,7 +26,7 @@ def test_invalid_total_length_max_length_one():
         validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
 
 
-def test_invalid_total_length_max_length_multiple():
+def test_invalid_total_length_max_length_multiple(_fname):
     compat_args = ("foo", "bar", "baz")
     kwargs = {"foo": "FOO", "bar": "BAR"}
     args = ("FoO", "BaZ")
@@ -42,7 +45,7 @@ def test_invalid_total_length_max_length_multiple():
 
 
 @pytest.mark.parametrize("args,kwargs", [((), {"foo": -5, "bar": 2}), ((-5, 2), {})])
-def test_missing_args_or_kwargs(args, kwargs):
+def test_missing_args_or_kwargs(args, kwargs, _fname):
     bad_arg = "bar"
     min_fname_arg_count = 2
 
@@ -57,7 +60,7 @@ def test_missing_args_or_kwargs(args, kwargs):
         validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
 
 
-def test_duplicate_argument():
+def test_duplicate_argument(_fname):
     min_fname_arg_count = 2
 
     compat_args = {"foo": None, "bar": None, "baz": None}
@@ -70,7 +73,7 @@ def test_duplicate_argument():
         validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
 
 
-def test_validation():
+def test_validation(_fname):
     # No exceptions should be raised.
     compat_args = {"foo": 1, "bar": None, "baz": -2}
     kwargs = {"baz": -2}
diff --git a/pandas/tests/util/test_validate_kwargs.py b/pandas/tests/util/test_validate_kwargs.py
index de49cdd5e247d..dba447e30cf57 100644
--- a/pandas/tests/util/test_validate_kwargs.py
+++ b/pandas/tests/util/test_validate_kwargs.py
@@ -5,10 +5,13 @@
     validate_kwargs,
 )
 
-_fname = "func"
 
+@pytest.fixture
+def _fname():
+    return "func"
 
-def test_bad_kwarg():
+
+def test_bad_kwarg(_fname):
     good_arg = "f"
     bad_arg = good_arg + "o"
 
@@ -22,7 +25,7 @@ def test_bad_kwarg():
 
 
 @pytest.mark.parametrize("i", range(1, 3))
-def test_not_all_none(i):
+def test_not_all_none(i, _fname):
     bad_arg = "foo"
     msg = (
         rf"the '{bad_arg}' parameter is not supported "
@@ -40,7 +43,7 @@ def test_not_all_none(i):
         validate_kwargs(_fname, kwargs, compat_args)
 
 
-def test_validation():
+def test_validation(_fname):
     # No exceptions should be raised.
     compat_args = {"f": None, "b": 1, "ba": "s"}
 
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index dbafcf12513e0..f5ef6a00e0b32 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -17,13 +17,15 @@
 )
 import pandas._testing as tm
 
-# TODO(GH#44584): Mark these as pytest.mark.single_cpu
-pytestmark = pytest.mark.skipif(
-    is_ci_environment() and (is_platform_windows() or is_platform_mac()),
-    reason="On GHA CI, Windows can fail with "
-    "'Windows fatal exception: stack overflow' "
-    "and macOS can timeout",
-)
+pytestmark = [
+    pytest.mark.single_cpu,
+    pytest.mark.skipif(
+        is_ci_environment() and (is_platform_windows() or is_platform_mac()),
+        reason="On GHA CI, Windows can fail with "
+        "'Windows fatal exception: stack overflow' "
+        "and macOS can timeout",
+    ),
+]
 
 
 @pytest.fixture(params=["single", "table"])
diff --git a/pandas/tests/window/test_online.py b/pandas/tests/window/test_online.py
index 875adf6cef4ac..5974de0ae4009 100644
--- a/pandas/tests/window/test_online.py
+++ b/pandas/tests/window/test_online.py
@@ -14,13 +14,15 @@
 )
 import pandas._testing as tm
 
-# TODO(GH#44584): Mark these as pytest.mark.single_cpu
-pytestmark = pytest.mark.skipif(
-    is_ci_environment() and (is_platform_windows() or is_platform_mac()),
-    reason="On GHA CI, Windows can fail with "
-    "'Windows fatal exception: stack overflow' "
-    "and macOS can timeout",
-)
+pytestmark = [
+    pytest.mark.single_cpu,
+    pytest.mark.skipif(
+        is_ci_environment() and (is_platform_windows() or is_platform_mac()),
+        reason="On GHA CI, Windows can fail with "
+        "'Windows fatal exception: stack overflow' "
+        "and macOS can timeout",
+    ),
+]
 
 
 @td.skip_if_no("numba")
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 084b4b606ad5e..381272ff691fe 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -9,7 +9,6 @@
 from pandas.compat import (
     IS64,
     is_platform_arm,
-    is_platform_mac,
     is_platform_power,
 )
 
@@ -1189,7 +1188,7 @@ def test_rolling_sem(frame_or_series):
 
 
 @pytest.mark.xfail(
-    (is_platform_arm() and not is_platform_mac()) or is_platform_power(),
+    is_platform_arm() or is_platform_power(),
     reason="GH 38921",
 )
 @pytest.mark.parametrize(
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index ec54f0205f79a..20cc83e86b435 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -114,9 +114,7 @@ def _skip_if_no_scipy() -> bool:
     )
 
 
-# TODO(pytest#7469): return type, _pytest.mark.structures.MarkDecorator is not public
-# https://github.com/pytest-dev/pytest/issues/7469
-def skip_if_installed(package: str):
+def skip_if_installed(package: str) -> pytest.MarkDecorator:
     """
     Skip a test if a package is installed.
 
@@ -124,15 +122,19 @@ def skip_if_installed(package: str):
     ----------
     package : str
         The name of the package.
+
+    Returns
+    -------
+    pytest.MarkDecorator
+        a pytest.mark.skipif to use as either a test decorator or a
+        parametrization mark.
     """
     return pytest.mark.skipif(
         safe_import(package), reason=f"Skipping because {package} is installed."
     )
 
 
-# TODO(pytest#7469): return type, _pytest.mark.structures.MarkDecorator is not public
-# https://github.com/pytest-dev/pytest/issues/7469
-def skip_if_no(package: str, min_version: str | None = None):
+def skip_if_no(package: str, min_version: str | None = None) -> pytest.MarkDecorator:
     """
     Generic function to help skip tests when required packages are not
     present on the testing system.
@@ -158,7 +160,7 @@ def skip_if_no(package: str, min_version: str | None = None):
 
     Returns
     -------
-    _pytest.mark.structures.MarkDecorator
+    pytest.MarkDecorator
         a pytest.mark.skipif to use as either a test decorator or a
         parametrization mark.
     """
@@ -189,9 +191,9 @@ def skip_if_no(package: str, min_version: str | None = None):
 )
 
 
-# TODO(pytest#7469): return type, _pytest.mark.structures.MarkDecorator is not public
-# https://github.com/pytest-dev/pytest/issues/7469
-def skip_if_np_lt(ver_str: str, *args, reason: str | None = None):
+def skip_if_np_lt(
+    ver_str: str, *args, reason: str | None = None
+) -> pytest.MarkDecorator:
     if reason is None:
         reason = f"NumPy {ver_str} or greater required"
     return pytest.mark.skipif(
diff --git a/pyproject.toml b/pyproject.toml
index 6f91aa2360406..0d1bca886a638 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -56,7 +56,7 @@ repository = 'https://github.com/pandas-dev/pandas'
 matplotlib = "pandas:plotting._matplotlib"
 
 [project.optional-dependencies]
-test = ['hypothesis>=6.46.1', 'pytest>=7.0.0', 'pytest-xdist>=2.2.0', 'pytest-asyncio>=0.17.0']
+test = ['hypothesis>=6.46.1', 'pytest>=7.3.2', 'pytest-xdist>=2.2.0', 'pytest-asyncio>=0.17.0']
 performance = ['bottleneck>=1.3.4', 'numba>=0.55.2', 'numexpr>=2.8.0']
 computation = ['scipy>=1.8.1', 'xarray>=2022.03.0']
 fss = ['fsspec>=2022.05.0']
@@ -101,7 +101,7 @@ all = ['beautifulsoup4>=4.11.1',
        'pymysql>=1.0.2',
        'PyQt5>=5.15.6',
        'pyreadstat>=1.1.5',
-       'pytest>=7.0.0',
+       'pytest>=7.3.2',
        'pytest-xdist>=2.2.0',
        'pytest-asyncio>=0.17.0',
        'python-snappy>=0.6.1',
@@ -146,7 +146,7 @@ setup = ['--vsenv'] # For Windows
 skip = "cp36-* cp37-* cp38-* pp* *_i686 *_ppc64le *_s390x *-musllinux_aarch64"
 build-verbosity = "3"
 environment = {LDFLAGS="-Wl,--strip-all"}
-test-requires = "hypothesis>=6.46.1 pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-asyncio>=0.17"
+test-requires = "hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17"
 test-command = """
   PANDAS_CI='1' python -c 'import pandas as pd; \
   pd.test(extra_args=["-m not clipboard and not single_cpu and not slow and not network and not db", "-n 2"]); \
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 546116b1fa23d..38a2ce7f66aa3 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -6,7 +6,7 @@ versioneer[toml]
 cython==0.29.33
 meson[ninja]==1.0.1
 meson-python==0.13.1
-pytest>=7.0.0
+pytest>=7.3.2
 pytest-cov
 pytest-xdist>=2.2.0
 pytest-asyncio>=0.17.0
diff --git a/scripts/tests/conftest.py b/scripts/tests/conftest.py
deleted file mode 100644
index 496a5195bfc84..0000000000000
--- a/scripts/tests/conftest.py
+++ /dev/null
@@ -1,6 +0,0 @@
-def pytest_addoption(parser):
-    parser.addoption(
-        "--strict-data-files",
-        action="store_true",
-        help="Unused. For compat with setup.cfg.",
-    )
diff --git a/scripts/tests/data/deps_expected_random.yaml b/scripts/tests/data/deps_expected_random.yaml
index be5e467b57e10..35d7fe74806a9 100644
--- a/scripts/tests/data/deps_expected_random.yaml
+++ b/scripts/tests/data/deps_expected_random.yaml
@@ -10,7 +10,7 @@ dependencies:
   - cython>=0.29.32
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-xdist>=2.2.0
   - psutil
diff --git a/scripts/tests/data/deps_minimum.toml b/scripts/tests/data/deps_minimum.toml
index 97a5ce1180bfb..6f56ca498794b 100644
--- a/scripts/tests/data/deps_minimum.toml
+++ b/scripts/tests/data/deps_minimum.toml
@@ -55,7 +55,7 @@ repository = 'https://github.com/pandas-dev/pandas'
 matplotlib = "pandas:plotting._matplotlib"
 
 [project.optional-dependencies]
-test = ['hypothesis>=6.34.2', 'pytest>=7.0.0', 'pytest-xdist>=2.2.0', 'pytest-asyncio>=0.17.0']
+test = ['hypothesis>=6.34.2', 'pytest>=7.3.2', 'pytest-xdist>=2.2.0', 'pytest-asyncio>=0.17.0']
 performance = ['bottleneck>=1.3.2', 'numba>=0.53.1', 'numexpr>=2.7.1']
 timezone = ['tzdata>=2022.1']
 computation = ['scipy>=1.7.1', 'xarray>=0.21.0']
@@ -101,7 +101,7 @@ all = ['beautifulsoup4>=5.9.3',
        'pymysql>=1.0.2',
        'PyQt5>=5.15.1',
        'pyreadstat>=1.1.2',
-       'pytest>=7.0.0',
+       'pytest>=7.3.2',
        'pytest-xdist>=2.2.0',
        'pytest-asyncio>=0.17.0',
        'python-snappy>=0.6.0',
@@ -143,7 +143,7 @@ parentdir_prefix = "pandas-"
 [tool.cibuildwheel]
 skip = "cp36-* cp37-* pp37-* *-manylinux_i686 *_ppc64le *_s390x *-musllinux*"
 build-verbosity = "3"
-test-requires = "hypothesis>=6.34.2 pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-asyncio>=0.17"
+test-requires = "hypothesis>=6.34.2 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17"
 test-command = "python {project}/ci/test_wheels.py"
 
 [tool.cibuildwheel.macos]
diff --git a/scripts/tests/data/deps_unmodified_random.yaml b/scripts/tests/data/deps_unmodified_random.yaml
index 4ca758af1c8ad..405762d33f53e 100644
--- a/scripts/tests/data/deps_unmodified_random.yaml
+++ b/scripts/tests/data/deps_unmodified_random.yaml
@@ -10,7 +10,7 @@ dependencies:
   - cython>=0.29.32
 
   # test dependencies
-  - pytest>=7.0.0
+  - pytest>=7.3.2
   - pytest-cov
   - pytest-xdist>=2.2.0
   - psutil
diff --git a/web/pandas/about/governance.md b/web/pandas/about/governance.md
index 0bb61592d7e5d..46480acc69c31 100644
--- a/web/pandas/about/governance.md
+++ b/web/pandas/about/governance.md
@@ -228,7 +228,7 @@ interactions with NumFOCUS.
   Team.
 - This Subcommittee shall NOT make decisions about the direction, scope or
   technical direction of the Project.
-- This Subcommittee will have at least 5 members. No more than 2 Subcommitee
+- This Subcommittee will have at least 5 members. No more than 2 Subcommittee
   Members can report to one person (either directly or indirectly) through
   employment or contracting work (including the reportee, i.e. the reportee + 1
   is the max). This avoids effective majorities resting on one person.
diff --git a/web/pandas/pdeps/0009-io-extensions.md b/web/pandas/pdeps/0009-io-extensions.md
new file mode 100644
index 0000000000000..aeda990cea7df
--- /dev/null
+++ b/web/pandas/pdeps/0009-io-extensions.md
@@ -0,0 +1,406 @@
+# PDEP-9: Allow third-party projects to register pandas connectors with a standard API
+
+- Created: 5 March 2023
+- Status: Rejected
+- Discussion: [#51799](https://github.com/pandas-dev/pandas/pull/51799)
+              [#53005](https://github.com/pandas-dev/pandas/pull/53005)
+- Author: [Marc Garcia](https://github.com/datapythonista)
+- Revision: 1
+
+## PDEP Summary
+
+This document proposes that third-party projects implementing I/O or memory
+connectors to pandas can register them using Python's entrypoint system,
+and make them available to pandas users with the usual pandas I/O interface.
+For instance, packages independent from pandas could implement readers from
+DuckDB and writers to Delta Lake, and when installed in the user environment,
+the user would be able to use them as if they were implemented in pandas.
+For example:
+
+```python
+import pandas
+
+pandas.load_io_plugins()
+
+df = pandas.DataFrame.read_duckdb("SELECT * FROM 'my_dataset.parquet';")
+
+df.to_deltalake('/delta/my_dataset')
+```
+
+This would make it easy to extend the existing set of connectors, adding
+support for new formats and database engines, data lake technologies,
+out-of-core connectors, the new ADBC interface, and others, while at the
+same time reducing the maintenance cost of the pandas code base.
+
+## Current state
+
+pandas supports importing and exporting data from different formats using
+I/O connectors, currently implemented in `pandas/io`, as well as connectors
+to in-memory structures like Python structures or other library formats.
+In many cases, those connectors wrap an existing Python library, while in
+some others, pandas implements the logic to read and write to a particular
+format.
+
+In some cases, different engines exist for the same format. The API to use
+those connectors is `pandas.read_<format>(engine='<engine-name>', ...)` to
+import data, and `DataFrame.to_<format>(engine='<engine-name>', ...)` to
+export data.
+
+For objects exported to memory (like a Python dict) the API is the same as
+for I/O, `DataFrame.to_<format>(...)`. For formats imported from objects in
+memory, the API is different using the `from_` prefix instead of `read_`,
+`DataFrame.from_<format>(...)`.
+
+In some cases, the pandas API provides `DataFrame.to_*` methods that are not
+used to export the data to a disk or memory object, but instead to transform
+the index of a `DataFrame`: `DataFrame.to_period` and `DataFrame.to_timestamp`.
+
+Dependencies of the connectors are not loaded by default, and are only
+imported when the connector is used. If the dependencies are not installed,
+an `ImportError` is raised:
+
+```python
+>>> pandas.read_gbq(query)
+Traceback (most recent call last):
+  ...
+ImportError: Missing optional dependency 'pandas-gbq'.
+pandas-gbq is required to load data from Google BigQuery.
+See the docs: https://pandas-gbq.readthedocs.io.
+Use pip or conda to install pandas-gbq.
+```
+
+### Supported formats
+
+The list of formats can be found in the
+[IO guide](https://pandas.pydata.org/docs/dev/user_guide/io.html).
+A more detailed table, including in-memory objects and the I/O connectors of the
+DataFrame styler, is presented next:
+
+| Format       | Reader | Writer | Engines                                                                           |
+|--------------|--------|--------|-----------------------------------------------------------------------------------|
+| CSV          | X      | X      | `c`, `python`, `pyarrow`                                                          |
+| FWF          | X      |        | `c`, `python`, `pyarrow`                                                          |
+| JSON         | X      | X      | `ujson`, `pyarrow`                                                                |
+| HTML         | X      | X      | `lxml`, `bs4/html5lib` (parameter `flavor`)                                       |
+| LaTeX        |        | X      |                                                                                   |
+| XML          | X      | X      | `lxml`, `etree` (parameter `parser`)                                              |
+| Clipboard    | X      | X      |                                                                                   |
+| Excel        | X      | X      | `xlrd`, `openpyxl`, `odf`, `pyxlsb` (each engine supports different file formats) |
+| HDF5         | X      | X      |                                                                                   |
+| Feather      | X      | X      |                                                                                   |
+| Parquet      | X      | X      | `pyarrow`, `fastparquet`                                                          |
+| ORC          | X      | X      |                                                                                   |
+| Stata        | X      | X      |                                                                                   |
+| SAS          | X      |        |                                                                                   |
+| SPSS         | X      |        |                                                                                   |
+| Pickle       | X      | X      |                                                                                   |
+| SQL          | X      | X      | `sqlalchemy`, `dbapi2` (inferred from the type of the `con` parameter)            |
+| BigQuery     | X      | X      |                                                                                   |
+| dict         | X      | X      |                                                                                   |
+| records      | X      | X      |                                                                                   |
+| string       |        | X      |                                                                                   |
+| markdown     |        | X      |                                                                                   |
+| xarray       |        | X      |                                                                                   |
+
+At the time of writing this document, the `io/` module contains
+close to 100,000 lines of Python, C and Cython code.
+
+There are no objective criteria for when a format is included
+in pandas, and the list above is mostly the result of individual
+developers being interested in implementing the connectors for a
+certain format in pandas.
+
+The number of existing formats available for data that can be processed with
+pandas is constantly increasing, and it is difficult for pandas to keep up to
+date even with popular formats. It possibly makes sense to have connectors
+to PyArrow, PySpark, Iceberg, DuckDB, Hive, Polars, and many others.
+
+At the same time, some of the formats are not frequently used, as shown in the
+[2019 user survey](https://pandas.pydata.org//community/blog/2019-user-survey.html).
+Those less popular formats include SPSS, SAS, Google BigQuery and
+Stata. Note that only I/O formats (and not memory formats like records or xarray)
+were included in the survey.
+
+The maintenance cost of supporting all formats is not only in maintaining the
+code and reviewing pull requests, but also in the significant amount of time
+spent on CI systems installing dependencies, compiling code, running tests, etc.
+
+In some cases, the main maintainers of some of the connectors are not part of
+the pandas core development team, but people specialized in one of the formats.
+
+## Proposal
+
+While the current pandas approach has worked reasonably well, it is difficult
+to find a stable solution where the maintenance burden on pandas stays
+manageable, while at the same time users can interact with all the formats
+and representations they are interested in, in an easy and intuitive way.
+
+Third-party packages are already able to implement connectors to pandas, but
+there are some limitations to it:
+
+- Given the large number of formats supported by pandas itself, third-party
+  connectors are likely seen as second-class citizens, not important enough
+  to be used, or not well supported.
+- There is no standard API for external I/O connectors, and users need
+  to learn each of them individually. Since the pandas I/O API is inconsistent
+  (using read/to instead of read/write or from/to), developers in many cases
+  ignore the convention. Also, even if developers follow the pandas convention,
+  the namespaces would still differ, since connector developers will rarely
+  monkeypatch their functions into the `pandas` or `DataFrame` namespaces.
+- Method chaining is not possible with third-party I/O connectors to export
+  data, unless authors monkeypatch the `DataFrame` class, which should not
+  be encouraged.
+
+This document proposes to open the development of pandas I/O connectors to
+third-party libraries in a standard way that overcomes those limitations.
+
+### Proposal implementation
+
+Implementing this proposal would not require major changes to pandas, and
+the API defined next would be used.
+
+#### User API
+
+Users will be able to install third-party packages implementing pandas
+connectors using the standard packaging tools (pip, conda, etc.). These
+connectors should implement entrypoints that pandas will use to
+automatically create the corresponding methods `pandas.read_*`,
+`pandas.DataFrame.to_*` and `pandas.Series.to_*`. Arbitrary function or
+method names will not be created by this interface; only the `read_*`
+and `to_*` patterns will be allowed.
+
+By simply installing the appropriate packages and calling the function
+`pandas.load_io_plugins()`, users will be able to use code like this:
+
+```python
+import pandas
+
+pandas.load_io_plugins()
+
+df = pandas.read_duckdb("SELECT * FROM 'dataset.parquet';")
+
+df.to_hive(hive_conn, "hive_table")
+```
+
+This API allows for method chaining:
+
+```python
+(pandas.read_duckdb("SELECT * FROM 'dataset.parquet';")
+       .to_hive(hive_conn, "hive_table"))
+```
+
+The total number of I/O functions and methods is expected to be small, as users
+in general use only a small subset of formats. The number could actually be
+reduced from the current state if the less popular formats (such as SAS, SPSS,
+BigQuery, etc.) are moved from the pandas core into third-party packages.
+Moving these connectors is not part of this proposal, and could be discussed
+later in a separate proposal.
+
+#### Plugin registration
+
+Third-party packages would implement
+[entrypoints](https://setuptools.pypa.io/en/latest/userguide/entry_point.html#entry-points-for-plugins)
+to define the connectors that they implement, under a group `dataframe.io`.
+
+For example, a hypothetical project `pandas_duckdb` implementing a `read_duckdb`
+function could use `pyproject.toml` to define the following entry point:
+
+```toml
+[project.entry-points."dataframe.io"]
+reader_duckdb = "pandas_duckdb:read_duckdb"
+```
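+
+The callable referenced by the entrypoint is an ordinary Python function. As a
+purely illustrative sketch (the package name and the DuckDB calls in the body are
+assumptions, not part of this proposal), the hypothetical `pandas_duckdb.read_duckdb`
+function could look like:
+
+```python
+# pandas_duckdb/__init__.py -- hypothetical third-party connector
+import duckdb
+import pandas
+
+
+def read_duckdb(query: str) -> pandas.DataFrame:
+    """Run a DuckDB query and return the result as a pandas DataFrame."""
+    con = duckdb.connect()          # in-memory DuckDB database
+    return con.execute(query).df()  # materialize the result as a pandas DataFrame
+```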
+
+When the user calls `pandas.load_io_plugins()`, pandas would read the entrypoint registry
+for the `dataframe.io` group, and would dynamically create functions and methods in the
+`pandas`, `pandas.DataFrame` and `pandas.Series` namespaces for them. Only entrypoints
+with names starting with `reader_` or `writer_` would be processed by pandas, and the
+functions registered in the entrypoint would be made available to pandas users in the
+corresponding pandas namespaces. The text after the prefixes `reader_` and `writer_`
+would be used for the name of the function. In the example above, the entrypoint name
+`reader_duckdb` would create `pandas.read_duckdb`. An entrypoint named `writer_hive`
+would create the methods `DataFrame.to_hive` and `Series.to_hive`.
+
+Entrypoints not starting with `reader_` or `writer_` would be ignored by this interface,
+but would not raise an exception, since they could be used for future extensions of this
+API or other related dataframe I/O interfaces.
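+
+As an illustration of the mechanism (not a definitive implementation), a minimal
+sketch of how `pandas.load_io_plugins()` could be written on top of
+`importlib.metadata` is shown next. It assumes Python 3.10+ for the `group`
+keyword of `entry_points`, and that registered writers accept the `DataFrame`
+or `Series` being exported as their first argument:
+
+```python
+from importlib.metadata import entry_points
+
+import pandas
+
+
+def load_io_plugins():
+    for entry_point in entry_points(group="dataframe.io"):
+        kind, _, format_name = entry_point.name.partition("_")
+        if kind == "reader":
+            # e.g. "reader_duckdb" creates pandas.read_duckdb
+            setattr(pandas, f"read_{format_name}", entry_point.load())
+        elif kind == "writer":
+            # e.g. "writer_hive" creates DataFrame.to_hive and Series.to_hive;
+            # the registered function receives the object being exported as
+            # its first argument
+            func = entry_point.load()
+            setattr(pandas.DataFrame, f"to_{format_name}", func)
+            setattr(pandas.Series, f"to_{format_name}", func)
+        # any other entrypoint name is ignored, reserved for future extensions
+```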
+
+#### Internal API
+
+Connectors will use the dataframe interchange API to provide data to pandas. When
+data is read from a connector, and before it is returned to the user as the result
+of `pandas.read_<format>`, the data will be converted via the dataframe interchange
+API into a pandas DataFrame. In practice, connectors are likely to return
+a pandas DataFrame or a PyArrow Table, but the interface will support any object
+implementing the dataframe interchange API.
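+
+A sketch of this normalization step is shown below. The `_wrap_reader` helper is
+hypothetical and only illustrates the idea: a registered reader's return value is
+passed through unchanged if it is already a pandas DataFrame, and converted with
+`pandas.api.interchange.from_dataframe` otherwise:
+
+```python
+import pandas
+from pandas.api.interchange import from_dataframe
+
+
+def _wrap_reader(reader):
+    """Hypothetical wrapper applied to registered reader functions."""
+    def read_wrapper(*args, **kwargs):
+        result = reader(*args, **kwargs)
+        if isinstance(result, pandas.DataFrame):
+            return result
+        # any object implementing __dataframe__ (e.g. a PyArrow Table)
+        # is converted through the dataframe interchange protocol
+        return from_dataframe(result)
+
+    return read_wrapper
+```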
+
+#### Connector guidelines
+
+In order to provide a better and more consistent experience to users, guidelines
+will be created to unify terminology and behavior. Some of the topics to unify are
+defined next.
+
+**Guidelines to avoid name conflicts**. Since more than one implementation is
+expected to exist for certain formats, as already happens today, guidelines on
+how to name connectors would be created. The easiest approach is probably to use
+a name of the form `to_<format>_<implementation-id>` when more than one connector
+is expected to exist. For example, for LanceDB it is likely that only one connector
+exists, and the name `lance` can be used (which would create `pandas.read_lance`
+or `DataFrame.to_lance`). But if a new `csv` reader based on the Arrow2 Rust
+implementation were written, the guidelines could recommend using `csv_arrow2` to
+create `pandas.read_csv_arrow2`, etc.
+
+**Existence and naming of parameters**. Many connectors are likely to provide
+similar features, like loading only a subset of columns in the data, or dealing
+with paths. Examples of recommendations to connector developers could be:
+
+- `columns`: Use this argument to let the user load a subset of columns. Allow a
+  list or tuple.
+- `path`: Use this argument if the dataset is a file on disk. Allow a string,
+  a `pathlib.Path` object, or a file descriptor. For a string object, allow URLs that
+  will be automatically downloaded, compressed files that will be automatically
+  uncompressed, etc. Specific libraries can be recommended to deal with those in an
+  easier and more consistent way.
+- `schema`: For datasets that don't have a schema (e.g. `csv`), allow providing an
+  Apache Arrow schema instance, and automatically infer types if not provided.
+
+Note that the above are only examples of guidelines for illustration, and not
+a proposal of the guidelines, which would be developed independently after this
+PDEP is approved.
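+
+Purely for illustration, a reader following the example guidelines above could
+expose a signature like the following (the format name, parameters and defaults
+are hypothetical):
+
+```python
+from __future__ import annotations
+
+from pathlib import Path
+
+import pandas
+
+
+def read_myformat(
+    path: str | Path,                  # local path, URL or file descriptor
+    columns: list[str] | None = None,  # optional subset of columns to load
+    schema=None,                       # e.g. a pyarrow.Schema; inferred if omitted
+) -> pandas.DataFrame:
+    """Hypothetical reader signature following the guidelines above."""
+    ...
+```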
+
+**Connector registry and documentation**. To simplify the discovery of connectors
+and their documentation, connector developers can be encouraged to register their
+projects in a central location, and to use a standard structure for documentation.
+This would allow the creation of a unified website to find the available
+connectors and their documentation. It would also allow customizing the
+documentation for specific implementations, and including their final API.
+
+### Connector examples
+
+This section lists specific examples of connectors that could immediately
+benefit from this proposal.
+
+**PyArrow** currently provides `Table.from_pandas` and `Table.to_pandas`.
+With the new interface, it could also register `pandas.read_pyarrow`
+and `DataFrame.to_pyarrow`, so pandas users can use these converters with
+the interface they are used to, when PyArrow is installed in the environment.
+Better integration with PyArrow tables was discussed in
+[#51760](https://github.com/pandas-dev/pandas/issues/51760).
+
+_Current API_:
+
+```python
+pyarrow.Table.from_pandas(table.to_pandas()
+                               .query('my_col > 0'))
+```
+
+_Proposed API_:
+
+```python
+(pandas.read_pyarrow(table)
+       .query('my_col > 0')
+       .to_pyarrow())
+```
+
+**Polars**, **Vaex** and other dataframe frameworks could benefit from
+third-party projects that give the interoperability with pandas a
+more explicit API. Integration with Polars was requested in
+[#47368](https://github.com/pandas-dev/pandas/issues/47368).
+
+_Current API_:
+
+```python
+polars.DataFrame(df.to_pandas()
+                   .query('my_col > 0'))
+```
+
+_Proposed API_:
+
+```python
+(pandas.read_polars(df)
+       .query('my_col > 0')
+       .to_polars())
+```
+
+**DuckDB** provides an out-of-core engine able to push down predicates before
+the data is loaded, making much better use of memory and significantly
+decreasing loading time. pandas, because of its eager nature, is not able
+to easily implement this itself, but could benefit from a DuckDB loader.
+The loader can already be implemented inside pandas (it has already been
+proposed in [#45678](https://github.com/pandas-dev/pandas/issues/45678)),
+or as a third-party extension with an arbitrary API. But this proposal would
+allow the creation of a third-party extension with a standard and intuitive API:
+
+```python
+pandas.read_duckdb("SELECT *
+                    FROM 'dataset.parquet'
+                    WHERE my_col > 0")
+```
+
+**Out-of-core algorithms** push some operations like filtering or grouping
+to the loading of the data. While this is not currently possible, connectors
+implementing out-of-core algorithms could be developed using this interface.
+
+**Big data** systems such as Hive, Iceberg, Presto, etc. could benefit
+from a standard way to load data into pandas. Also, regular **SQL databases**
+that can return their query results as Arrow would benefit from better
+and faster connectors than the existing ones based on SQLAlchemy and
+Python structures.
+
+Any other format, including **domain-specific formats**, could easily
+implement pandas connectors with a clear and intuitive API.
+
+### Limitations
+
+The implementation of this proposal has some limitations discussed here:
+
+- **Lack of support for multiple engines.** The current pandas I/O API
+  supports multiple engines for the same format (for the same function or
+  method name). For example `read_csv(engine='pyarrow', ...)`. Supporting
+  engines requires that all engines for a particular format use the same
+  signature (the same parameters), which is not ideal. Different connectors
+  are likely to have different parameters, and using `*args` and `**kwargs`
+  provides users with a more complex and difficult experience. For this
+  reason, this proposal prefers that function and method names be unique
+  instead of supporting an `engine` option.
+- **Lack of support for type checking of connectors.** This PDEP proposes
+  creating functions and methods dynamically, and those are not supported
+  for type checking using stubs. This is already the case for other
+  dynamically created components of pandas, such as custom accessors.
+- **No improvements to the current I/O API**. During the discussions of this
+  proposal, improving the current pandas I/O API was considered: fixing the
+  inconsistency of using `read` / `to` (instead of, for example,
+  `read` / `write`), avoiding `to_`-prefixed methods for non-I/O
+  operations, or using a dedicated namespace (e.g. `DataFrame.io`) for
+  the connectors. All of these changes are out of scope for this PDEP.
+
+## Future plans
+
+This PDEP is exclusively about supporting a better API for existing or future
+connectors. It is out of scope for this PDEP to implement changes to any
+connectors existing in the pandas code base.
+
+Some ideas for future discussion related to this PDEP include:
+
+- Automatic loading of I/O plugins when pandas is imported.
+
+- Removing from the pandas code base some of the least frequently used connectors,
+such as SAS, SPSS or Google BigQuery, and moving them to third-party connectors
+registered with this interface.
+
+- Discussing a better API for pandas connectors. For example, using `read_*`
+methods instead of `from_*` methods, renaming `to_*` methods not used as I/O
+connectors, using consistent terminology like from/to, read/write, load/dump, etc.,
+or using a dedicated namespace for connectors (e.g. `pandas.io` instead of the
+general `pandas` namespace).
+
+- Implementing as I/O connectors some of the formats supported by the `DataFrame`
+constructor.
+
+## PDEP-9 History
+
+- 5 March 2023: Initial version
+- 30 May 2023: Major refactoring to use the existing pandas API and
+  the dataframe interchange API, and to make the user explicitly load
+  the plugins
+- 13 June 2023: The PDEP did not get any support after several iterations,
+  and it has been closed as rejected by the author