From 7e461a18d9f6928132afec6f48ce968b3e989ba6 Mon Sep 17 00:00:00 2001
From: Kaiqi Dong <kaiqi@kth.se>
Date: Mon, 3 Dec 2018 17:43:52 +0100
Subject: [PATCH 01/48] remove \n from docstring

---
 pandas/core/arrays/datetimes.py  | 26 +++++++++++++-------------
 pandas/core/arrays/timedeltas.py | 16 ++++++++--------
 2 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index cfe3afcf3730a..b3df505d56d78 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -82,7 +82,7 @@ def f(self):
         return result
 
     f.__name__ = name
-    f.__doc__ = docstring
+    f.__doc__ = "\n{}\n".format(docstring)
     return property(f)
 
 
@@ -1072,19 +1072,19 @@ def date(self):
 
         return tslib.ints_to_pydatetime(timestamps, box="date")
 
-    year = _field_accessor('year', 'Y', "\n The year of the datetime\n")
+    year = _field_accessor('year', 'Y', "The year of the datetime")
     month = _field_accessor('month', 'M',
-                            "\n The month as January=1, December=12 \n")
-    day = _field_accessor('day', 'D', "\nThe days of the datetime\n")
-    hour = _field_accessor('hour', 'h', "\nThe hours of the datetime\n")
-    minute = _field_accessor('minute', 'm', "\nThe minutes of the datetime\n")
-    second = _field_accessor('second', 's', "\nThe seconds of the datetime\n")
+                            "The month as January=1, December=12")
+    day = _field_accessor('day', 'D', "The days of the datetime")
+    hour = _field_accessor('hour', 'h', "The hours of the datetime")
+    minute = _field_accessor('minute', 'm', "The minutes of the datetime")
+    second = _field_accessor('second', 's', "The seconds of the datetime")
     microsecond = _field_accessor('microsecond', 'us',
-                                  "\nThe microseconds of the datetime\n")
+                                  "The microseconds of the datetime")
     nanosecond = _field_accessor('nanosecond', 'ns',
-                                 "\nThe nanoseconds of the datetime\n")
+                                 "The nanoseconds of the datetime")
     weekofyear = _field_accessor('weekofyear', 'woy',
-                                 "\nThe week ordinal of the year\n")
+                                 "The week ordinal of the year")
     week = weekofyear
     _dayofweek_doc = """
     The day of the week with Monday=0, Sunday=6.
@@ -1129,12 +1129,12 @@ def date(self):
         "The name of day in a week (ex: Friday)\n\n.. deprecated:: 0.23.0")
 
     dayofyear = _field_accessor('dayofyear', 'doy',
-                                "\nThe ordinal day of the year\n")
-    quarter = _field_accessor('quarter', 'q', "\nThe quarter of the date\n")
+                                "The ordinal day of the year")
+    quarter = _field_accessor('quarter', 'q', "The quarter of the date")
     days_in_month = _field_accessor(
         'days_in_month',
         'dim',
-        "\nThe number of days in the month\n")
+        "The number of days in the month")
     daysinmonth = days_in_month
     _is_month_doc = """
         Indicates whether the date is the {first_or_last} day of the month.
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 830283d31a929..4afc9f5483c2a 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -59,7 +59,7 @@ def f(self):
         return result
 
     f.__name__ = name
-    f.__doc__ = docstring
+    f.__doc__ = "\n{}\n".format(docstring)
     return property(f)
 
 
@@ -684,16 +684,16 @@ def to_pytimedelta(self):
         return tslibs.ints_to_pytimedelta(self.asi8)
 
     days = _field_accessor("days", "days",
-                           "\nNumber of days for each element.\n")
+                           "Number of days for each element.")
     seconds = _field_accessor("seconds", "seconds",
-                              "\nNumber of seconds (>= 0 and less than 1 day) "
-                              "for each element.\n")
+                              "Number of seconds (>= 0 and less than 1 day) "
+                              "for each element.")
     microseconds = _field_accessor("microseconds", "microseconds",
-                                   "\nNumber of microseconds (>= 0 and less "
-                                   "than 1 second) for each element.\n")
+                                   "Number of microseconds (>= 0 and less "
+                                   "than 1 second) for each element.")
     nanoseconds = _field_accessor("nanoseconds", "nanoseconds",
-                                  "\nNumber of nanoseconds (>= 0 and less "
-                                  "than 1 microsecond) for each element.\n")
+                                  "Number of nanoseconds (>= 0 and less "
+                                  "than 1 microsecond) for each element.")
 
     @property
     def components(self):

From db4440d78ec44aaccd46efb7e8f857401e535e42 Mon Sep 17 00:00:00 2001
From: Kaiqi Dong <kaiqi@kth.se>
Date: Tue, 22 Jan 2019 00:23:18 +0100
Subject: [PATCH 02/48] add sym option for logx logy loglog and tests

---
 pandas/plotting/_core.py            | 14 ++++++++++----
 pandas/tests/plotting/test_frame.py |  6 ++++++
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 3ba06c0638317..cd5140d57b8d1 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -83,9 +83,9 @@ def _kind(self):
     _default_rot = 0
     orientation = None
     _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
-                       'mark_right', 'stacked']
+                       'mark_right', 'stacked', 'sym']
     _attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
-                      'mark_right': True, 'stacked': False}
+                      'mark_right': True, 'stacked': False, 'sym': False}
 
     def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
                  sharey=False, use_index=True,
@@ -310,9 +310,15 @@ def _setup_subplots(self):
         axes = _flatten(axes)
 
         if self.logx or self.loglog:
-            [a.set_xscale('log') for a in axes]
+            if self.sym:
+                [a.set_xscale('symlog') for a in axes]
+            else:
+                [a.set_xscale('log') for a in axes]
         if self.logy or self.loglog:
-            [a.set_yscale('log') for a in axes]
+            if self.sym:
+                [a.set_yscale('symlog') for a in axes]
+            else:
+                [a.set_yscale('log') for a in axes]
 
         self.fig = fig
         self.axes = axes
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 0e7672f4e2f9d..50574b5392557 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -233,12 +233,18 @@ def test_logscales(self):
         df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
         ax = df.plot(logy=True)
         self._check_ax_scales(ax, yaxis='log')
+        ax = df.plot(logy=True, sym=True)
+        self._check_ax_scales(ax, yaxis='symlog')
 
         ax = df.plot(logx=True)
         self._check_ax_scales(ax, xaxis='log')
+        ax = df.plot(logx=True, sym=True)
+        self._check_ax_scales(ax, xaxis='symlog')
 
         ax = df.plot(loglog=True)
         self._check_ax_scales(ax, xaxis='log', yaxis='log')
+        ax = df.plot(loglog=True, sym=True)
+        self._check_ax_scales(ax, xaxis='symlog', yaxis='symlog')
 
     @pytest.mark.slow
     def test_xcompat(self):

From a49be7f4ea4773af5b801ebd9504a20e5b40b245 Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <emailformattr@gmail.com>
Date: Tue, 22 Jan 2019 05:04:02 -0800
Subject: [PATCH 03/48] BUG: DataFrame respects dtype with masked recarray
 (#24874)

---
 doc/source/whatsnew/v0.24.0.rst         |  4 ++--
 pandas/core/internals/construction.py   |  2 +-
 pandas/tests/frame/test_constructors.py | 11 +++++++++++
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 69b59793f7c0d..d782e3d6858a4 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1692,8 +1692,8 @@ Missing
 - Bug in :func:`Series.hasnans` that could be incorrectly cached and return incorrect answers if null elements are introduced after an initial call (:issue:`19700`)
 - :func:`Series.isin` now treats all NaN-floats as equal also for ``np.object``-dtype. This behavior is consistent with the behavior for float64 (:issue:`22119`)
 - :func:`unique` no longer mangles NaN-floats and the ``NaT``-object for ``np.object``-dtype, i.e. ``NaT`` is no longer coerced to a NaN-value and is treated as a different entity. (:issue:`22295`)
-- :func:`DataFrame` and :func:`Series` now properly handle numpy masked arrays with hardened masks. Previously, constructing a DataFrame or Series from a masked array with a hard mask would create a pandas object containing the underlying value, rather than the expected NaN. (:issue:`24574`)
-
+- :class:`DataFrame` and :class:`Series` now properly handle numpy masked arrays with hardened masks. Previously, constructing a DataFrame or Series from a masked array with a hard mask would create a pandas object containing the underlying value, rather than the expected NaN. (:issue:`24574`)
+- Bug in :class:`DataFrame` constructor where ``dtype`` argument was not honored when handling numpy masked record arrays. (:issue:`24874`)
 
 MultiIndex
 ^^^^^^^^^^
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 7af347a141781..c05a9a0f8f3c7 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -93,7 +93,7 @@ def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
     if columns is None:
         columns = arr_columns
 
-    mgr = arrays_to_mgr(arrays, arr_columns, index, columns)
+    mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
 
     if copy:
         mgr = mgr.copy()
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 4f6a2e2bfbebf..90ad48cac3a5f 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -787,6 +787,17 @@ def test_constructor_maskedarray_hardened(self):
             dtype=float)
         tm.assert_frame_equal(result, expected)
 
+    def test_constructor_maskedrecarray_dtype(self):
+        # Ensure constructor honors dtype
+        data = np.ma.array(
+            np.ma.zeros(5, dtype=[('date', '<f8'), ('price', '<f8')]),
+            mask=[False] * 5)
+        data = data.view(ma.mrecords.mrecarray)
+        result = pd.DataFrame(data, dtype=int)
+        expected = pd.DataFrame(np.zeros((5, 2), dtype=int),
+                                columns=['date', 'price'])
+        tm.assert_frame_equal(result, expected)
+
     def test_constructor_mrecarray(self):
         # Ensure mrecarray produces frame identical to dict of masked arrays
         # from GH3479

From 853cd7031ff98737b9a50404c4a181dcda67a0be Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <emailformattr@gmail.com>
Date: Tue, 22 Jan 2019 06:49:32 -0800
Subject: [PATCH 04/48] REF/CLN: Move private method (#24875)

---
 pandas/core/computation/expr.py | 20 +++++++++++++++-----
 pandas/core/reshape/util.py     | 13 -------------
 2 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 9a44198ba3b86..d840bf6ae71a2 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -18,7 +18,6 @@
     UndefinedVariableError, _arith_ops_syms, _bool_ops_syms, _cmp_ops_syms,
     _mathops, _reductions, _unary_ops_syms, is_term)
 from pandas.core.computation.scope import Scope
-from pandas.core.reshape.util import compose
 
 import pandas.io.formats.printing as printing
 
@@ -103,8 +102,19 @@ def _replace_locals(tok):
     return toknum, tokval
 
 
-def _preparse(source, f=compose(_replace_locals, _replace_booleans,
-                                _rewrite_assign)):
+def _compose2(f, g):
+    """Compose 2 callables"""
+    return lambda *args, **kwargs: f(g(*args, **kwargs))
+
+
+def _compose(*funcs):
+    """Compose 2 or more callables"""
+    assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
+    return reduce(_compose2, funcs)
+
+
+def _preparse(source, f=_compose(_replace_locals, _replace_booleans,
+                                 _rewrite_assign)):
     """Compose a collection of tokenization functions
 
     Parameters
@@ -701,8 +711,8 @@ def visitor(x, y):
 class PandasExprVisitor(BaseExprVisitor):
 
     def __init__(self, env, engine, parser,
-                 preparser=partial(_preparse, f=compose(_replace_locals,
-                                                        _replace_booleans))):
+                 preparser=partial(_preparse, f=_compose(_replace_locals,
+                                                         _replace_booleans))):
         super(PandasExprVisitor, self).__init__(env, engine, parser, preparser)
 
 
diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py
index 7f43a0e9719b8..9d4135a7f310e 100644
--- a/pandas/core/reshape/util.py
+++ b/pandas/core/reshape/util.py
@@ -1,7 +1,5 @@
 import numpy as np
 
-from pandas.compat import reduce
-
 from pandas.core.dtypes.common import is_list_like
 
 from pandas.core import common as com
@@ -57,14 +55,3 @@ def cartesian_product(X):
     return [np.tile(np.repeat(np.asarray(com.values_from_object(x)), b[i]),
                     np.product(a[i]))
             for i, x in enumerate(X)]
-
-
-def _compose2(f, g):
-    """Compose 2 callables"""
-    return lambda *args, **kwargs: f(g(*args, **kwargs))
-
-
-def compose(*funcs):
-    """Compose 2 or more callables"""
-    assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
-    return reduce(_compose2, funcs)

From 6155ffde425a584a7baa32ca78fbbe0a8435d4cd Mon Sep 17 00:00:00 2001
From: Kaiqi Dong <kaiqi@kth.se>
Date: Tue, 22 Jan 2019 23:04:44 +0100
Subject: [PATCH 05/48] changes based on reviews

---
 pandas/plotting/_core.py            | 28 ++++++++++++++++------------
 pandas/tests/plotting/test_frame.py | 26 +++++++++++++++++++++++---
 2 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index cd5140d57b8d1..8002811acc88a 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -83,9 +83,9 @@ def _kind(self):
     _default_rot = 0
     orientation = None
     _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
-                       'mark_right', 'stacked', 'sym']
+                       'mark_right', 'stacked']
     _attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
-                      'mark_right': True, 'stacked': False, 'sym': False}
+                      'mark_right': True, 'stacked': False}
 
     def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
                  sharey=False, use_index=True,
@@ -309,16 +309,20 @@ def _setup_subplots(self):
 
         axes = _flatten(axes)
 
-        if self.logx or self.loglog:
-            if self.sym:
-                [a.set_xscale('symlog') for a in axes]
-            else:
-                [a.set_xscale('log') for a in axes]
-        if self.logy or self.loglog:
-            if self.sym:
-                [a.set_yscale('symlog') for a in axes]
-            else:
-                [a.set_yscale('log') for a in axes]
+        valid_log = [False, True, 'sym']
+        for i in [self.logx, self.logy, self.loglog]:
+            if i not in valid_log:
+                raise ValueError("Wrong input for log option.")
+
+        if self.logx is True or self.loglog is True:
+            [a.set_xscale('log') for a in axes]
+        elif self.logx == 'sym' or self.loglog == 'sym':
+            [a.set_xscale('symlog') for a in axes]
+
+        if self.logy is True or self.loglog is True:
+            [a.set_yscale('log') for a in axes]
+        elif self.logy == 'sym' or self.loglog == 'sym':
+            [a.set_yscale('symlog') for a in axes]
 
         self.fig = fig
         self.axes = axes
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 50574b5392557..344afa940bb7a 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -231,20 +231,40 @@ def test_plot_xy(self):
     @pytest.mark.slow
     def test_logscales(self):
         df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
+
         ax = df.plot(logy=True)
         self._check_ax_scales(ax, yaxis='log')
-        ax = df.plot(logy=True, sym=True)
+        assert ax.get_yscale() == 'log'
+
+        ax = df.plot(logy='sym')
         self._check_ax_scales(ax, yaxis='symlog')
+        assert ax.get_yscale() == 'symlog'
 
         ax = df.plot(logx=True)
         self._check_ax_scales(ax, xaxis='log')
-        ax = df.plot(logx=True, sym=True)
+        assert ax.get_xscale() == 'log'
+
+        ax = df.plot(logx='sym')
         self._check_ax_scales(ax, xaxis='symlog')
+        assert ax.get_xscale() == 'symlog'
 
         ax = df.plot(loglog=True)
         self._check_ax_scales(ax, xaxis='log', yaxis='log')
-        ax = df.plot(loglog=True, sym=True)
+        assert ax.get_xscale() == 'log'
+        assert ax.get_yscale() == 'log'
+
+        ax = df.plot(loglog='sym')
         self._check_ax_scales(ax, xaxis='symlog', yaxis='symlog')
+        assert ax.get_xscale() == 'symlog'
+        assert ax.get_yscale() == 'symlog'
+
+    @pytest.mark.parametrize("wrong_input", ["sm", "symlog"])
+    def test_invalid_logscale(self, wrong_input):
+        df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
+
+        msg = "Wrong input for log option."
+        with pytest.raises(ValueError, match=msg):
+            df.plot(logy=wrong_input)
 
     @pytest.mark.slow
     def test_xcompat(self):

From 597f9f31639eeb5724e49bec602e15b9bf8be092 Mon Sep 17 00:00:00 2001
From: cgangwar11 <chandan.gangwar0411@gmail.com>
Date: Wed, 23 Jan 2019 04:59:33 +0530
Subject: [PATCH 06/48] BUG: ValueError in case of NaN values in groupby
 columns (#24850)

---
 doc/source/whatsnew/v0.24.0.rst          |  1 +
 pandas/core/groupby/grouper.py           |  1 +
 pandas/tests/groupby/test_categorical.py | 33 ++++++++++++++++++++++++
 3 files changed, 35 insertions(+)

diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index d782e3d6858a4..9d2dea3aeb796 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1786,6 +1786,7 @@ Groupby/Resample/Rolling
 - Bug in :meth:`DataFrame.groupby` did not respect the ``observed`` argument when selecting a column and instead always used ``observed=False`` (:issue:`23970`)
 - Bug in :func:`pandas.core.groupby.SeriesGroupBy.pct_change` or :func:`pandas.core.groupby.DataFrameGroupBy.pct_change` would previously work across groups when calculating the percent change, where it now correctly works per group (:issue:`21200`, :issue:`21235`).
 - Bug preventing hash table creation with very large number (2^32) of rows (:issue:`22805`)
+- Bug in groupby when grouping on a categorical column caused a ``ValueError`` and incorrect grouping if ``observed=True`` and ``nan`` was present in the categorical column (:issue:`24740`, :issue:`21151`).
 
 Reshaping
 ^^^^^^^^^
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 633a1643f6cdd..260417bc0d598 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -299,6 +299,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
                 self._labels = self.grouper.codes
                 if observed:
                     codes = algorithms.unique1d(self.grouper.codes)
+                    codes = codes[codes != -1]
                 else:
                     codes = np.arange(len(categories))
 
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 144b64025e1c0..e118135ccc75d 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -420,6 +420,39 @@ def test_observed_groups(observed):
     tm.assert_dict_equal(result, expected)
 
 
+def test_observed_groups_with_nan(observed):
+    # GH 24740
+    df = pd.DataFrame({'cat': pd.Categorical(['a', np.nan, 'a'],
+                       categories=['a', 'b', 'd']),
+                       'vals': [1, 2, 3]})
+    g = df.groupby('cat', observed=observed)
+    result = g.groups
+    if observed:
+        expected = {'a': Index([0, 2], dtype='int64')}
+    else:
+        expected = {'a': Index([0, 2], dtype='int64'),
+                    'b': Index([], dtype='int64'),
+                    'd': Index([], dtype='int64')}
+    tm.assert_dict_equal(result, expected)
+
+
+def test_dataframe_categorical_with_nan(observed):
+    # GH 21151
+    s1 = pd.Categorical([np.nan, 'a', np.nan, 'a'],
+                        categories=['a', 'b', 'c'])
+    s2 = pd.Series([1, 2, 3, 4])
+    df = pd.DataFrame({'s1': s1, 's2': s2})
+    result = df.groupby('s1', observed=observed).first().reset_index()
+    if observed:
+        expected = DataFrame({'s1': pd.Categorical(['a'],
+                              categories=['a', 'b', 'c']), 's2': [2]})
+    else:
+        expected = DataFrame({'s1': pd.Categorical(['a', 'b', 'c'],
+                              categories=['a', 'b', 'c']),
+                              's2': [2, np.nan, np.nan]})
+    tm.assert_frame_equal(result, expected)
+
+
 def test_datetime():
     # GH9049: ensure backward compatibility
     levels = pd.date_range('2014-01-01', periods=4)

From c588437f116a080cdcabb023a5e746dda2cb63c9 Mon Sep 17 00:00:00 2001
From: Guillaume Lemaitre <g.lemaitre58@gmail.com>
Date: Wed, 23 Jan 2019 13:23:49 +0100
Subject: [PATCH 07/48] BUG: fix floating precision formatting in presence of
 inf (#24863)

---
 doc/source/whatsnew/v0.24.0.rst        |  1 +
 pandas/io/formats/format.py            | 14 +++++++----
 pandas/tests/io/formats/test_format.py | 33 ++++++++++++++++++++++++++
 3 files changed, 43 insertions(+), 5 deletions(-)

diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 9d2dea3aeb796..3dd345890881c 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1751,6 +1751,7 @@ I/O
 - Bug in :meth:`DataFrame.to_stata`, :class:`pandas.io.stata.StataWriter` and :class:`pandas.io.stata.StataWriter117` where a exception would leave a partially written and invalid dta file (:issue:`23573`)
 - Bug in :meth:`DataFrame.to_stata` and :class:`pandas.io.stata.StataWriter117` that produced invalid files when using strLs with non-ASCII characters (:issue:`23573`)
 - Bug in :class:`HDFStore` that caused it to raise ``ValueError`` when reading a Dataframe in Python 3 from fixed format written in Python 2 (:issue:`24510`)
+- Bug in :func:`DataFrame.to_string()` and more generally in the floating ``repr`` formatter. Zeros were not trimmed if ``inf`` was present in a column, while they were trimmed in the presence of NA values. Zeros are now trimmed as in the presence of NA (:issue:`24861`).
 
 Plotting
 ^^^^^^^^
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index bdeed58d856cc..2c1fcab1ebde9 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1414,16 +1414,20 @@ def _trim_zeros(str_floats, na_rep='NaN'):
     """
     trimmed = str_floats
 
+    def _is_number(x):
+        return (x != na_rep and not x.endswith('inf'))
+
     def _cond(values):
-        non_na = [x for x in values if x != na_rep]
-        return (len(non_na) > 0 and all(x.endswith('0') for x in non_na) and
-                not (any(('e' in x) or ('E' in x) for x in non_na)))
+        finite = [x for x in values if _is_number(x)]
+        return (len(finite) > 0 and all(x.endswith('0') for x in finite) and
+                not (any(('e' in x) or ('E' in x) for x in finite)))
 
     while _cond(trimmed):
-        trimmed = [x[:-1] if x != na_rep else x for x in trimmed]
+        trimmed = [x[:-1] if _is_number(x) else x for x in trimmed]
 
     # leave one 0 after the decimal points if need be.
-    return [x + "0" if x.endswith('.') and x != na_rep else x for x in trimmed]
+    return [x + "0" if x.endswith('.') and _is_number(x) else x
+            for x in trimmed]
 
 
 def _has_names(index):
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 52dce572c6d4f..31ab1e050d95c 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -1465,6 +1465,39 @@ def test_to_string_format_na(self):
                     '4  4.0     bar')
         assert result == expected
 
+    def test_to_string_format_inf(self):
+        # Issue #24861
+        tm.reset_display_options()
+        df = DataFrame({
+            'A': [-np.inf, np.inf, -1, -2.1234, 3, 4],
+            'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar']
+        })
+        result = df.to_string()
+
+        expected = ('        A       B\n'
+                    '0    -inf    -inf\n'
+                    '1     inf     inf\n'
+                    '2 -1.0000     foo\n'
+                    '3 -2.1234   foooo\n'
+                    '4  3.0000  fooooo\n'
+                    '5  4.0000     bar')
+        assert result == expected
+
+        df = DataFrame({
+            'A': [-np.inf, np.inf, -1., -2., 3., 4.],
+            'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar']
+        })
+        result = df.to_string()
+
+        expected = ('     A       B\n'
+                    '0 -inf    -inf\n'
+                    '1  inf     inf\n'
+                    '2 -1.0     foo\n'
+                    '3 -2.0   foooo\n'
+                    '4  3.0  fooooo\n'
+                    '5  4.0     bar')
+        assert result == expected
+
     def test_to_string_decimal(self):
         # Issue #23614
         df = DataFrame({'A': [6.0, 3.1, 2.2]})

From 8972ddf13f6d0ba827f99a63d4261944baf3f29f Mon Sep 17 00:00:00 2001
From: Marc Garcia <garcia.marc@gmail.com>
Date: Wed, 23 Jan 2019 16:22:24 +0000
Subject: [PATCH 08/48] DOC: Creating top-level user guide section, and moving
 pages inside (#24677)

---
 doc/source/index.rst.template                 | 20 +---------
 doc/source/{ => user_guide}/advanced.rst      |  0
 doc/source/{ => user_guide}/categorical.rst   |  0
 doc/source/{ => user_guide}/computation.rst   |  0
 doc/source/{ => user_guide}/enhancingperf.rst |  4 +-
 doc/source/{ => user_guide}/gotchas.rst       |  0
 doc/source/{ => user_guide}/groupby.rst       |  0
 doc/source/user_guide/index.rst               | 39 +++++++++++++++++++
 doc/source/{ => user_guide}/indexing.rst      |  4 +-
 doc/source/{ => user_guide}/integer_na.rst    |  0
 doc/source/{ => user_guide}/io.rst            | 14 +++----
 doc/source/{ => user_guide}/merging.rst       |  0
 doc/source/{ => user_guide}/missing_data.rst  |  0
 doc/source/{ => user_guide}/options.rst       |  8 ++--
 doc/source/{ => user_guide}/reshaping.rst     | 12 +++---
 doc/source/{ => user_guide}/sparse.rst        |  0
 doc/source/{ => user_guide}/style.ipynb       |  2 +-
 doc/source/{ => user_guide}/text.rst          |  0
 doc/source/{ => user_guide}/timedeltas.rst    |  0
 doc/source/{ => user_guide}/timeseries.rst    |  0
 doc/source/{ => user_guide}/visualization.rst |  0
 21 files changed, 62 insertions(+), 41 deletions(-)
 rename doc/source/{ => user_guide}/advanced.rst (100%)
 rename doc/source/{ => user_guide}/categorical.rst (100%)
 rename doc/source/{ => user_guide}/computation.rst (100%)
 rename doc/source/{ => user_guide}/enhancingperf.rst (99%)
 rename doc/source/{ => user_guide}/gotchas.rst (100%)
 rename doc/source/{ => user_guide}/groupby.rst (100%)
 create mode 100644 doc/source/user_guide/index.rst
 rename doc/source/{ => user_guide}/indexing.rst (99%)
 rename doc/source/{ => user_guide}/integer_na.rst (100%)
 rename doc/source/{ => user_guide}/io.rst (99%)
 rename doc/source/{ => user_guide}/merging.rst (100%)
 rename doc/source/{ => user_guide}/missing_data.rst (100%)
 rename doc/source/{ => user_guide}/options.rst (99%)
 rename doc/source/{ => user_guide}/reshaping.rst (98%)
 rename doc/source/{ => user_guide}/sparse.rst (100%)
 rename doc/source/{ => user_guide}/style.ipynb (99%)
 rename doc/source/{ => user_guide}/text.rst (100%)
 rename doc/source/{ => user_guide}/timedeltas.rst (100%)
 rename doc/source/{ => user_guide}/timeseries.rst (100%)
 rename doc/source/{ => user_guide}/visualization.rst (100%)

diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index b85150c3444b7..571c906acbd43 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -131,25 +131,7 @@ See the package overview for more detail about what's in the library.
     cookbook
     dsintro
     basics
-    text
-    options
-    indexing
-    advanced
-    computation
-    missing_data
-    groupby
-    merging
-    reshaping
-    timeseries
-    timedeltas
-    categorical
-    integer_na
-    visualization
-    style
-    io
-    enhancingperf
-    sparse
-    gotchas
+    user_guide/index
     r_interface
     ecosystem
     comparison_with_r
diff --git a/doc/source/advanced.rst b/doc/source/user_guide/advanced.rst
similarity index 100%
rename from doc/source/advanced.rst
rename to doc/source/user_guide/advanced.rst
diff --git a/doc/source/categorical.rst b/doc/source/user_guide/categorical.rst
similarity index 100%
rename from doc/source/categorical.rst
rename to doc/source/user_guide/categorical.rst
diff --git a/doc/source/computation.rst b/doc/source/user_guide/computation.rst
similarity index 100%
rename from doc/source/computation.rst
rename to doc/source/user_guide/computation.rst
diff --git a/doc/source/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst
similarity index 99%
rename from doc/source/enhancingperf.rst
rename to doc/source/user_guide/enhancingperf.rst
index 0e3d389aa4f6e..9941ffcc9de4d 100644
--- a/doc/source/enhancingperf.rst
+++ b/doc/source/user_guide/enhancingperf.rst
@@ -783,7 +783,7 @@ significant performance benefit.  Here is a plot showing the running time of
 computation. The two lines are two different engines.
 
 
-.. image:: _static/eval-perf.png
+.. image:: ../_static/eval-perf.png
 
 
 .. note::
@@ -791,7 +791,7 @@ computation. The two lines are two different engines.
    Operations with smallish objects (around 15k-20k rows) are faster using
    plain Python:
 
-       .. image:: _static/eval-perf-small.png
+       .. image:: ../_static/eval-perf-small.png
 
 
 This plot was created using a ``DataFrame`` with 3 columns each containing
diff --git a/doc/source/gotchas.rst b/doc/source/user_guide/gotchas.rst
similarity index 100%
rename from doc/source/gotchas.rst
rename to doc/source/user_guide/gotchas.rst
diff --git a/doc/source/groupby.rst b/doc/source/user_guide/groupby.rst
similarity index 100%
rename from doc/source/groupby.rst
rename to doc/source/user_guide/groupby.rst
diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst
new file mode 100644
index 0000000000000..60e722808d647
--- /dev/null
+++ b/doc/source/user_guide/index.rst
@@ -0,0 +1,39 @@
+{{ header }}
+
+.. _user_guide:
+
+==========
+User Guide
+==========
+
+The User Guide covers all of pandas by topic area. Each of the subsections
+introduces a topic (such as "working with missing data"), and discusses how
+pandas approaches the problem, with many examples throughout.
+
+Users brand-new to pandas should start with :ref:`10min`.
+
+Further information on any specific method can be obtained in the
+:ref:`api`.
+
+.. toctree::
+    :maxdepth: 2
+
+    io
+    indexing
+    advanced
+    merging
+    reshaping
+    text
+    missing_data
+    categorical
+    integer_na
+    visualization
+    computation
+    groupby
+    timeseries
+    timedeltas
+    style
+    options
+    enhancingperf
+    sparse
+    gotchas
diff --git a/doc/source/indexing.rst b/doc/source/user_guide/indexing.rst
similarity index 99%
rename from doc/source/indexing.rst
rename to doc/source/user_guide/indexing.rst
index 3fe416c48f670..be1745e2664a1 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -1392,7 +1392,7 @@ Performance of :meth:`~pandas.DataFrame.query`
 ``DataFrame.query()`` using ``numexpr`` is slightly faster than Python for
 large frames.
 
-.. image:: _static/query-perf.png
+.. image:: ../_static/query-perf.png
 
 .. note::
 
@@ -1400,7 +1400,7 @@ large frames.
    with ``DataFrame.query()`` if your frame has more than approximately 200,000
    rows.
 
-      .. image:: _static/query-perf-small.png
+      .. image:: ../_static/query-perf-small.png
 
 This plot was created using a ``DataFrame`` with 3 columns each containing
 floating point values generated using ``numpy.random.randn()``.
diff --git a/doc/source/integer_na.rst b/doc/source/user_guide/integer_na.rst
similarity index 100%
rename from doc/source/integer_na.rst
rename to doc/source/user_guide/integer_na.rst
diff --git a/doc/source/io.rst b/doc/source/user_guide/io.rst
similarity index 99%
rename from doc/source/io.rst
rename to doc/source/user_guide/io.rst
index dd1cde0bdff73..0132392aacaff 100644
--- a/doc/source/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -2549,7 +2549,7 @@ in the method ``to_string`` described above.
 HTML:
 
 .. raw:: html
-   :file: _static/basic.html
+   :file: ../_static/basic.html
 
 The ``columns`` argument will limit the columns shown:
 
@@ -2565,7 +2565,7 @@ The ``columns`` argument will limit the columns shown:
 HTML:
 
 .. raw:: html
-   :file: _static/columns.html
+   :file: ../_static/columns.html
 
 ``float_format`` takes a Python callable to control the precision of floating
 point values:
@@ -2582,7 +2582,7 @@ point values:
 HTML:
 
 .. raw:: html
-   :file: _static/float_format.html
+   :file: ../_static/float_format.html
 
 ``bold_rows`` will make the row labels bold by default, but you can turn that
 off:
@@ -2597,7 +2597,7 @@ off:
    write_html(df, 'nobold', bold_rows=False)
 
 .. raw:: html
-   :file: _static/nobold.html
+   :file: ../_static/nobold.html
 
 The ``classes`` argument provides the ability to give the resulting HTML
 table CSS classes. Note that these classes are *appended* to the existing
@@ -2627,7 +2627,7 @@ that contain URLs.
 HTML:
 
 .. raw:: html
-   :file: _static/render_links.html
+   :file: ../_static/render_links.html
 
 Finally, the ``escape`` argument allows you to control whether the
 "<", ">" and "&" characters escaped in the resulting HTML (by default it is
@@ -2651,7 +2651,7 @@ Escaped:
    print(df.to_html())
 
 .. raw:: html
-   :file: _static/escape.html
+   :file: ../_static/escape.html
 
 Not escaped:
 
@@ -2660,7 +2660,7 @@ Not escaped:
    print(df.to_html(escape=False))
 
 .. raw:: html
-   :file: _static/noescape.html
+   :file: ../_static/noescape.html
 
 .. note::
 
diff --git a/doc/source/merging.rst b/doc/source/user_guide/merging.rst
similarity index 100%
rename from doc/source/merging.rst
rename to doc/source/user_guide/merging.rst
diff --git a/doc/source/missing_data.rst b/doc/source/user_guide/missing_data.rst
similarity index 100%
rename from doc/source/missing_data.rst
rename to doc/source/user_guide/missing_data.rst
diff --git a/doc/source/options.rst b/doc/source/user_guide/options.rst
similarity index 99%
rename from doc/source/options.rst
rename to doc/source/user_guide/options.rst
index e91be3e6ae730..d640d8b1153c5 100644
--- a/doc/source/options.rst
+++ b/doc/source/user_guide/options.rst
@@ -487,7 +487,7 @@ If a DataFrame or Series contains these characters, the default output mode may
    df = pd.DataFrame({u'国籍': ['UK', u'日本'], u'名前': ['Alice', u'しのぶ']})
    df
 
-.. image:: _static/option_unicode01.png
+.. image:: ../_static/option_unicode01.png
 
 Enabling ``display.unicode.east_asian_width`` allows pandas to check each character's "East Asian Width" property.
 These characters can be aligned properly by setting this option to ``True``. However, this will result in longer render
@@ -498,7 +498,7 @@ times than the standard ``len`` function.
    pd.set_option('display.unicode.east_asian_width', True)
    df
 
-.. image:: _static/option_unicode02.png
+.. image:: ../_static/option_unicode02.png
 
 In addition, Unicode characters whose width is "Ambiguous" can either be 1 or 2 characters wide depending on the
 terminal setting or encoding. The option ``display.unicode.ambiguous_as_wide`` can be used to handle the ambiguity.
@@ -510,7 +510,7 @@ By default, an "Ambiguous" character's width, such as "¡" (inverted exclamation
    df = pd.DataFrame({'a': ['xxx', u'¡¡'], 'b': ['yyy', u'¡¡']})
    df
 
-.. image:: _static/option_unicode03.png
+.. image:: ../_static/option_unicode03.png
 
 Enabling ``display.unicode.ambiguous_as_wide`` makes pandas interpret these characters' widths to be 2.
 (Note that this option will only be effective when ``display.unicode.east_asian_width`` is enabled.)
@@ -522,7 +522,7 @@ However, setting this option incorrectly for your terminal will cause these char
    pd.set_option('display.unicode.ambiguous_as_wide', True)
    df
 
-.. image:: _static/option_unicode04.png
+.. image:: ../_static/option_unicode04.png
 
 .. ipython:: python
    :suppress:
diff --git a/doc/source/reshaping.rst b/doc/source/user_guide/reshaping.rst
similarity index 98%
rename from doc/source/reshaping.rst
rename to doc/source/user_guide/reshaping.rst
index 9891e22e9d552..5c11be34e6ed4 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/user_guide/reshaping.rst
@@ -9,7 +9,7 @@ Reshaping and Pivot Tables
 Reshaping by pivoting DataFrame objects
 ---------------------------------------
 
-.. image:: _static/reshaping_pivot.png
+.. image:: ../_static/reshaping_pivot.png
 
 .. ipython:: python
    :suppress:
@@ -101,7 +101,7 @@ are homogeneously-typed.
 Reshaping by stacking and unstacking
 ------------------------------------
 
-.. image:: _static/reshaping_stack.png
+.. image:: ../_static/reshaping_stack.png
 
 Closely related to the :meth:`~DataFrame.pivot` method are the related
 :meth:`~DataFrame.stack` and :meth:`~DataFrame.unstack` methods available on
@@ -116,7 +116,7 @@ Closely related to the :meth:`~DataFrame.pivot` method are the related
   (possibly hierarchical) row index to the column axis, producing a reshaped
   ``DataFrame`` with a new inner-most level of column labels.
 
-.. image:: _static/reshaping_unstack.png
+.. image:: ../_static/reshaping_unstack.png
 
 The clearest way to explain is by example. Let's take a prior example data set
 from the hierarchical indexing section:
@@ -158,7 +158,7 @@ unstacks the **last level**:
 
 .. _reshaping.unstack_by_name:
 
-.. image:: _static/reshaping_unstack_1.png
+.. image:: ../_static/reshaping_unstack_1.png
 
 If the indexes have names, you can use the level names instead of specifying
 the level numbers:
@@ -168,7 +168,7 @@ the level numbers:
    stacked.unstack('second')
 
 
-.. image:: _static/reshaping_unstack_0.png
+.. image:: ../_static/reshaping_unstack_0.png
 
 Notice that the ``stack`` and ``unstack`` methods implicitly sort the index
 levels involved. Hence a call to ``stack`` and then ``unstack``, or vice versa,
@@ -279,7 +279,7 @@ the right thing:
 Reshaping by Melt
 -----------------
 
-.. image:: _static/reshaping_melt.png
+.. image:: ../_static/reshaping_melt.png
 
 The top-level :func:`~pandas.melt` function and the corresponding :meth:`DataFrame.melt`
 are useful to massage a ``DataFrame`` into a format where one or more columns
diff --git a/doc/source/sparse.rst b/doc/source/user_guide/sparse.rst
similarity index 100%
rename from doc/source/sparse.rst
rename to doc/source/user_guide/sparse.rst
diff --git a/doc/source/style.ipynb b/doc/source/user_guide/style.ipynb
similarity index 99%
rename from doc/source/style.ipynb
rename to doc/source/user_guide/style.ipynb
index 792fe5120f6e8..a238c3b16e9ad 100644
--- a/doc/source/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -992,7 +992,7 @@
    "source": [
     "A screenshot of the output:\n",
     "\n",
-    "![Excel spreadsheet with styled DataFrame](_static/style-excel.png)\n"
+    "![Excel spreadsheet with styled DataFrame](../_static/style-excel.png)\n"
    ]
   },
   {
diff --git a/doc/source/text.rst b/doc/source/user_guide/text.rst
similarity index 100%
rename from doc/source/text.rst
rename to doc/source/user_guide/text.rst
diff --git a/doc/source/timedeltas.rst b/doc/source/user_guide/timedeltas.rst
similarity index 100%
rename from doc/source/timedeltas.rst
rename to doc/source/user_guide/timedeltas.rst
diff --git a/doc/source/timeseries.rst b/doc/source/user_guide/timeseries.rst
similarity index 100%
rename from doc/source/timeseries.rst
rename to doc/source/user_guide/timeseries.rst
diff --git a/doc/source/visualization.rst b/doc/source/user_guide/visualization.rst
similarity index 100%
rename from doc/source/visualization.rst
rename to doc/source/user_guide/visualization.rst

From 14eac280e1b7516714d117e65b45023dee7feee7 Mon Sep 17 00:00:00 2001
From: Marc Garcia <garcia.marc@gmail.com>
Date: Wed, 23 Jan 2019 16:23:18 +0000
Subject: [PATCH 09/48] DOC: Creating top-level development section, and moving
 pages inside (#24691)

---
 doc/source/{ => development}/contributing.rst     |  2 +-
 .../{ => development}/contributing_docstring.rst  |  0
 doc/source/{ => development}/developer.rst        |  0
 doc/source/{ => development}/extending.rst        |  0
 doc/source/development/index.rst                  | 15 +++++++++++++++
 doc/source/{ => development}/internals.rst        |  0
 doc/source/index.rst.template                     |  5 +----
 setup.cfg                                         |  2 +-
 8 files changed, 18 insertions(+), 6 deletions(-)
 rename doc/source/{ => development}/contributing.rst (99%)
 rename doc/source/{ => development}/contributing_docstring.rst (100%)
 rename doc/source/{ => development}/developer.rst (100%)
 rename doc/source/{ => development}/extending.rst (100%)
 create mode 100644 doc/source/development/index.rst
 rename doc/source/{ => development}/internals.rst (100%)

diff --git a/doc/source/contributing.rst b/doc/source/development/contributing.rst
similarity index 99%
rename from doc/source/contributing.rst
rename to doc/source/development/contributing.rst
index a68e5c70087e9..c9d6845107dfc 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -698,7 +698,7 @@ A pull-request will be considered for merging when you have an all 'green' build
 then you will get a red 'X', where you can click through to see the individual failed tests.
 This is an example of a green build.
 
-.. image:: _static/ci.png
+.. image:: ../_static/ci.png
 
 .. note::
 
diff --git a/doc/source/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst
similarity index 100%
rename from doc/source/contributing_docstring.rst
rename to doc/source/development/contributing_docstring.rst
diff --git a/doc/source/developer.rst b/doc/source/development/developer.rst
similarity index 100%
rename from doc/source/developer.rst
rename to doc/source/development/developer.rst
diff --git a/doc/source/extending.rst b/doc/source/development/extending.rst
similarity index 100%
rename from doc/source/extending.rst
rename to doc/source/development/extending.rst
diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst
new file mode 100644
index 0000000000000..d67a6c3a2ca04
--- /dev/null
+++ b/doc/source/development/index.rst
@@ -0,0 +1,15 @@
+{{ header }}
+
+.. _development:
+
+===========
+Development
+===========
+
+.. toctree::
+    :maxdepth: 2
+
+    contributing
+    internals
+    extending
+    developer
diff --git a/doc/source/internals.rst b/doc/source/development/internals.rst
similarity index 100%
rename from doc/source/internals.rst
rename to doc/source/development/internals.rst
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index 571c906acbd43..53f116a1a0e0a 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -124,7 +124,6 @@ See the package overview for more detail about what's in the library.
     {% if not single_doc -%}
     What's New <whatsnew/v0.24.0>
     install
-    contributing
     overview
     10min
     tutorials
@@ -143,8 +142,6 @@ See the package overview for more detail about what's in the library.
     api/index
     {% endif -%}
     {% if not single_doc -%}
-    developer
-    internals
-    extending
+    development/index
     whatsnew/index
     {% endif -%}
diff --git a/setup.cfg b/setup.cfg
index 95c71826a80d4..6143cb8446216 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -47,7 +47,7 @@ ignore = E402,  # module level import not at top of file
 
 exclude =
     doc/source/basics.rst
-    doc/source/contributing_docstring.rst
+    doc/source/development/contributing_docstring.rst
 
 
 [yapf]

From 4b937ff59127bf0edec8e07a667fa222bf2e3b4e Mon Sep 17 00:00:00 2001
From: Marc Garcia <garcia.marc@gmail.com>
Date: Wed, 23 Jan 2019 17:52:11 +0000
Subject: [PATCH 10/48] DOC: Creating top-level getting started section, and
 moving pages inside (#24678)

---
 doc/source/{ => getting_started}/10min.rst     |  0
 doc/source/{ => getting_started}/basics.rst    |  0
 doc/source/{ => getting_started}/dsintro.rst   |  0
 doc/source/getting_started/index.rst           | 16 ++++++++++++++++
 doc/source/{ => getting_started}/overview.rst  |  2 +-
 doc/source/{ => getting_started}/tutorials.rst |  0
 doc/source/index.rst.template                  |  6 +-----
 setup.cfg                                      |  2 +-
 8 files changed, 19 insertions(+), 7 deletions(-)
 rename doc/source/{ => getting_started}/10min.rst (100%)
 rename doc/source/{ => getting_started}/basics.rst (100%)
 rename doc/source/{ => getting_started}/dsintro.rst (100%)
 create mode 100644 doc/source/getting_started/index.rst
 rename doc/source/{ => getting_started}/overview.rst (99%)
 rename doc/source/{ => getting_started}/tutorials.rst (100%)

diff --git a/doc/source/10min.rst b/doc/source/getting_started/10min.rst
similarity index 100%
rename from doc/source/10min.rst
rename to doc/source/getting_started/10min.rst
diff --git a/doc/source/basics.rst b/doc/source/getting_started/basics.rst
similarity index 100%
rename from doc/source/basics.rst
rename to doc/source/getting_started/basics.rst
diff --git a/doc/source/dsintro.rst b/doc/source/getting_started/dsintro.rst
similarity index 100%
rename from doc/source/dsintro.rst
rename to doc/source/getting_started/dsintro.rst
diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst
new file mode 100644
index 0000000000000..116efe79beef1
--- /dev/null
+++ b/doc/source/getting_started/index.rst
@@ -0,0 +1,16 @@
+{{ header }}
+
+.. _getting_started:
+
+===============
+Getting started
+===============
+
+.. toctree::
+    :maxdepth: 2
+
+    overview
+    10min
+    basics
+    dsintro
+    tutorials
diff --git a/doc/source/overview.rst b/doc/source/getting_started/overview.rst
similarity index 99%
rename from doc/source/overview.rst
rename to doc/source/getting_started/overview.rst
index b98e2d4b9963c..1e07df47aadca 100644
--- a/doc/source/overview.rst
+++ b/doc/source/getting_started/overview.rst
@@ -119,5 +119,5 @@ The information about current institutional partners can be found on `pandas web
 License
 -------
 
-.. literalinclude:: ../../LICENSE
+.. literalinclude:: ../../../LICENSE
 
diff --git a/doc/source/tutorials.rst b/doc/source/getting_started/tutorials.rst
similarity index 100%
rename from doc/source/tutorials.rst
rename to doc/source/getting_started/tutorials.rst
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index 53f116a1a0e0a..bc420a906b59c 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -124,12 +124,8 @@ See the package overview for more detail about what's in the library.
     {% if not single_doc -%}
     What's New <whatsnew/v0.24.0>
     install
-    overview
-    10min
-    tutorials
+    getting_started/index
     cookbook
-    dsintro
-    basics
     user_guide/index
     r_interface
     ecosystem
diff --git a/setup.cfg b/setup.cfg
index 6143cb8446216..7155cc1013544 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -46,7 +46,7 @@ ignore = E402,  # module level import not at top of file
          E711,  # comparison to none should be 'if cond is none:'
 
 exclude =
-    doc/source/basics.rst
+    doc/source/getting_started/basics.rst
     doc/source/development/contributing_docstring.rst
 
 

From 6a745d826d9d5f5f46e208b0dad4d4ce0524defe Mon Sep 17 00:00:00 2001
From: Marc Garcia <garcia.marc@gmail.com>
Date: Wed, 23 Jan 2019 18:30:34 +0000
Subject: [PATCH 11/48] DOC: Implementing redirect system, and adding
 user_guide redirects (#24715)

* DOC: Implementing redirect system, and adding user_guide redirects

* Using relative urls for the redirect

* Validating that no file is overwritten by a redirect

* Adding redirects for getting started and development sections
---
 doc/make.py       | 78 ++++++++++++++++++++++++++++++++++++++++++++++-
 doc/redirects.csv | 37 ++++++++++++++++++++++
 2 files changed, 114 insertions(+), 1 deletion(-)
 create mode 100644 doc/redirects.csv

diff --git a/doc/make.py b/doc/make.py
index 0b14a9dcd4c34..eb4a33a569c5a 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -15,15 +15,18 @@
 import sys
 import os
 import shutil
+import csv
 import subprocess
 import argparse
 import webbrowser
+import docutils
+import docutils.parsers.rst
 
 
 DOC_PATH = os.path.dirname(os.path.abspath(__file__))
 SOURCE_PATH = os.path.join(DOC_PATH, 'source')
 BUILD_PATH = os.path.join(DOC_PATH, 'build')
-BUILD_DIRS = ['doctrees', 'html', 'latex', 'plots', '_static', '_templates']
+REDIRECTS_FILE = os.path.join(DOC_PATH, 'redirects.csv')
 
 
 class DocBuilder:
@@ -139,6 +142,77 @@ def _open_browser(self, single_doc_html):
                            single_doc_html)
         webbrowser.open(url, new=2)
 
+    def _get_page_title(self, page):
+        """
+        Open the rst file `page` and extract its title.
+        """
+        fname = os.path.join(SOURCE_PATH, '{}.rst'.format(page))
+        option_parser = docutils.frontend.OptionParser(
+            components=(docutils.parsers.rst.Parser,))
+        doc = docutils.utils.new_document(
+            '<doc>',
+            option_parser.get_default_values())
+        with open(fname) as f:
+            data = f.read()
+
+        parser = docutils.parsers.rst.Parser()
+        # do not generate any warning when parsing the rst
+        with open(os.devnull, 'a') as f:
+            doc.reporter.stream = f
+            parser.parse(data, doc)
+
+        section = next(node for node in doc.children
+                       if isinstance(node, docutils.nodes.section))
+        title = next(node for node in section.children
+                     if isinstance(node, docutils.nodes.title))
+
+        return title.astext()
+
+    def _add_redirects(self):
+        """
+        Create in the build directory an html file with a redirect,
+        for every row in REDIRECTS_FILE.
+        """
+        html = '''
+        <html>
+            <head>
+                <meta http-equiv="refresh" content="0;URL={url}"/>
+            </head>
+            <body>
+                <p>
+                    The page has been moved to <a href="{url}">{title}</a>
+                </p>
+            </body>
+        </html>
+        '''
+        with open(REDIRECTS_FILE) as mapping_fd:
+            reader = csv.reader(mapping_fd)
+            for row in reader:
+                if not row or row[0].strip().startswith('#'):
+                    continue
+
+                path = os.path.join(BUILD_PATH,
+                                    'html',
+                                    *row[0].split('/')) + '.html'
+
+                try:
+                    title = self._get_page_title(row[1])
+                except Exception:
+                    # the file can be an ipynb and not an rst, or docutils
+                    # may not be able to read the rst because it has some
+                    # sphinx specific stuff
+                    title = 'this page'
+
+                if os.path.exists(path):
+                    raise RuntimeError((
+                        'Redirection would overwrite an existing file: '
+                        '{}').format(path))
+
+                with open(path, 'w') as moved_page_fd:
+                    moved_page_fd.write(
+                        html.format(url='{}.html'.format(row[1]),
+                                    title=title))
+
     def html(self):
         """
         Build HTML documentation.
@@ -150,6 +224,8 @@ def html(self):
 
         if self.single_doc_html is not None:
             self._open_browser(self.single_doc_html)
+        else:
+            self._add_redirects()
         return ret_code
 
     def latex(self, force=False):
diff --git a/doc/redirects.csv b/doc/redirects.csv
new file mode 100644
index 0000000000000..4f4b3d7fc0780
--- /dev/null
+++ b/doc/redirects.csv
@@ -0,0 +1,37 @@
+# This file should contain all the redirects in the documentation
+# in the format `<old_path>,<new_path>`
+
+# getting started
+10min,getting_started/10min
+basics,getting_started/basics
+dsintro,getting_started/dsintro
+overview,getting_started/overview
+tutorials,getting_started/tutorials
+
+# user guide
+advanced,user_guide/advanced
+categorical,user_guide/categorical
+computation,user_guide/computation
+enhancingperf,user_guide/enhancingperf
+gotchas,user_guide/gotchas
+groupby,user_guide/groupby
+indexing,user_guide/indexing
+integer_na,user_guide/integer_na
+io,user_guide/io
+merging,user_guide/merging
+missing_data,user_guide/missing_data
+options,user_guide/options
+reshaping,user_guide/reshaping
+sparse,user_guide/sparse
+style,user_guide/style
+text,user_guide/text
+timedeltas,user_guide/timedeltas
+timeseries,user_guide/timeseries
+visualization,user_guide/visualization
+
+# development
+contributing,development/contributing
+contributing_docstring,development/contributing_docstring
+developer,development/developer
+extending,development/extending
+internals,development/internals

From 2fa0835737a0e3e111e893d30ed2f25b7249fd4b Mon Sep 17 00:00:00 2001
From: Tom Augspurger <TomAugspurger@users.noreply.github.com>
Date: Wed, 23 Jan 2019 15:37:15 -0600
Subject: [PATCH 12/48] DOC: fixups (#24888)

* Fixed heading on whatnew
* Remove empty scalars.rst
---
 doc/source/api/scalars.rst      | 0
 doc/source/whatsnew/v0.24.0.rst | 5 ++++-
 2 files changed, 4 insertions(+), 1 deletion(-)
 delete mode 100644 doc/source/api/scalars.rst

diff --git a/doc/source/api/scalars.rst b/doc/source/api/scalars.rst
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 3dd345890881c..9198c610f0f44 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -13,6 +13,9 @@ What's New in 0.24.0 (January XX, 2019)
 These are the changes in pandas 0.24.0. See :ref:`release` for a full changelog
 including other versions of pandas.
 
+Enhancements
+~~~~~~~~~~~~
+
 Highlights include
 
 * :ref:`Optional Nullable Integer Support <whatsnew_0240.enhancements.intna>`
@@ -1165,7 +1168,7 @@ Other API Changes
 .. _whatsnew_0240.api.extension:
 
 ExtensionType Changes
-^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~
 
 **Equality and Hashability**
 

From 94d989e4ae682bbd39f5599ef1d1f455bea0c3fd Mon Sep 17 00:00:00 2001
From: Christopher Whelan <topherwhelan@gmail.com>
Date: Thu, 24 Jan 2019 04:44:54 -0800
Subject: [PATCH 13/48] CLN: fix typo in ctors.SeriesDtypesConstructors setup
 (#24894)

---
 asv_bench/benchmarks/ctors.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index 9082b4186bfa4..5715c4fb2d0d4 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -72,7 +72,7 @@ class SeriesDtypesConstructors(object):
 
     def setup(self):
         N = 10**4
-        self.arr = np.random.randn(N, N)
+        self.arr = np.random.randn(N)
         self.arr_str = np.array(['foo', 'bar', 'baz'], dtype=object)
         self.s = Series([Timestamp('20110101'), Timestamp('20120101'),
                          Timestamp('20130101')] * N * 10)

From 0c319f55761a60e0a626f57c0ee46d4326121cfd Mon Sep 17 00:00:00 2001
From: Tom Augspurger <TomAugspurger@users.noreply.github.com>
Date: Thu, 24 Jan 2019 08:44:11 -0600
Subject: [PATCH 14/48] DOC: No clean in sphinx_build (#24902)

Closes https://github.com/pandas-dev/pandas/issues/24727
---
 doc/make.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/doc/make.py b/doc/make.py
index eb4a33a569c5a..bc458d6b53cb0 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -121,8 +121,6 @@ def _sphinx_build(self, kind):
             raise ValueError('kind must be html or latex, '
                              'not {}'.format(kind))
 
-        self.clean()
-
         cmd = ['sphinx-build', '-b', kind]
         if self.num_jobs:
             cmd += ['-j', str(self.num_jobs)]

From 17ad7916b970e7117fcaf36b37e113d6aff9b4fb Mon Sep 17 00:00:00 2001
From: Joris Van den Bossche <jorisvandenbossche@gmail.com>
Date: Thu, 24 Jan 2019 16:25:29 +0100
Subject: [PATCH 15/48] BUG (output formatting): use fixed width for truncation
 column instead of inferring from last column (#24905)

---
 doc/source/whatsnew/v0.24.0.rst        |  1 +
 pandas/io/formats/format.py            | 12 ++------
 pandas/tests/io/formats/test_format.py | 41 ++++++++++++++++++--------
 3 files changed, 32 insertions(+), 22 deletions(-)

diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 9198c610f0f44..1c44d35aae4d1 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1755,6 +1755,7 @@ I/O
 - Bug in :meth:`DataFrame.to_stata` and :class:`pandas.io.stata.StataWriter117` that produced invalid files when using strLs with non-ASCII characters (:issue:`23573`)
 - Bug in :class:`HDFStore` that caused it to raise ``ValueError`` when reading a Dataframe in Python 3 from fixed format written in Python 2 (:issue:`24510`)
 - Bug in :func:`DataFrame.to_string()` and more generally in the floating ``repr`` formatter. Zeros were not trimmed if ``inf`` was present in a columns while it was the case with NA values. Zeros are now trimmed as in the presence of NA (:issue:`24861`).
+- Bug in the ``repr`` when truncating the number of columns and having a wide last column (:issue:`24849`).
 
 Plotting
 ^^^^^^^^
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 2c1fcab1ebde9..62fa04e784072 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -435,9 +435,6 @@ def _chk_truncate(self):
         """
         from pandas.core.reshape.concat import concat
 
-        # Column of which first element is used to determine width of a dot col
-        self.tr_size_col = -1
-
         # Cut the data to the information actually printed
         max_cols = self.max_cols
         max_rows = self.max_rows
@@ -556,10 +553,7 @@ def _to_str_columns(self):
 
         if truncate_h:
             col_num = self.tr_col_num
-            # infer from column header
-            col_width = self.adj.len(strcols[self.tr_size_col][0])
-            strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] *
-                           (len(str_index)))
+            strcols.insert(self.tr_col_num + 1, [' ...'] * (len(str_index)))
         if truncate_v:
             n_header_rows = len(str_index) - len(frame)
             row_num = self.tr_row_num
@@ -577,8 +571,8 @@ def _to_str_columns(self):
                 if ix == 0:
                     dot_mode = 'left'
                 elif is_dot_col:
-                    cwidth = self.adj.len(strcols[self.tr_size_col][0])
-                    dot_mode = 'center'
+                    cwidth = 4
+                    dot_mode = 'right'
                 else:
                     dot_mode = 'right'
                 dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 31ab1e050d95c..5d922ccaf1fd5 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -345,6 +345,15 @@ def test_repr_truncates_terminal_size_full(self, monkeypatch):
                             lambda: terminal_size)
         assert "..." not in str(df)
 
+    def test_repr_truncation_column_size(self):
+        # dataframe with last column very wide -> check it is not used to
+        # determine size of truncation (...) column
+        df = pd.DataFrame({'a': [108480, 30830], 'b': [12345, 12345],
+                           'c': [12345, 12345], 'd': [12345, 12345],
+                           'e': ['a' * 50] * 2})
+        assert "..." in str(df)
+        assert "    ...    " not in str(df)
+
     def test_repr_max_columns_max_rows(self):
         term_width, term_height = get_terminal_size()
         if term_width < 10 or term_height < 10:
@@ -543,7 +552,7 @@ def test_to_string_with_formatters_unicode(self):
             formatters={u('c/\u03c3'): lambda x: '{x}'.format(x=x)})
         assert result == u('  c/\u03c3\n') + '0   1\n1   2\n2   3'
 
-    def test_east_asian_unicode_frame(self):
+    def test_east_asian_unicode_false(self):
         if PY3:
             _rep = repr
         else:
@@ -643,17 +652,23 @@ def test_east_asian_unicode_frame(self):
                                u'ああああ': [u'さ', u'し', u'す', u'せ']},
                               columns=['a', 'b', 'c', u'ああああ'])
 
-            expected = (u"        a ...  ああああ\n0   あああああ ...     さ\n"
-                        u"..    ... ...   ...\n3     えええ ...     せ\n"
+            expected = (u"        a  ... ああああ\n0   あああああ  ...    さ\n"
+                        u"..    ...  ...  ...\n3     えええ  ...    せ\n"
                         u"\n[4 rows x 4 columns]")
             assert _rep(df) == expected
 
             df.index = [u'あああ', u'いいいい', u'う', 'aaa']
-            expected = (u"         a ...  ああああ\nあああ  あああああ ...     さ\n"
-                        u"..     ... ...   ...\naaa    えええ ...     せ\n"
+            expected = (u"         a  ... ああああ\nあああ  あああああ  ...    さ\n"
+                        u"..     ...  ...  ...\naaa    えええ  ...    せ\n"
                         u"\n[4 rows x 4 columns]")
             assert _rep(df) == expected
 
+    def test_east_asian_unicode_true(self):
+        if PY3:
+            _rep = repr
+        else:
+            _rep = unicode  # noqa
+
         # Emable Unicode option -----------------------------------------
         with option_context('display.unicode.east_asian_width', True):
 
@@ -757,18 +772,18 @@ def test_east_asian_unicode_frame(self):
                                    u'ああああ': [u'さ', u'し', u'す', u'せ']},
                                   columns=['a', 'b', 'c', u'ああああ'])
 
-                expected = (u"             a   ...    ああああ\n"
-                            u"0   あああああ   ...          さ\n"
-                            u"..         ...   ...         ...\n"
-                            u"3       えええ   ...          せ\n"
+                expected = (u"             a  ... ああああ\n"
+                            u"0   あああああ  ...       さ\n"
+                            u"..         ...  ...      ...\n"
+                            u"3       えええ  ...       せ\n"
                             u"\n[4 rows x 4 columns]")
                 assert _rep(df) == expected
 
                 df.index = [u'あああ', u'いいいい', u'う', 'aaa']
-                expected = (u"                 a   ...    ああああ\n"
-                            u"あああ  あああああ   ...          さ\n"
-                            u"...            ...   ...         ...\n"
-                            u"aaa         えええ   ...          せ\n"
+                expected = (u"                 a  ... ああああ\n"
+                            u"あああ  あああああ  ...       さ\n"
+                            u"...            ...  ...      ...\n"
+                            u"aaa         えええ  ...       せ\n"
                             u"\n[4 rows x 4 columns]")
                 assert _rep(df) == expected
 

From e2c0b120eb19ecc93a70f9a9c3cb51417cb55d1f Mon Sep 17 00:00:00 2001
From: Joris Van den Bossche <jorisvandenbossche@gmail.com>
Date: Thu, 24 Jan 2019 16:26:06 +0100
Subject: [PATCH 16/48] DOC: also redirect old whatsnew url (#24906)

---
 doc/redirects.csv | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/doc/redirects.csv b/doc/redirects.csv
index 4f4b3d7fc0780..e0de03745aaa8 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -1,6 +1,10 @@
 # This file should contain all the redirects in the documentation
 # in the format `<old_path>,<new_path>`
 
+# whatsnew
+whatsnew,whatsnew/index
+release,whatsnew/index
+
 # getting started
 10min,getting_started/10min
 basics,getting_started/basics

From 0a4665a5fe716c28ec2e756611b64efd32d63148 Mon Sep 17 00:00:00 2001
From: Tom Augspurger <TomAugspurger@users.noreply.github.com>
Date: Thu, 24 Jan 2019 10:45:05 -0600
Subject: [PATCH 17/48] Revert BUG-24212 fix usage of Index.take in pd.merge
 (#24904)

* Revert BUG-24212 fix usage of Index.take in pd.merge

xref https://github.com/pandas-dev/pandas/pull/24733/
xref https://github.com/pandas-dev/pandas/issues/24897

* test 0.23.4 output

* added note about buggy test
---
 doc/source/whatsnew/v0.24.0.rst          |  1 -
 pandas/core/reshape/merge.py             | 41 ++----------------------
 pandas/tests/reshape/merge/test_merge.py | 17 ++++++++++
 3 files changed, 19 insertions(+), 40 deletions(-)

diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 1c44d35aae4d1..4efe24789af28 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1827,7 +1827,6 @@ Reshaping
 - Bug in :func:`DataFrame.unstack` where a ``ValueError`` was raised when unstacking timezone aware values (:issue:`18338`)
 - Bug in :func:`DataFrame.stack` where timezone aware values were converted to timezone naive values (:issue:`19420`)
 - Bug in :func:`merge_asof` where a ``TypeError`` was raised when ``by_col`` were timezone aware values (:issue:`21184`)
-- Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`)
 - Bug showing an incorrect shape when throwing error during ``DataFrame`` construction. (:issue:`20742`)
 
 .. _whatsnew_0240.bug_fixes.sparse:
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 0a51f2ee0dce7..e11847d2b8ce2 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -757,19 +757,13 @@ def _get_join_info(self):
 
             if self.right_index:
                 if len(self.left) > 0:
-                    join_index = self._create_join_index(self.left.index,
-                                                         self.right.index,
-                                                         left_indexer,
-                                                         how='right')
+                    join_index = self.left.index.take(left_indexer)
                 else:
                     join_index = self.right.index.take(right_indexer)
                     left_indexer = np.array([-1] * len(join_index))
             elif self.left_index:
                 if len(self.right) > 0:
-                    join_index = self._create_join_index(self.right.index,
-                                                         self.left.index,
-                                                         right_indexer,
-                                                         how='left')
+                    join_index = self.right.index.take(right_indexer)
                 else:
                     join_index = self.left.index.take(left_indexer)
                     right_indexer = np.array([-1] * len(join_index))
@@ -780,37 +774,6 @@ def _get_join_info(self):
             join_index = join_index.astype(object)
         return join_index, left_indexer, right_indexer
 
-    def _create_join_index(self, index, other_index, indexer, how='left'):
-        """
-        Create a join index by rearranging one index to match another
-
-        Parameters
-        ----------
-        index: Index being rearranged
-        other_index: Index used to supply values not found in index
-        indexer: how to rearrange index
-        how: replacement is only necessary if indexer based on other_index
-
-        Returns
-        -------
-        join_index
-        """
-        join_index = index.take(indexer)
-        if (self.how in (how, 'outer') and
-                not isinstance(other_index, MultiIndex)):
-            # if final index requires values in other_index but not target
-            # index, indexer may hold missing (-1) values, causing Index.take
-            # to take the final value in target index
-            mask = indexer == -1
-            if np.any(mask):
-                # if values missing (-1) from target index,
-                # take from other_index instead
-                join_list = join_index.to_numpy()
-                join_list[mask] = other_index.to_numpy()[mask]
-                join_index = Index(join_list, dtype=join_index.dtype,
-                                   name=join_index.name)
-        return join_index
-
     def _get_merge_keys(self):
         """
         Note: has side effects (copy/delete key columns)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index e123a5171769d..f0a3ddc8ce8a4 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -940,6 +940,7 @@ def test_merge_two_empty_df_no_division_error(self):
             merge(a, a, on=('a', 'b'))
 
     @pytest.mark.parametrize('how', ['left', 'outer'])
+    @pytest.mark.xfail(reason="GH-24897")
     def test_merge_on_index_with_more_values(self, how):
         # GH 24212
         # pd.merge gets [-1, -1, 0, 1] as right_indexer, ensure that -1 is
@@ -959,6 +960,22 @@ def test_merge_on_index_with_more_values(self, how):
         expected.set_index('a', drop=False, inplace=True)
         assert_frame_equal(result, expected)
 
+    def test_merge_right_index_right(self):
+        # Note: the expected output here is probably incorrect.
+        # See https://github.com/pandas-dev/pandas/issues/17257 for more.
+        # We include this as a regression test for GH-24897.
+        left = pd.DataFrame({'a': [1, 2, 3], 'key': [0, 1, 1]})
+        right = pd.DataFrame({'b': [1, 2, 3]})
+
+        expected = pd.DataFrame({'a': [1, 2, 3, None],
+                                 'key': [0, 1, 1, 2],
+                                 'b': [1, 2, 2, 3]},
+                                columns=['a', 'key', 'b'],
+                                index=[0, 1, 2, 2])
+        result = left.merge(right, left_on='key', right_index=True,
+                            how='right')
+        tm.assert_frame_equal(result, expected)
+
 
 def _check_merge(x, y):
     for how in ['inner', 'left', 'outer']:

From fa12b9ecbea36a504249e74579ee2124f98bcc3f Mon Sep 17 00:00:00 2001
From: Tom Augspurger <TomAugspurger@users.noreply.github.com>
Date: Thu, 24 Jan 2019 10:48:54 -0600
Subject: [PATCH 18/48] DOC: Add experimental note to DatetimeArray and
 TimedeltaArray (#24882)

* DOC: Add experimental note to DatetimeArray and TimedeltaArray
---
 doc/source/user_guide/integer_na.rst |  6 +++++
 doc/source/whatsnew/v0.24.0.rst      |  8 ++++++
 pandas/core/arrays/datetimes.py      | 13 ++++++++++
 pandas/core/arrays/integer.py        | 39 +++++++++++++++++++++++++---
 pandas/core/arrays/timedeltas.py     | 36 +++++++++++++++++++++++++
 5 files changed, 99 insertions(+), 3 deletions(-)

diff --git a/doc/source/user_guide/integer_na.rst b/doc/source/user_guide/integer_na.rst
index eb0c5e3d05863..c5667e9319ca6 100644
--- a/doc/source/user_guide/integer_na.rst
+++ b/doc/source/user_guide/integer_na.rst
@@ -10,6 +10,12 @@ Nullable Integer Data Type
 
 .. versionadded:: 0.24.0
 
+.. note::
+
+   IntegerArray is currently experimental. Its API or implementation may
+   change without warning.
+
+
 In :ref:`missing_data`, we saw that pandas primarily uses ``NaN`` to represent
 missing data. Because ``NaN`` is a float, this forces an array of integers with
 any missing values to become floating point. In some cases, this may not matter
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 4efe24789af28..3b3fad22ce949 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -32,6 +32,11 @@ Optional Integer NA Support
 Pandas has gained the ability to hold integer dtypes with missing values. This long requested feature is enabled through the use of :ref:`extension types <extending.extension-types>`.
 Here is an example of the usage.
 
+.. note::
+
+   IntegerArray is currently experimental. Its API or implementation may
+   change without warning.
+
 We can construct a ``Series`` with the specified dtype. The dtype string ``Int64`` is a pandas ``ExtensionDtype``. Specifying a list or array using the traditional missing value
 marker of ``np.nan`` will infer to integer dtype. The display of the ``Series`` will also use the ``NaN`` to indicate missing values in string outputs. (:issue:`20700`, :issue:`20747`, :issue:`22441`, :issue:`21789`, :issue:`22346`)
 
@@ -213,6 +218,9 @@ from the ``Series``:
    ser.array
    pser.array
 
+These return an instance of :class:`IntervalArray` or :class:`arrays.PeriodArray`,
+the new extension arrays that back interval and period data.
+
 .. warning::
 
    For backwards compatibility, :attr:`Series.values` continues to return
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index f2aeb1c1309de..d7a8417a71be2 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -218,6 +218,13 @@ class DatetimeArray(dtl.DatetimeLikeArrayMixin,
 
     .. versionadded:: 0.24.0
 
+    .. warning::
+
+       DatetimeArray is currently experimental, and its API may change
+       without warning. In particular, :attr:`DatetimeArray.dtype` is
+       expected to change to always be an instance of an ``ExtensionDtype``
+       subclass.
+
     Parameters
     ----------
     values : Series, Index, DatetimeArray, ndarray
@@ -511,6 +518,12 @@ def dtype(self):
         """
         The dtype for the DatetimeArray.
 
+        .. warning::
+
+           A future version of pandas will change dtype to never be a
+           ``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
+           always be an instance of an ``ExtensionDtype`` subclass.
+
         Returns
         -------
         numpy.dtype or DatetimeTZDtype
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index b3dde6bf2bd93..a6a4a49d3a939 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -225,24 +225,57 @@ class IntegerArray(ExtensionArray, ExtensionOpsMixin):
     """
     Array of integer (optional missing) values.
 
+    .. versionadded:: 0.24.0
+
+    .. warning::
+
+       IntegerArray is currently experimental, and its API or internal
+       implementation may change without warning.
+
     We represent an IntegerArray with 2 numpy arrays:
 
     - data: contains a numpy integer array of the appropriate dtype
     - mask: a boolean array holding a mask on the data, True is missing
 
     To construct an IntegerArray from generic array-like input, use
-    ``integer_array`` function instead.
+    :func:`pandas.array` with one of the integer dtypes (see examples).
+
+    See :ref:`integer_na` for more.
 
     Parameters
     ----------
-    values : integer 1D numpy array
-    mask : boolean 1D numpy array
+    values : numpy.ndarray
+        A 1-d integer-dtype array.
+    mask : numpy.ndarray
+        A 1-d boolean-dtype array indicating missing values.
     copy : bool, default False
+        Whether to copy the `values` and `mask`.
 
     Returns
     -------
     IntegerArray
 
+    Examples
+    --------
+    Create an IntegerArray with :func:`pandas.array`.
+
+    >>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
+    >>> int_array
+    <IntegerArray>
+    [1, NaN, 3]
+    Length: 3, dtype: Int32
+
+    String aliases for the dtypes are also available. They are capitalized.
+
+    >>> pd.array([1, None, 3], dtype='Int32')
+    <IntegerArray>
+    [1, NaN, 3]
+    Length: 3, dtype: Int32
+
+    >>> pd.array([1, None, 3], dtype='UInt16')
+    <IntegerArray>
+    [1, NaN, 3]
+    Length: 3, dtype: UInt16
     """
 
     @cache_readonly
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 910cb96a86216..4f0c96f7927da 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -107,6 +107,29 @@ def wrapper(self, other):
 
 
 class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps):
+    """
+    Pandas ExtensionArray for timedelta data.
+
+    .. versionadded:: 0.24.0
+
+    .. warning::
+
+       TimedeltaArray is currently experimental, and its API may change
+       without warning. In particular, :attr:`TimedeltaArray.dtype` is
+       expected to change to be an instance of an ``ExtensionDtype``
+       subclass.
+
+    Parameters
+    ----------
+    values : array-like
+        The timedelta data.
+
+    dtype : numpy.dtype
+        Currently, only ``numpy.dtype("timedelta64[ns]")`` is accepted.
+    freq : Offset, optional
+    copy : bool, default False
+        Whether to copy the underlying array of data.
+    """
     _typ = "timedeltaarray"
     _scalar_type = Timedelta
     __array_priority__ = 1000
@@ -128,6 +151,19 @@ def _box_func(self):
 
     @property
     def dtype(self):
+        """
+        The dtype for the TimedeltaArray.
+
+        .. warning::
+
+           A future version of pandas will change dtype to be an instance
+           of a :class:`pandas.api.extensions.ExtensionDtype` subclass,
+           not a ``numpy.dtype``.
+
+        Returns
+        -------
+        numpy.dtype
+        """
         return _TD_DTYPE
 
     # ----------------------------------------------------------------

From 5761e359b65631db491349a65fc21d0da51dcc0f Mon Sep 17 00:00:00 2001
From: Tom Augspurger <TomAugspurger@users.noreply.github.com>
Date: Thu, 24 Jan 2019 13:01:23 -0600
Subject: [PATCH 19/48] Disable M8 in nanops (#24907)

* Disable M8 in nanops

Closes https://github.com/pandas-dev/pandas/issues/24752
---
 pandas/core/nanops.py                |  6 ++++--
 pandas/tests/frame/test_analytics.py | 19 +++++++++++++++++++
 pandas/tests/test_nanops.py          |  3 +++
 3 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index cafd3a9915fa0..86c3c380636c9 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -14,7 +14,8 @@
     _get_dtype, is_any_int_dtype, is_bool_dtype, is_complex, is_complex_dtype,
     is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype,
     is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype,
-    is_object_dtype, is_scalar, is_timedelta64_dtype)
+    is_object_dtype, is_scalar, is_timedelta64_dtype, pandas_dtype)
+from pandas.core.dtypes.dtypes import DatetimeTZDtype
 from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
 
 import pandas.core.common as com
@@ -57,7 +58,7 @@ class disallow(object):
 
     def __init__(self, *dtypes):
         super(disallow, self).__init__()
-        self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
+        self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes)
 
     def check(self, obj):
         return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
@@ -437,6 +438,7 @@ def nansum(values, axis=None, skipna=True, min_count=0, mask=None):
     return _wrap_results(the_sum, dtype)
 
 
+@disallow('M8', DatetimeTZDtype)
 @bottleneck_switch()
 def nanmean(values, axis=None, skipna=True, mask=None):
     """
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index f2c3f50c291c3..386e5f57617cf 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -794,6 +794,25 @@ def test_mean(self, float_frame_with_na, float_frame, float_string_frame):
                             check_dates=True)
         assert_stat_op_api('mean', float_frame, float_string_frame)
 
+    @pytest.mark.parametrize('tz', [None, 'UTC'])
+    def test_mean_mixed_datetime_numeric(self, tz):
+        # https://github.com/pandas-dev/pandas/issues/24752
+        df = pd.DataFrame({"A": [1, 1],
+                           "B": [pd.Timestamp('2000', tz=tz)] * 2})
+        result = df.mean()
+        expected = pd.Series([1.0], index=['A'])
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.parametrize('tz', [None, 'UTC'])
+    def test_mean_excludeds_datetimes(self, tz):
+        # https://github.com/pandas-dev/pandas/issues/24752
+        # Our long-term desired behavior is unclear, but the behavior in
+        # 0.24.0rc1 was buggy.
+        df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
+        result = df.mean()
+        expected = pd.Series()
+        tm.assert_series_equal(result, expected)
+
     def test_product(self, float_frame_with_na, float_frame,
                      float_string_frame):
         assert_stat_op_calc('product', np.prod, float_frame_with_na)
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 4bcd16a86e865..cf5ef6cf15eca 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -971,6 +971,9 @@ def prng(self):
 
 class TestDatetime64NaNOps(object):
     @pytest.mark.parametrize('tz', [None, 'UTC'])
+    @pytest.mark.xfail(reason="disabled")
+    # Enabling mean changes the behavior of DataFrame.mean
+    # See https://github.com/pandas-dev/pandas/issues/24752
     def test_nanmean(self, tz):
         dti = pd.date_range('2016-01-01', periods=3, tz=tz)
         expected = dti[1]

From 539c54f30736b4163b70f1ba903aee7a4243ab1f Mon Sep 17 00:00:00 2001
From: Christopher Whelan <topherwhelan@gmail.com>
Date: Thu, 24 Jan 2019 18:26:45 -0800
Subject: [PATCH 20/48] CLN: fix typo in asv benchmark of non_unique_sorted,
 which was not sorted (#24917)

---
 asv_bench/benchmarks/index_object.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index f76040921393f..bbe164d4858ab 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -138,7 +138,8 @@ def setup(self, dtype):
         self.sorted = self.idx.sort_values()
         half = N // 2
         self.non_unique = self.idx[:half].append(self.idx[:half])
-        self.non_unique_sorted = self.sorted[:half].append(self.sorted[:half])
+        self.non_unique_sorted = (self.sorted[:half].append(self.sorted[:half])
+                                  .sort_values())
         self.key = self.sorted[N // 4]
 
     def time_boolean_array(self, dtype):

From f971b11024362c797fd5e83ea2f2b708157e39c8 Mon Sep 17 00:00:00 2001
From: Joris Van den Bossche <jorisvandenbossche@gmail.com>
Date: Fri, 25 Jan 2019 08:10:40 +0100
Subject: [PATCH 21/48] API/VIS: remove misc plotting methods from plot
 accessor (revert #23811) (#24912)

---
 doc/source/whatsnew/v0.24.0.rst      |  1 -
 pandas/plotting/_core.py             | 23 -----------------------
 pandas/tests/plotting/test_frame.py  | 16 ----------------
 pandas/tests/plotting/test_series.py | 13 -------------
 4 files changed, 53 deletions(-)

diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 3b3fad22ce949..e7c9a4752db06 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -429,7 +429,6 @@ Other Enhancements
 - :meth:`MultiIndex.to_flat_index` has been added to flatten multiple levels into a single-level :class:`Index` object.
 - :meth:`DataFrame.to_stata` and :class:`pandas.io.stata.StataWriter117` can write mixed sting columns to Stata strl format (:issue:`23633`)
 - :meth:`DataFrame.between_time` and :meth:`DataFrame.at_time` have gained the ``axis`` parameter (:issue:`8839`)
-- The ``scatter_matrix``, ``andrews_curves``, ``parallel_coordinates``, ``lag_plot``, ``autocorrelation_plot``, ``bootstrap_plot``, and ``radviz`` plots from the ``pandas.plotting`` module are now accessible from calling :meth:`DataFrame.plot` (:issue:`11978`)
 - :meth:`DataFrame.to_records` now accepts ``index_dtypes`` and ``column_dtypes`` parameters to allow different data types in stored column and index records (:issue:`18146`)
 - :class:`IntervalIndex` has gained the :attr:`~IntervalIndex.is_overlapping` attribute to indicate if the ``IntervalIndex`` contains any overlapping intervals (:issue:`23309`)
 - :func:`pandas.DataFrame.to_sql` has gained the ``method`` argument to control SQL insertion clause. See the :ref:`insertion method <io.sql.method>` section in the documentation. (:issue:`8953`)
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 3ba06c0638317..e543ab88f53b2 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -26,7 +26,6 @@
 from pandas.core.generic import _shared_doc_kwargs, _shared_docs
 
 from pandas.io.formats.printing import pprint_thing
-from pandas.plotting import _misc as misc
 from pandas.plotting._compat import _mpl_ge_3_0_0
 from pandas.plotting._style import _get_standard_colors, plot_params
 from pandas.plotting._tools import (
@@ -2906,15 +2905,6 @@ def pie(self, **kwds):
         """
         return self(kind='pie', **kwds)
 
-    def lag(self, *args, **kwds):
-        return misc.lag_plot(self._parent, *args, **kwds)
-
-    def autocorrelation(self, *args, **kwds):
-        return misc.autocorrelation_plot(self._parent, *args, **kwds)
-
-    def bootstrap(self, *args, **kwds):
-        return misc.bootstrap_plot(self._parent, *args, **kwds)
-
 
 class FramePlotMethods(BasePlotMethods):
     """DataFrame plotting accessor and method
@@ -3610,16 +3600,3 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
         if gridsize is not None:
             kwds['gridsize'] = gridsize
         return self(kind='hexbin', x=x, y=y, C=C, **kwds)
-
-    def scatter_matrix(self, *args, **kwds):
-        return misc.scatter_matrix(self._parent, *args, **kwds)
-
-    def andrews_curves(self, class_column, *args, **kwds):
-        return misc.andrews_curves(self._parent, class_column, *args, **kwds)
-
-    def parallel_coordinates(self, class_column, *args, **kwds):
-        return misc.parallel_coordinates(self._parent, class_column,
-                                         *args, **kwds)
-
-    def radviz(self, class_column, *args, **kwds):
-        return misc.radviz(self._parent, class_column, *args, **kwds)
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 0e7672f4e2f9d..98b241f5c8206 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -2988,22 +2988,6 @@ def test_secondary_axis_font_size(self, method):
         self._check_ticks_props(axes=ax.right_ax,
                                 ylabelsize=fontsize)
 
-    def test_misc_bindings(self, monkeypatch):
-        df = pd.DataFrame(randn(10, 10), columns=list('abcdefghij'))
-        monkeypatch.setattr('pandas.plotting._misc.scatter_matrix',
-                            lambda x: 2)
-        monkeypatch.setattr('pandas.plotting._misc.andrews_curves',
-                            lambda x, y: 2)
-        monkeypatch.setattr('pandas.plotting._misc.parallel_coordinates',
-                            lambda x, y: 2)
-        monkeypatch.setattr('pandas.plotting._misc.radviz',
-                            lambda x, y: 2)
-
-        assert df.plot.scatter_matrix() == 2
-        assert df.plot.andrews_curves('a') == 2
-        assert df.plot.parallel_coordinates('a') == 2
-        assert df.plot.radviz('a') == 2
-
 
 def _generate_4_axes_via_gridspec():
     import matplotlib.pyplot as plt
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 1e223c20f55b7..07a4b168a66f1 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -878,19 +878,6 @@ def test_custom_business_day_freq(self):
 
         _check_plot_works(s.plot)
 
-    def test_misc_bindings(self, monkeypatch):
-        s = Series(randn(10))
-        monkeypatch.setattr('pandas.plotting._misc.lag_plot',
-                            lambda x: 2)
-        monkeypatch.setattr('pandas.plotting._misc.autocorrelation_plot',
-                            lambda x: 2)
-        monkeypatch.setattr('pandas.plotting._misc.bootstrap_plot',
-                            lambda x: 2)
-
-        assert s.plot.lag() == 2
-        assert s.plot.autocorrelation() == 2
-        assert s.plot.bootstrap() == 2
-
     @pytest.mark.xfail
     def test_plot_accessor_updates_on_inplace(self):
         s = Series([1, 2, 3, 4])

From bb86a9d3708c3ddf1f301d7988128e84518d0cce Mon Sep 17 00:00:00 2001
From: Joris Van den Bossche <jorisvandenbossche@gmail.com>
Date: Fri, 25 Jan 2019 09:45:35 +0100
Subject: [PATCH 22/48] DOC: some 0.24.0 whatsnew clean-up (#24911)

---
 doc/source/whatsnew/v0.24.0.rst | 116 +++++++++++++++++---------------
 1 file changed, 62 insertions(+), 54 deletions(-)

diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index e7c9a4752db06..f0f99d2def136 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -10,27 +10,34 @@ What's New in 0.24.0 (January XX, 2019)
 
 {{ header }}
 
-These are the changes in pandas 0.24.0. See :ref:`release` for a full changelog
-including other versions of pandas.
+This is a major release from 0.23.4 and includes a number of API changes, new
+features, enhancements, and performance improvements along with a large number
+of bug fixes.
 
-Enhancements
-~~~~~~~~~~~~
+Highlights include:
 
-Highlights include
-
-* :ref:`Optional Nullable Integer Support <whatsnew_0240.enhancements.intna>`
+* :ref:`Optional Integer NA Support <whatsnew_0240.enhancements.intna>`
 * :ref:`New APIs for accessing the array backing a Series or Index <whatsnew_0240.values_api>`
 * :ref:`A new top-level method for creating arrays <whatsnew_0240.enhancements.array>`
 * :ref:`Store Interval and Period data in a Series or DataFrame <whatsnew_0240.enhancements.interval>`
 * :ref:`Support for joining on two MultiIndexes <whatsnew_0240.enhancements.join_with_two_multiindexes>`
 
+
+Check the :ref:`API Changes <whatsnew_0240.api_breaking>` and :ref:`deprecations <whatsnew_0240.deprecations>` before updating.
+
+These are the changes in pandas 0.24.0. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+
+Enhancements
+~~~~~~~~~~~~
+
 .. _whatsnew_0240.enhancements.intna:
 
 Optional Integer NA Support
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Pandas has gained the ability to hold integer dtypes with missing values. This long requested feature is enabled through the use of :ref:`extension types <extending.extension-types>`.
-Here is an example of the usage.
 
 .. note::
 
@@ -65,7 +72,7 @@ Operations on these dtypes will propagate ``NaN`` as other pandas operations.
    # coerce when needed
    s + 0.01
 
-These dtypes can operate as part of of ``DataFrame``.
+These dtypes can operate as part of a ``DataFrame``.
 
 .. ipython:: python
 
@@ -74,7 +81,7 @@ These dtypes can operate as part of of ``DataFrame``.
    df.dtypes
 
 
-These dtypes can be merged & reshaped & casted.
+These dtypes can be merged, reshaped, and casted.
 
 .. ipython:: python
 
@@ -117,6 +124,7 @@ a new ndarray of period objects each time.
 
 .. ipython:: python
 
+   idx.values
    id(idx.values)
    id(idx.values)
 
@@ -129,7 +137,7 @@ If you need an actual NumPy array, use :meth:`Series.to_numpy` or :meth:`Index.t
 
 For Series and Indexes backed by normal NumPy arrays, :attr:`Series.array` will return a
 new :class:`arrays.PandasArray`, which is a thin (no-copy) wrapper around a
-:class:`numpy.ndarray`. :class:`arrays.PandasArray` isn't especially useful on its own,
+:class:`numpy.ndarray`. :class:`~arrays.PandasArray` isn't especially useful on its own,
 but it does provide the same interface as any extension array defined in pandas or by
 a third-party library.
 
@@ -147,14 +155,13 @@ See :ref:`Dtypes <basics.dtypes>` and :ref:`Attributes and Underlying Data <basi
 
 .. _whatsnew_0240.enhancements.array:
 
-Array
-^^^^^
+``pandas.array``: a new top-level method for creating arrays
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 A new top-level method :func:`array` has been added for creating 1-dimensional arrays (:issue:`22860`).
 This can be used to create any :ref:`extension array <extending.extension-types>`, including
-extension arrays registered by :ref:`3rd party libraries <ecosystem.extensions>`. See
-
-See :ref:`Dtypes <basics.dtypes>` for more on extension arrays.
+extension arrays registered by :ref:`3rd party libraries <ecosystem.extensions>`.
+See the :ref:`dtypes docs <basics.dtypes>` for more on extension arrays.
 
 .. ipython:: python
 
@@ -163,15 +170,15 @@ See :ref:`Dtypes <basics.dtypes>` for more on extension arrays.
 
 Passing data for which there isn't dedicated extension type (e.g. float, integer, etc.)
 will return a new :class:`arrays.PandasArray`, which is just a thin (no-copy)
-wrapper around a :class:`numpy.ndarray` that satisfies the extension array interface.
+wrapper around a :class:`numpy.ndarray` that satisfies the pandas extension array interface.
 
 .. ipython:: python
 
    pd.array([1, 2, 3])
 
-On their own, a :class:`arrays.PandasArray` isn't a very useful object.
+On their own, a :class:`~arrays.PandasArray` isn't a very useful object.
 But if you need write low-level code that works generically for any
-:class:`~pandas.api.extensions.ExtensionArray`, :class:`arrays.PandasArray`
+:class:`~pandas.api.extensions.ExtensionArray`, :class:`~arrays.PandasArray`
 satisfies that need.
 
 Notice that by default, if no ``dtype`` is specified, the dtype of the returned
@@ -202,7 +209,7 @@ For periods:
 
 .. ipython:: python
 
-   pser = pd.Series(pd.date_range("2000", freq="D", periods=5))
+   pser = pd.Series(pd.period_range("2000", freq="D", periods=5))
    pser
    pser.dtype
 
@@ -267,23 +274,6 @@ For earlier versions this can be done using the following.
    pd.merge(left.reset_index(), right.reset_index(),
             on=['key'], how='inner').set_index(['key', 'X', 'Y'])
 
-
-.. _whatsnew_0240.enhancements.extension_array_operators:
-
-``ExtensionArray`` operator support
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A ``Series`` based on an ``ExtensionArray`` now supports arithmetic and comparison
-operators (:issue:`19577`). There are two approaches for providing operator support for an ``ExtensionArray``:
-
-1. Define each of the operators on your ``ExtensionArray`` subclass.
-2. Use an operator implementation from pandas that depends on operators that are already defined
-   on the underlying elements (scalars) of the ``ExtensionArray``.
-
-See the :ref:`ExtensionArray Operator Support
-<extending.extension.operator>` documentation section for details on both
-ways of adding operator support.
-
 .. _whatsnew_0240.enhancements.read_html:
 
 ``read_html`` Enhancements
@@ -343,7 +333,7 @@ convenient way to apply users' predefined styling functions, and can help reduce
     df.style.pipe(format_and_align).set_caption('Summary of results.')
 
 Similar methods already exist for other classes in pandas, including :meth:`DataFrame.pipe`,
-:meth:`pandas.core.groupby.GroupBy.pipe`, and :meth:`pandas.core.resample.Resampler.pipe`.
+:meth:`GroupBy.pipe() <pandas.core.groupby.GroupBy.pipe>`, and :meth:`Resampler.pipe() <pandas.core.resample.Resampler.pipe>`.
 
 .. _whatsnew_0240.enhancements.rename_axis:
 
@@ -351,7 +341,7 @@ Renaming names in a MultiIndex
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 :func:`DataFrame.rename_axis` now supports ``index`` and ``columns`` arguments
-and :func:`Series.rename_axis` supports ``index`` argument (:issue:`19978`)
+and :func:`Series.rename_axis` supports ``index`` argument (:issue:`19978`).
 
 This change allows a dictionary to be passed so that some of the names
 of a ``MultiIndex`` can be changed.
@@ -379,13 +369,13 @@ Other Enhancements
 - :func:`DataFrame.to_parquet` now accepts ``index`` as an argument, allowing
   the user to override the engine's default behavior to include or omit the
   dataframe's indexes from the resulting Parquet file. (:issue:`20768`)
+- :func:`read_feather` now accepts ``columns`` as an argument, allowing the user to specify which columns should be read. (:issue:`24025`)
 - :meth:`DataFrame.corr` and :meth:`Series.corr` now accept a callable for generic calculation methods of correlation, e.g. histogram intersection (:issue:`22684`)
 - :func:`DataFrame.to_string` now accepts ``decimal`` as an argument, allowing the user to specify which decimal separator should be used in the output. (:issue:`23614`)
-- :func:`read_feather` now accepts ``columns`` as an argument, allowing the user to specify which columns should be read. (:issue:`24025`)
 - :func:`DataFrame.to_html` now accepts ``render_links`` as an argument, allowing the user to generate HTML with links to any URLs that appear in the DataFrame.
   See the :ref:`section on writing HTML <io.html>` in the IO docs for example usage. (:issue:`2679`)
 - :func:`pandas.read_csv` now supports pandas extension types as an argument to ``dtype``, allowing the user to use pandas extension types when reading CSVs. (:issue:`23228`)
-- :meth:`DataFrame.shift` :meth:`Series.shift`, :meth:`ExtensionArray.shift`, :meth:`SparseArray.shift`, :meth:`Period.shift`, :meth:`GroupBy.shift`, :meth:`Categorical.shift`, :meth:`NDFrame.shift` and :meth:`Block.shift` now accept `fill_value` as an argument, allowing the user to specify a value which will be used instead of NA/NaT in the empty periods. (:issue:`15486`)
+- The :meth:`~DataFrame.shift` method now accepts ``fill_value`` as an argument, allowing the user to specify a value which will be used instead of NA/NaT in the empty periods. (:issue:`15486`)
 - :func:`to_datetime` now supports the ``%Z`` and ``%z`` directive when passed into ``format`` (:issue:`13486`)
 - :func:`Series.mode` and :func:`DataFrame.mode` now support the ``dropna`` parameter which can be used to specify whether ``NaN``/``NaT`` values should be considered (:issue:`17534`)
 - :func:`DataFrame.to_csv` and :func:`Series.to_csv` now support the ``compression`` keyword when a file handle is passed. (:issue:`21227`)
@@ -407,18 +397,19 @@ Other Enhancements
   The default compression for ``to_csv``, ``to_json``, and ``to_pickle`` methods has been updated to ``'infer'`` (:issue:`22004`).
 - :meth:`DataFrame.to_sql` now supports writing ``TIMESTAMP WITH TIME ZONE`` types for supported databases. For databases that don't support timezones, datetime data will be stored as timezone unaware local timestamps. See the :ref:`io.sql_datetime_data` for implications (:issue:`9086`).
 - :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`)
-- :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`)
+- :class:`Series` and :class:`DataFrame` now support :class:`Iterable` objects in the constructor (:issue:`2193`)
 - :class:`DatetimeIndex` has gained the :attr:`DatetimeIndex.timetz` attribute. This returns the local time with timezone information. (:issue:`21358`)
-- :meth:`Timestamp.round`, :meth:`Timestamp.ceil`, and :meth:`Timestamp.floor` for :class:`DatetimeIndex` and :class:`Timestamp` now support an ``ambiguous`` argument for handling datetimes that are rounded to ambiguous times (:issue:`18946`)
-- :meth:`Timestamp.round`, :meth:`Timestamp.ceil`, and :meth:`Timestamp.floor` for :class:`DatetimeIndex` and :class:`Timestamp` now support a ``nonexistent`` argument for handling datetimes that are rounded to nonexistent times. See :ref:`timeseries.timezone_nonexistent` (:issue:`22647`)
-- :class:`pandas.core.resample.Resampler` now is iterable like :class:`pandas.core.groupby.GroupBy` (:issue:`15314`).
+- :meth:`~Timestamp.round`, :meth:`~Timestamp.ceil`, and :meth:`~Timestamp.floor` for :class:`DatetimeIndex` and :class:`Timestamp`
+  now support an ``ambiguous`` argument for handling datetimes that are rounded to ambiguous times (:issue:`18946`)
+  and a ``nonexistent`` argument for handling datetimes that are rounded to nonexistent times. See :ref:`timeseries.timezone_nonexistent` (:issue:`22647`)
+- The result of :meth:`~DataFrame.resample` is now iterable similar to ``groupby()`` (:issue:`15314`).
 - :meth:`Series.resample` and :meth:`DataFrame.resample` have gained the :meth:`pandas.core.resample.Resampler.quantile` (:issue:`15023`).
 - :meth:`DataFrame.resample` and :meth:`Series.resample` with a :class:`PeriodIndex` will now respect the ``base`` argument in the same fashion as with a :class:`DatetimeIndex`. (:issue:`23882`)
 - :meth:`pandas.api.types.is_list_like` has gained a keyword ``allow_sets`` which is ``True`` by default; if ``False``,
   all instances of ``set`` will not be considered "list-like" anymore (:issue:`23061`)
 - :meth:`Index.to_frame` now supports overriding column name(s) (:issue:`22580`).
 - :meth:`Categorical.from_codes` now can take a ``dtype`` parameter as an alternative to passing ``categories`` and ``ordered`` (:issue:`24398`).
-- New attribute :attr:`__git_version__` will return git commit sha of current build (:issue:`21295`).
+- New attribute ``__git_version__`` will return git commit sha of current build (:issue:`21295`).
 - Compatibility with Matplotlib 3.0 (:issue:`22790`).
 - Added :meth:`Interval.overlaps`, :meth:`IntervalArray.overlaps`, and :meth:`IntervalIndex.overlaps` for determining overlaps between interval-like objects (:issue:`21998`)
 - :func:`read_fwf` now accepts keyword ``infer_nrows`` (:issue:`15138`).
@@ -433,7 +424,7 @@ Other Enhancements
 - :class:`IntervalIndex` has gained the :attr:`~IntervalIndex.is_overlapping` attribute to indicate if the ``IntervalIndex`` contains any overlapping intervals (:issue:`23309`)
 - :func:`pandas.DataFrame.to_sql` has gained the ``method`` argument to control SQL insertion clause. See the :ref:`insertion method <io.sql.method>` section in the documentation. (:issue:`8953`)
 - :meth:`DataFrame.corrwith` now supports Spearman's rank correlation, Kendall's tau as well as callable correlation methods. (:issue:`21925`)
-- :meth:`DataFrame.to_json`, :meth:`DataFrame.to_csv`, :meth:`DataFrame.to_pickle`, and :meth:`DataFrame.to_XXX` etc. now support tilde(~) in path argument. (:issue:`23473`)
+- :meth:`DataFrame.to_json`, :meth:`DataFrame.to_csv`, :meth:`DataFrame.to_pickle`, and other export methods now support tilde (~) in path argument. (:issue:`23473`)
 
 .. _whatsnew_0240.api_breaking:
 
@@ -445,8 +436,8 @@ Pandas 0.24.0 includes a number of API breaking changes.
 
 .. _whatsnew_0240.api_breaking.deps:
 
-Dependencies have increased minimum versions
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Increased minimum versions for dependencies
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 We have updated our minimum supported versions of dependencies (:issue:`21242`, :issue:`18742`, :issue:`23774`, :issue:`24767`).
 If installed, we now require:
@@ -1174,17 +1165,19 @@ Other API Changes
 
 .. _whatsnew_0240.api.extension:
 
-ExtensionType Changes
-~~~~~~~~~~~~~~~~~~~~~
+Extension Type Changes
+~~~~~~~~~~~~~~~~~~~~~~
 
 **Equality and Hashability**
 
-Pandas now requires that extension dtypes be hashable. The base class implements
+Pandas now requires that extension dtypes be hashable (i.e. the respective
+``ExtensionDtype`` objects; hashability is not a requirement for the values
+of the corresponding ``ExtensionArray``). The base class implements
 a default ``__eq__`` and ``__hash__``. If you have a parametrized dtype, you should
 update the ``ExtensionDtype._metadata`` tuple to match the signature of your
 ``__init__`` method. See :class:`pandas.api.extensions.ExtensionDtype` for more (:issue:`22476`).
 
-**Reshaping changes**
+**New and changed methods**
 
 - :meth:`~pandas.api.types.ExtensionArray.dropna` has been added (:issue:`21185`)
 - :meth:`~pandas.api.types.ExtensionArray.repeat` has been added (:issue:`24349`)
@@ -1202,9 +1195,25 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your
 - Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`)
 - Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`)
 
+.. _whatsnew_0240.enhancements.extension_array_operators:
+
+**Operator support**
+
+A ``Series`` based on an ``ExtensionArray`` now supports arithmetic and comparison
+operators (:issue:`19577`). There are two approaches for providing operator support for an ``ExtensionArray``:
+
+1. Define each of the operators on your ``ExtensionArray`` subclass.
+2. Use an operator implementation from pandas that depends on operators that are already defined
+   on the underlying elements (scalars) of the ``ExtensionArray``.
+
+See the :ref:`ExtensionArray Operator Support
+<extending.extension.operator>` documentation section for details on both
+ways of adding operator support.
+
 **Other changes**
 
 - A default repr for :class:`pandas.api.extensions.ExtensionArray` is now provided (:issue:`23601`).
+- :meth:`ExtensionArray._formatting_values` is deprecated. Use :attr:`ExtensionArray._formatter` instead. (:issue:`23601`)
 - An ``ExtensionArray`` with a boolean dtype now works correctly as a boolean indexer. :meth:`pandas.api.types.is_bool_dtype` now properly considers them boolean (:issue:`22326`)
 
 **Bug Fixes**
@@ -1253,7 +1262,6 @@ Deprecations
 - The methods :meth:`DataFrame.update` and :meth:`Panel.update` have deprecated the ``raise_conflict=False|True`` keyword in favor of ``errors='ignore'|'raise'`` (:issue:`23585`)
 - The methods :meth:`Series.str.partition` and :meth:`Series.str.rpartition` have deprecated the ``pat`` keyword in favor of ``sep`` (:issue:`22676`)
 - Deprecated the ``nthreads`` keyword of :func:`pandas.read_feather` in favor of ``use_threads`` to reflect the changes in ``pyarrow>=0.11.0``. (:issue:`23053`)
-- :meth:`ExtensionArray._formatting_values` is deprecated. Use :attr:`ExtensionArray._formatter` instead. (:issue:`23601`)
 - :func:`pandas.read_excel` has deprecated accepting ``usecols`` as an integer. Please pass in a list of ints from 0 to ``usecols`` inclusive instead (:issue:`23527`)
 - Constructing a :class:`TimedeltaIndex` from data with ``datetime64``-dtyped data is deprecated, will raise ``TypeError`` in a future version (:issue:`23539`)
 - Constructing a :class:`DatetimeIndex` from data with ``timedelta64``-dtyped data is deprecated, will raise ``TypeError`` in a future version (:issue:`23675`)

From 013ae3daf2b6793478f58e9e88d6d36c308f69b7 Mon Sep 17 00:00:00 2001
From: Marc Garcia <garcia.marc@gmail.com>
Date: Fri, 25 Jan 2019 12:40:37 +0000
Subject: [PATCH 23/48] DOC: Final reorganization of documentation pages
 (#24890)

* DOC: Final reorganization of documentation pages

* Move ecosystem to top level
---
 doc/redirects.csv                             |  5 +
 .../comparison}/comparison_with_r.rst         |  0
 .../comparison}/comparison_with_sas.rst       |  0
 .../comparison}/comparison_with_sql.rst       |  0
 .../comparison}/comparison_with_stata.rst     |  0
 .../getting_started/comparison/index.rst      | 15 +++
 doc/source/getting_started/index.rst          |  1 +
 doc/source/getting_started/overview.rst       | 93 ++++++++++++++----
 doc/source/index.rst.template                 | 96 ++-----------------
 doc/source/r_interface.rst                    | 94 ------------------
 doc/source/{ => user_guide}/cookbook.rst      |  0
 doc/source/user_guide/index.rst               |  1 +
 doc/source/user_guide/style.ipynb             |  2 +-
 .../{ => user_guide}/templates/myhtml.tpl     |  0
 .../templates}/template_structure.html        |  0
 15 files changed, 103 insertions(+), 204 deletions(-)
 rename doc/source/{ => getting_started/comparison}/comparison_with_r.rst (100%)
 rename doc/source/{ => getting_started/comparison}/comparison_with_sas.rst (100%)
 rename doc/source/{ => getting_started/comparison}/comparison_with_sql.rst (100%)
 rename doc/source/{ => getting_started/comparison}/comparison_with_stata.rst (100%)
 create mode 100644 doc/source/getting_started/comparison/index.rst
 delete mode 100644 doc/source/r_interface.rst
 rename doc/source/{ => user_guide}/cookbook.rst (100%)
 rename doc/source/{ => user_guide}/templates/myhtml.tpl (100%)
 rename doc/source/{ => user_guide/templates}/template_structure.html (100%)

diff --git a/doc/redirects.csv b/doc/redirects.csv
index e0de03745aaa8..c4e14359f7f75 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -8,6 +8,10 @@ release,whatsnew/index
 # getting started
 10min,getting_started/10min
 basics,getting_started/basics
+comparison_with_r,getting_started/comparison/comparison_with_r
+comparison_with_sql,getting_started/comparison/comparison_with_sql
+comparison_with_sas,getting_started/comparison/comparison_with_sas
+comparison_with_stata,getting_started/comparison/comparison_with_stata
 dsintro,getting_started/dsintro
 overview,getting_started/overview
 tutorials,getting_started/tutorials
@@ -16,6 +20,7 @@ tutorials,getting_started/tutorials
 advanced,user_guide/advanced
 categorical,user_guide/categorical
 computation,user_guide/computation
+cookbook,user_guide/cookbook
 enhancingperf,user_guide/enhancingperf
 gotchas,user_guide/gotchas
 groupby,user_guide/groupby
diff --git a/doc/source/comparison_with_r.rst b/doc/source/getting_started/comparison/comparison_with_r.rst
similarity index 100%
rename from doc/source/comparison_with_r.rst
rename to doc/source/getting_started/comparison/comparison_with_r.rst
diff --git a/doc/source/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst
similarity index 100%
rename from doc/source/comparison_with_sas.rst
rename to doc/source/getting_started/comparison/comparison_with_sas.rst
diff --git a/doc/source/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst
similarity index 100%
rename from doc/source/comparison_with_sql.rst
rename to doc/source/getting_started/comparison/comparison_with_sql.rst
diff --git a/doc/source/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
similarity index 100%
rename from doc/source/comparison_with_stata.rst
rename to doc/source/getting_started/comparison/comparison_with_stata.rst
diff --git a/doc/source/getting_started/comparison/index.rst b/doc/source/getting_started/comparison/index.rst
new file mode 100644
index 0000000000000..998706ce0c639
--- /dev/null
+++ b/doc/source/getting_started/comparison/index.rst
@@ -0,0 +1,15 @@
+{{ header }}
+
+.. _comparison:
+
+===========================
+Comparison with other tools
+===========================
+
+.. toctree::
+    :maxdepth: 2
+
+    comparison_with_r
+    comparison_with_sql
+    comparison_with_sas
+    comparison_with_stata
diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst
index 116efe79beef1..4c5d26461a667 100644
--- a/doc/source/getting_started/index.rst
+++ b/doc/source/getting_started/index.rst
@@ -13,4 +13,5 @@ Getting started
     10min
     basics
     dsintro
+    comparison/index
     tutorials
diff --git a/doc/source/getting_started/overview.rst b/doc/source/getting_started/overview.rst
index 1e07df47aadca..b531f686951fc 100644
--- a/doc/source/getting_started/overview.rst
+++ b/doc/source/getting_started/overview.rst
@@ -6,25 +6,80 @@
 Package overview
 ****************
 
-:mod:`pandas` is an open source, BSD-licensed library providing high-performance,
-easy-to-use data structures and data analysis tools for the `Python <https://www.python.org/>`__
-programming language.
-
-:mod:`pandas` consists of the following elements:
-
-* A set of labeled array data structures, the primary of which are
-  Series and DataFrame.
-* Index objects enabling both simple axis indexing and multi-level /
-  hierarchical axis indexing.
-* An integrated group by engine for aggregating and transforming data sets.
-* Date range generation (date_range) and custom date offsets enabling the
-  implementation of customized frequencies.
-* Input/Output tools: loading tabular data from flat files (CSV, delimited,
-  Excel 2003), and saving and loading pandas objects from the fast and
-  efficient PyTables/HDF5 format.
-* Memory-efficient "sparse" versions of the standard data structures for storing
-  data that is mostly missing or mostly constant (some fixed value).
-* Moving window statistics (rolling mean, rolling standard deviation, etc.).
+**pandas** is a `Python <https://www.python.org>`__ package providing fast,
+flexible, and expressive data structures designed to make working with
+"relational" or "labeled" data both easy and intuitive. It aims to be the
+fundamental high-level building block for doing practical, **real world** data
+analysis in Python. Additionally, it has the broader goal of becoming **the
+most powerful and flexible open source data analysis / manipulation tool
+available in any language**. It is already well on its way toward this goal.
+
+pandas is well suited for many different kinds of data:
+
+  - Tabular data with heterogeneously-typed columns, as in an SQL table or
+    Excel spreadsheet
+  - Ordered and unordered (not necessarily fixed-frequency) time series data.
+  - Arbitrary matrix data (homogeneously typed or heterogeneous) with row and
+    column labels
+  - Any other form of observational / statistical data sets. The data actually
+    need not be labeled at all to be placed into a pandas data structure
+
+The two primary data structures of pandas, :class:`Series` (1-dimensional)
+and :class:`DataFrame` (2-dimensional), handle the vast majority of typical use
+cases in finance, statistics, social science, and many areas of
+engineering. For R users, :class:`DataFrame` provides everything that R's
+``data.frame`` provides and much more. pandas is built on top of `NumPy
+<https://www.numpy.org>`__ and is intended to integrate well within a scientific
+computing environment with many other 3rd party libraries.
+
+Here are just a few of the things that pandas does well:
+
+  - Easy handling of **missing data** (represented as NaN) in floating point as
+    well as non-floating point data
+  - Size mutability: columns can be **inserted and deleted** from DataFrame and
+    higher dimensional objects
+  - Automatic and explicit **data alignment**: objects can be explicitly
+    aligned to a set of labels, or the user can simply ignore the labels and
+    let `Series`, `DataFrame`, etc. automatically align the data for you in
+    computations
+  - Powerful, flexible **group by** functionality to perform
+    split-apply-combine operations on data sets, for both aggregating and
+    transforming data
+  - Make it **easy to convert** ragged, differently-indexed data in other
+    Python and NumPy data structures into DataFrame objects
+  - Intelligent label-based **slicing**, **fancy indexing**, and **subsetting**
+    of large data sets
+  - Intuitive **merging** and **joining** data sets
+  - Flexible **reshaping** and pivoting of data sets
+  - **Hierarchical** labeling of axes (possible to have multiple labels per
+    tick)
+  - Robust IO tools for loading data from **flat files** (CSV and delimited),
+    Excel files, databases, and saving / loading data from the ultrafast **HDF5
+    format**
+  - **Time series**-specific functionality: date range generation and frequency
+    conversion, moving window statistics, moving window linear regressions,
+    date shifting and lagging, etc.
+
+Many of these principles are here to address the shortcomings frequently
+experienced using other languages / scientific research environments. For data
+scientists, working with data is typically divided into multiple stages:
+munging and cleaning data, analyzing / modeling it, then organizing the results
+of the analysis into a form suitable for plotting or tabular display. pandas
+is the ideal tool for all of these tasks.
+
+Some other notes
+
+ - pandas is **fast**. Many of the low-level algorithmic bits have been
+   extensively tweaked in `Cython <https://cython.org>`__ code. However, as with
+   anything else generalization usually sacrifices performance. So if you focus
+   on one feature for your application you may be able to create a faster
+   specialized tool.
+
+ - pandas is a dependency of `statsmodels
+   <https://www.statsmodels.org/stable/index.html>`__, making it an important part of the
+   statistical computing ecosystem in Python.
+
+ - pandas has been used extensively in production in financial applications.
 
 Data Structures
 ---------------
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index bc420a906b59c..ab51911a610e3 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -22,93 +22,15 @@ pandas: powerful Python data analysis toolkit
 
 **Developer Mailing List:** https://groups.google.com/forum/#!forum/pydata
 
-**pandas** is a `Python <https://www.python.org>`__ package providing fast,
-flexible, and expressive data structures designed to make working with
-"relational" or "labeled" data both easy and intuitive. It aims to be the
-fundamental high-level building block for doing practical, **real world** data
-analysis in Python. Additionally, it has the broader goal of becoming **the
-most powerful and flexible open source data analysis / manipulation tool
-available in any language**. It is already well on its way toward this goal.
-
-pandas is well suited for many different kinds of data:
-
-  - Tabular data with heterogeneously-typed columns, as in an SQL table or
-    Excel spreadsheet
-  - Ordered and unordered (not necessarily fixed-frequency) time series data.
-  - Arbitrary matrix data (homogeneously typed or heterogeneous) with row and
-    column labels
-  - Any other form of observational / statistical data sets. The data actually
-    need not be labeled at all to be placed into a pandas data structure
-
-The two primary data structures of pandas, :class:`Series` (1-dimensional)
-and :class:`DataFrame` (2-dimensional), handle the vast majority of typical use
-cases in finance, statistics, social science, and many areas of
-engineering. For R users, :class:`DataFrame` provides everything that R's
-``data.frame`` provides and much more. pandas is built on top of `NumPy
-<https://www.numpy.org>`__ and is intended to integrate well within a scientific
-computing environment with many other 3rd party libraries.
-
-Here are just a few of the things that pandas does well:
-
-  - Easy handling of **missing data** (represented as NaN) in floating point as
-    well as non-floating point data
-  - Size mutability: columns can be **inserted and deleted** from DataFrame and
-    higher dimensional objects
-  - Automatic and explicit **data alignment**: objects can be explicitly
-    aligned to a set of labels, or the user can simply ignore the labels and
-    let `Series`, `DataFrame`, etc. automatically align the data for you in
-    computations
-  - Powerful, flexible **group by** functionality to perform
-    split-apply-combine operations on data sets, for both aggregating and
-    transforming data
-  - Make it **easy to convert** ragged, differently-indexed data in other
-    Python and NumPy data structures into DataFrame objects
-  - Intelligent label-based **slicing**, **fancy indexing**, and **subsetting**
-    of large data sets
-  - Intuitive **merging** and **joining** data sets
-  - Flexible **reshaping** and pivoting of data sets
-  - **Hierarchical** labeling of axes (possible to have multiple labels per
-    tick)
-  - Robust IO tools for loading data from **flat files** (CSV and delimited),
-    Excel files, databases, and saving / loading data from the ultrafast **HDF5
-    format**
-  - **Time series**-specific functionality: date range generation and frequency
-    conversion, moving window statistics, moving window linear regressions,
-    date shifting and lagging, etc.
-
-Many of these principles are here to address the shortcomings frequently
-experienced using other languages / scientific research environments. For data
-scientists, working with data is typically divided into multiple stages:
-munging and cleaning data, analyzing / modeling it, then organizing the results
-of the analysis into a form suitable for plotting or tabular display. pandas
-is the ideal tool for all of these tasks.
-
-Some other notes
-
- - pandas is **fast**. Many of the low-level algorithmic bits have been
-   extensively tweaked in `Cython <https://cython.org>`__ code. However, as with
-   anything else generalization usually sacrifices performance. So if you focus
-   on one feature for your application you may be able to create a faster
-   specialized tool.
-
- - pandas is a dependency of `statsmodels
-   <https://www.statsmodels.org/stable/index.html>`__, making it an important part of the
-   statistical computing ecosystem in Python.
-
- - pandas has been used extensively in production in financial applications.
-
-.. note::
-
-   This documentation assumes general familiarity with NumPy. If you haven't
-   used NumPy much or at all, do invest some time in `learning about NumPy
-   <https://docs.scipy.org>`__ first.
-
-See the package overview for more detail about what's in the library.
+:mod:`pandas` is an open source, BSD-licensed library providing high-performance,
+easy-to-use data structures and data analysis tools for the `Python <https://www.python.org/>`__
+programming language.
 
+See the :ref:`overview` for more detail about what's in the library.
 
 {% if single_doc and single_doc.endswith('.rst') -%}
 .. toctree::
-    :maxdepth: 4
+    :maxdepth: 2
 
     {{ single_doc[:-4] }}
 {% elif single_doc %}
@@ -118,21 +40,15 @@ See the package overview for more detail about what's in the library.
     {{ single_doc }}
 {% else -%}
 .. toctree::
-    :maxdepth: 4
+    :maxdepth: 2
 {% endif %}
 
     {% if not single_doc -%}
     What's New <whatsnew/v0.24.0>
     install
     getting_started/index
-    cookbook
     user_guide/index
-    r_interface
     ecosystem
-    comparison_with_r
-    comparison_with_sql
-    comparison_with_sas
-    comparison_with_stata
     {% endif -%}
     {% if include_api -%}
     api/index
diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst
deleted file mode 100644
index 9839bba4884d4..0000000000000
--- a/doc/source/r_interface.rst
+++ /dev/null
@@ -1,94 +0,0 @@
-.. _rpy:
-
-{{ header }}
-
-******************
-rpy2 / R interface
-******************
-
-.. warning::
-
-    Up to pandas 0.19, a ``pandas.rpy`` module existed with functionality to
-    convert between pandas and ``rpy2`` objects. This functionality now lives in
-    the `rpy2 <https://rpy2.readthedocs.io/>`__ project itself.
-    See the `updating section <http://pandas.pydata.org/pandas-docs/version/0.19.0/r_interface.html#updating-your-code-to-use-rpy2-functions>`__
-    of the previous documentation for a guide to port your code from the
-    removed ``pandas.rpy`` to ``rpy2`` functions.
-
-
-`rpy2 <http://rpy2.bitbucket.org/>`__ is an interface to R running embedded in a Python process, and also includes functionality to deal with pandas DataFrames.
-Converting data frames back and forth between rpy2 and pandas should be largely
-automated (no need to convert explicitly, it will be done on the fly in most
-rpy2 functions).
-To convert explicitly, the functions are ``pandas2ri.py2ri()`` and
-``pandas2ri.ri2py()``.
-
-
-See also the documentation of the `rpy2 <http://rpy2.bitbucket.org/>`__ project: https://rpy2.readthedocs.io.
-
-In the remainder of this page, a few examples of explicit conversion is given. The pandas conversion of rpy2 needs first to be activated:
-
-.. ipython::
-    :verbatim:
-
-    In [1]: from rpy2.robjects import pandas2ri
-       ...: pandas2ri.activate()
-
-Transferring R data sets into Python
-------------------------------------
-
-Once the pandas conversion is activated (``pandas2ri.activate()``), many conversions
-of R to pandas objects will be done automatically. For example, to obtain the 'iris' dataset as a pandas DataFrame:
-
-.. ipython::
-    :verbatim:
-
-    In [2]: from rpy2.robjects import r
-
-    In [3]: r.data('iris')
-
-    In [4]: r['iris'].head()
-    Out[4]:
-        Sepal.Length  Sepal.Width  Petal.Length  Petal.Width Species
-    0           5.1          3.5           1.4          0.2  setosa
-    1           4.9          3.0           1.4          0.2  setosa
-    2           4.7          3.2           1.3          0.2  setosa
-    3           4.6          3.1           1.5          0.2  setosa
-    4           5.0          3.6           1.4          0.2  setosa
-
-If the pandas conversion was not activated, the above could also be accomplished
-by explicitly converting it with the ``pandas2ri.ri2py`` function
-(``pandas2ri.ri2py(r['iris'])``).
-
-Converting DataFrames into R objects
-------------------------------------
-
-The ``pandas2ri.py2ri`` function support the reverse operation to convert
-DataFrames into the equivalent R object (that is, **data.frame**):
-
-.. ipython::
-   :verbatim:
-
-   In [5]: df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]},
-      ...:                   index=["one", "two", "three"])
-
-   In [6]: r_dataframe = pandas2ri.py2ri(df)
-
-   In [7]: print(type(r_dataframe))
-   Out[7]: <class 'rpy2.robjects.vectors.DataFrame'>
-
-   In [8]: print(r_dataframe)
-   Out[8]:
-         A B C
-   one   1 4 7
-   two   2 5 8
-   three 3 6 9
-
-
-The DataFrame's index is stored as the ``rownames`` attribute of the
-data.frame instance.
-
-
-..
-   Calling R functions with pandas objects
-   High-level interface to R estimators
diff --git a/doc/source/cookbook.rst b/doc/source/user_guide/cookbook.rst
similarity index 100%
rename from doc/source/cookbook.rst
rename to doc/source/user_guide/cookbook.rst
diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst
index 60e722808d647..d39cf7103ab63 100644
--- a/doc/source/user_guide/index.rst
+++ b/doc/source/user_guide/index.rst
@@ -37,3 +37,4 @@ Further information on any specific method can be obtained in the
     enhancingperf
     sparse
     gotchas
+    cookbook
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index a238c3b16e9ad..79a9848704eec 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -1133,7 +1133,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "with open(\"template_structure.html\") as f:\n",
+    "with open(\"templates/template_structure.html\") as f:\n",
     "    structure = f.read()\n",
     "    \n",
     "HTML(structure)"
diff --git a/doc/source/templates/myhtml.tpl b/doc/source/user_guide/templates/myhtml.tpl
similarity index 100%
rename from doc/source/templates/myhtml.tpl
rename to doc/source/user_guide/templates/myhtml.tpl
diff --git a/doc/source/template_structure.html b/doc/source/user_guide/templates/template_structure.html
similarity index 100%
rename from doc/source/template_structure.html
rename to doc/source/user_guide/templates/template_structure.html

From 85982ed73a30a66a3138652b4454bb04d29bf7a9 Mon Sep 17 00:00:00 2001
From: Marc Garcia <garcia.marc@gmail.com>
Date: Fri, 25 Jan 2019 13:07:03 +0000
Subject: [PATCH 24/48] DOC: Adding redirects to API moved pages (#24909)

* DOC: Adding redirects to API moved pages
---
 .gitignore                                    |    2 +-
 doc/make.py                                   |    6 +-
 doc/redirects.csv                             | 1535 +++++++++++++++++
 doc/source/index.rst.template                 |    4 +-
 doc/source/{api => reference}/arrays.rst      |   48 +-
 doc/source/{api => reference}/extensions.rst  |    2 +-
 doc/source/{api => reference}/frame.rst       |   36 +-
 .../{api => reference}/general_functions.rst  |   16 +-
 .../general_utility_functions.rst             |   14 +-
 doc/source/{api => reference}/groupby.rst     |   14 +-
 doc/source/{api => reference}/index.rst       |   52 +-
 doc/source/{api => reference}/indexing.rst    |   68 +-
 doc/source/{api => reference}/io.rst          |   32 +-
 .../{api => reference}/offset_frequency.rst   |  272 +--
 doc/source/{api => reference}/panel.rst       |   28 +-
 doc/source/{api => reference}/plotting.rst    |    2 +-
 doc/source/{api => reference}/resampling.rst  |    8 +-
 doc/source/{api => reference}/series.rst      |   58 +-
 doc/source/{api => reference}/style.rst       |   10 +-
 doc/source/{api => reference}/window.rst      |    6 +-
 doc/source/user_guide/io.rst                  |    2 +-
 scripts/validate_docstrings.py                |    3 +-
 22 files changed, 1877 insertions(+), 341 deletions(-)
 rename doc/source/{api => reference}/arrays.rst (93%)
 rename doc/source/{api => reference}/extensions.rst (95%)
 rename doc/source/{api => reference}/frame.rst (93%)
 rename doc/source/{api => reference}/general_functions.rst (84%)
 rename doc/source/{api => reference}/general_utility_functions.rst (93%)
 rename doc/source/{api => reference}/groupby.rst (94%)
 rename doc/source/{api => reference}/index.rst (56%)
 rename doc/source/{api => reference}/indexing.rst (91%)
 rename doc/source/{api => reference}/io.rst (78%)
 rename doc/source/{api => reference}/offset_frequency.rst (84%)
 rename doc/source/{api => reference}/panel.rst (90%)
 rename doc/source/{api => reference}/plotting.rst (93%)
 rename doc/source/{api => reference}/resampling.rst (91%)
 rename doc/source/{api => reference}/series.rst (93%)
 rename doc/source/{api => reference}/style.rst (88%)
 rename doc/source/{api => reference}/window.rst (95%)

diff --git a/.gitignore b/.gitignore
index 4598714db6c6a..9891883879cf1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -101,7 +101,7 @@ asv_bench/pandas/
 # Documentation generated files #
 #################################
 doc/source/generated
-doc/source/api/generated
+doc/source/reference/api
 doc/source/_static
 doc/source/vbench
 doc/source/vbench.rst
diff --git a/doc/make.py b/doc/make.py
index bc458d6b53cb0..438c4a04a3f08 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -53,7 +53,7 @@ def __init__(self, num_jobs=0, include_api=True, single_doc=None,
         if single_doc and single_doc.endswith('.rst'):
             self.single_doc_html = os.path.splitext(single_doc)[0] + '.html'
         elif single_doc:
-            self.single_doc_html = 'api/generated/pandas.{}.html'.format(
+            self.single_doc_html = 'reference/api/pandas.{}.html'.format(
                 single_doc)
 
     def _process_single_doc(self, single_doc):
@@ -63,7 +63,7 @@ def _process_single_doc(self, single_doc):
 
         For example, categorial.rst or pandas.DataFrame.head. For the latter,
         return the corresponding file path
-        (e.g. generated/pandas.DataFrame.head.rst).
+        (e.g. reference/api/pandas.DataFrame.head.rst).
         """
         base_name, extension = os.path.splitext(single_doc)
         if extension in ('.rst', '.ipynb'):
@@ -258,7 +258,7 @@ def clean():
         Clean documentation generated files.
         """
         shutil.rmtree(BUILD_PATH, ignore_errors=True)
-        shutil.rmtree(os.path.join(SOURCE_PATH, 'api', 'generated'),
+        shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'),
                       ignore_errors=True)
 
     def zip_html(self):
diff --git a/doc/redirects.csv b/doc/redirects.csv
index c4e14359f7f75..a7886779c97d5 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -44,3 +44,1538 @@ contributing_docstring,development/contributing_docstring
 developer,development/developer
 extending,development/extending
 internals,development/internals
+
+# api
+api,reference/index
+generated/pandas.api.extensions.ExtensionArray.argsort,../reference/api/pandas.api.extensions.ExtensionArray.argsort
+generated/pandas.api.extensions.ExtensionArray.astype,../reference/api/pandas.api.extensions.ExtensionArray.astype
+generated/pandas.api.extensions.ExtensionArray.copy,../reference/api/pandas.api.extensions.ExtensionArray.copy
+generated/pandas.api.extensions.ExtensionArray.dropna,../reference/api/pandas.api.extensions.ExtensionArray.dropna
+generated/pandas.api.extensions.ExtensionArray.dtype,../reference/api/pandas.api.extensions.ExtensionArray.dtype
+generated/pandas.api.extensions.ExtensionArray.factorize,../reference/api/pandas.api.extensions.ExtensionArray.factorize
+generated/pandas.api.extensions.ExtensionArray.fillna,../reference/api/pandas.api.extensions.ExtensionArray.fillna
+generated/pandas.api.extensions.ExtensionArray,../reference/api/pandas.api.extensions.ExtensionArray
+generated/pandas.api.extensions.ExtensionArray.isna,../reference/api/pandas.api.extensions.ExtensionArray.isna
+generated/pandas.api.extensions.ExtensionArray.nbytes,../reference/api/pandas.api.extensions.ExtensionArray.nbytes
+generated/pandas.api.extensions.ExtensionArray.ndim,../reference/api/pandas.api.extensions.ExtensionArray.ndim
+generated/pandas.api.extensions.ExtensionArray.shape,../reference/api/pandas.api.extensions.ExtensionArray.shape
+generated/pandas.api.extensions.ExtensionArray.take,../reference/api/pandas.api.extensions.ExtensionArray.take
+generated/pandas.api.extensions.ExtensionArray.unique,../reference/api/pandas.api.extensions.ExtensionArray.unique
+generated/pandas.api.extensions.ExtensionDtype.construct_array_type,../reference/api/pandas.api.extensions.ExtensionDtype.construct_array_type
+generated/pandas.api.extensions.ExtensionDtype.construct_from_string,../reference/api/pandas.api.extensions.ExtensionDtype.construct_from_string
+generated/pandas.api.extensions.ExtensionDtype,../reference/api/pandas.api.extensions.ExtensionDtype
+generated/pandas.api.extensions.ExtensionDtype.is_dtype,../reference/api/pandas.api.extensions.ExtensionDtype.is_dtype
+generated/pandas.api.extensions.ExtensionDtype.kind,../reference/api/pandas.api.extensions.ExtensionDtype.kind
+generated/pandas.api.extensions.ExtensionDtype.name,../reference/api/pandas.api.extensions.ExtensionDtype.name
+generated/pandas.api.extensions.ExtensionDtype.names,../reference/api/pandas.api.extensions.ExtensionDtype.names
+generated/pandas.api.extensions.ExtensionDtype.na_value,../reference/api/pandas.api.extensions.ExtensionDtype.na_value
+generated/pandas.api.extensions.ExtensionDtype.type,../reference/api/pandas.api.extensions.ExtensionDtype.type
+generated/pandas.api.extensions.register_dataframe_accessor,../reference/api/pandas.api.extensions.register_dataframe_accessor
+generated/pandas.api.extensions.register_extension_dtype,../reference/api/pandas.api.extensions.register_extension_dtype
+generated/pandas.api.extensions.register_index_accessor,../reference/api/pandas.api.extensions.register_index_accessor
+generated/pandas.api.extensions.register_series_accessor,../reference/api/pandas.api.extensions.register_series_accessor
+generated/pandas.api.types.infer_dtype,../reference/api/pandas.api.types.infer_dtype
+generated/pandas.api.types.is_bool_dtype,../reference/api/pandas.api.types.is_bool_dtype
+generated/pandas.api.types.is_bool,../reference/api/pandas.api.types.is_bool
+generated/pandas.api.types.is_categorical_dtype,../reference/api/pandas.api.types.is_categorical_dtype
+generated/pandas.api.types.is_categorical,../reference/api/pandas.api.types.is_categorical
+generated/pandas.api.types.is_complex_dtype,../reference/api/pandas.api.types.is_complex_dtype
+generated/pandas.api.types.is_complex,../reference/api/pandas.api.types.is_complex
+generated/pandas.api.types.is_datetime64_any_dtype,../reference/api/pandas.api.types.is_datetime64_any_dtype
+generated/pandas.api.types.is_datetime64_dtype,../reference/api/pandas.api.types.is_datetime64_dtype
+generated/pandas.api.types.is_datetime64_ns_dtype,../reference/api/pandas.api.types.is_datetime64_ns_dtype
+generated/pandas.api.types.is_datetime64tz_dtype,../reference/api/pandas.api.types.is_datetime64tz_dtype
+generated/pandas.api.types.is_datetimetz,../reference/api/pandas.api.types.is_datetimetz
+generated/pandas.api.types.is_dict_like,../reference/api/pandas.api.types.is_dict_like
+generated/pandas.api.types.is_extension_array_dtype,../reference/api/pandas.api.types.is_extension_array_dtype
+generated/pandas.api.types.is_extension_type,../reference/api/pandas.api.types.is_extension_type
+generated/pandas.api.types.is_file_like,../reference/api/pandas.api.types.is_file_like
+generated/pandas.api.types.is_float_dtype,../reference/api/pandas.api.types.is_float_dtype
+generated/pandas.api.types.is_float,../reference/api/pandas.api.types.is_float
+generated/pandas.api.types.is_hashable,../reference/api/pandas.api.types.is_hashable
+generated/pandas.api.types.is_int64_dtype,../reference/api/pandas.api.types.is_int64_dtype
+generated/pandas.api.types.is_integer_dtype,../reference/api/pandas.api.types.is_integer_dtype
+generated/pandas.api.types.is_integer,../reference/api/pandas.api.types.is_integer
+generated/pandas.api.types.is_interval_dtype,../reference/api/pandas.api.types.is_interval_dtype
+generated/pandas.api.types.is_interval,../reference/api/pandas.api.types.is_interval
+generated/pandas.api.types.is_iterator,../reference/api/pandas.api.types.is_iterator
+generated/pandas.api.types.is_list_like,../reference/api/pandas.api.types.is_list_like
+generated/pandas.api.types.is_named_tuple,../reference/api/pandas.api.types.is_named_tuple
+generated/pandas.api.types.is_number,../reference/api/pandas.api.types.is_number
+generated/pandas.api.types.is_numeric_dtype,../reference/api/pandas.api.types.is_numeric_dtype
+generated/pandas.api.types.is_object_dtype,../reference/api/pandas.api.types.is_object_dtype
+generated/pandas.api.types.is_period_dtype,../reference/api/pandas.api.types.is_period_dtype
+generated/pandas.api.types.is_period,../reference/api/pandas.api.types.is_period
+generated/pandas.api.types.is_re_compilable,../reference/api/pandas.api.types.is_re_compilable
+generated/pandas.api.types.is_re,../reference/api/pandas.api.types.is_re
+generated/pandas.api.types.is_scalar,../reference/api/pandas.api.types.is_scalar
+generated/pandas.api.types.is_signed_integer_dtype,../reference/api/pandas.api.types.is_signed_integer_dtype
+generated/pandas.api.types.is_sparse,../reference/api/pandas.api.types.is_sparse
+generated/pandas.api.types.is_string_dtype,../reference/api/pandas.api.types.is_string_dtype
+generated/pandas.api.types.is_timedelta64_dtype,../reference/api/pandas.api.types.is_timedelta64_dtype
+generated/pandas.api.types.is_timedelta64_ns_dtype,../reference/api/pandas.api.types.is_timedelta64_ns_dtype
+generated/pandas.api.types.is_unsigned_integer_dtype,../reference/api/pandas.api.types.is_unsigned_integer_dtype
+generated/pandas.api.types.pandas_dtype,../reference/api/pandas.api.types.pandas_dtype
+generated/pandas.api.types.union_categoricals,../reference/api/pandas.api.types.union_categoricals
+generated/pandas.bdate_range,../reference/api/pandas.bdate_range
+generated/pandas.Categorical.__array__,../reference/api/pandas.Categorical.__array__
+generated/pandas.Categorical.categories,../reference/api/pandas.Categorical.categories
+generated/pandas.Categorical.codes,../reference/api/pandas.Categorical.codes
+generated/pandas.CategoricalDtype.categories,../reference/api/pandas.CategoricalDtype.categories
+generated/pandas.Categorical.dtype,../reference/api/pandas.Categorical.dtype
+generated/pandas.CategoricalDtype,../reference/api/pandas.CategoricalDtype
+generated/pandas.CategoricalDtype.ordered,../reference/api/pandas.CategoricalDtype.ordered
+generated/pandas.Categorical.from_codes,../reference/api/pandas.Categorical.from_codes
+generated/pandas.Categorical,../reference/api/pandas.Categorical
+generated/pandas.CategoricalIndex.add_categories,../reference/api/pandas.CategoricalIndex.add_categories
+generated/pandas.CategoricalIndex.as_ordered,../reference/api/pandas.CategoricalIndex.as_ordered
+generated/pandas.CategoricalIndex.as_unordered,../reference/api/pandas.CategoricalIndex.as_unordered
+generated/pandas.CategoricalIndex.categories,../reference/api/pandas.CategoricalIndex.categories
+generated/pandas.CategoricalIndex.codes,../reference/api/pandas.CategoricalIndex.codes
+generated/pandas.CategoricalIndex.equals,../reference/api/pandas.CategoricalIndex.equals
+generated/pandas.CategoricalIndex,../reference/api/pandas.CategoricalIndex
+generated/pandas.CategoricalIndex.map,../reference/api/pandas.CategoricalIndex.map
+generated/pandas.CategoricalIndex.ordered,../reference/api/pandas.CategoricalIndex.ordered
+generated/pandas.CategoricalIndex.remove_categories,../reference/api/pandas.CategoricalIndex.remove_categories
+generated/pandas.CategoricalIndex.remove_unused_categories,../reference/api/pandas.CategoricalIndex.remove_unused_categories
+generated/pandas.CategoricalIndex.rename_categories,../reference/api/pandas.CategoricalIndex.rename_categories
+generated/pandas.CategoricalIndex.reorder_categories,../reference/api/pandas.CategoricalIndex.reorder_categories
+generated/pandas.CategoricalIndex.set_categories,../reference/api/pandas.CategoricalIndex.set_categories
+generated/pandas.Categorical.ordered,../reference/api/pandas.Categorical.ordered
+generated/pandas.concat,../reference/api/pandas.concat
+generated/pandas.core.groupby.DataFrameGroupBy.all,../reference/api/pandas.core.groupby.DataFrameGroupBy.all
+generated/pandas.core.groupby.DataFrameGroupBy.any,../reference/api/pandas.core.groupby.DataFrameGroupBy.any
+generated/pandas.core.groupby.DataFrameGroupBy.bfill,../reference/api/pandas.core.groupby.DataFrameGroupBy.bfill
+generated/pandas.core.groupby.DataFrameGroupBy.boxplot,../reference/api/pandas.core.groupby.DataFrameGroupBy.boxplot
+generated/pandas.core.groupby.DataFrameGroupBy.corr,../reference/api/pandas.core.groupby.DataFrameGroupBy.corr
+generated/pandas.core.groupby.DataFrameGroupBy.corrwith,../reference/api/pandas.core.groupby.DataFrameGroupBy.corrwith
+generated/pandas.core.groupby.DataFrameGroupBy.count,../reference/api/pandas.core.groupby.DataFrameGroupBy.count
+generated/pandas.core.groupby.DataFrameGroupBy.cov,../reference/api/pandas.core.groupby.DataFrameGroupBy.cov
+generated/pandas.core.groupby.DataFrameGroupBy.cummax,../reference/api/pandas.core.groupby.DataFrameGroupBy.cummax
+generated/pandas.core.groupby.DataFrameGroupBy.cummin,../reference/api/pandas.core.groupby.DataFrameGroupBy.cummin
+generated/pandas.core.groupby.DataFrameGroupBy.cumprod,../reference/api/pandas.core.groupby.DataFrameGroupBy.cumprod
+generated/pandas.core.groupby.DataFrameGroupBy.cumsum,../reference/api/pandas.core.groupby.DataFrameGroupBy.cumsum
+generated/pandas.core.groupby.DataFrameGroupBy.describe,../reference/api/pandas.core.groupby.DataFrameGroupBy.describe
+generated/pandas.core.groupby.DataFrameGroupBy.diff,../reference/api/pandas.core.groupby.DataFrameGroupBy.diff
+generated/pandas.core.groupby.DataFrameGroupBy.ffill,../reference/api/pandas.core.groupby.DataFrameGroupBy.ffill
+generated/pandas.core.groupby.DataFrameGroupBy.fillna,../reference/api/pandas.core.groupby.DataFrameGroupBy.fillna
+generated/pandas.core.groupby.DataFrameGroupBy.filter,../reference/api/pandas.core.groupby.DataFrameGroupBy.filter
+generated/pandas.core.groupby.DataFrameGroupBy.hist,../reference/api/pandas.core.groupby.DataFrameGroupBy.hist
+generated/pandas.core.groupby.DataFrameGroupBy.idxmax,../reference/api/pandas.core.groupby.DataFrameGroupBy.idxmax
+generated/pandas.core.groupby.DataFrameGroupBy.idxmin,../reference/api/pandas.core.groupby.DataFrameGroupBy.idxmin
+generated/pandas.core.groupby.DataFrameGroupBy.mad,../reference/api/pandas.core.groupby.DataFrameGroupBy.mad
+generated/pandas.core.groupby.DataFrameGroupBy.pct_change,../reference/api/pandas.core.groupby.DataFrameGroupBy.pct_change
+generated/pandas.core.groupby.DataFrameGroupBy.plot,../reference/api/pandas.core.groupby.DataFrameGroupBy.plot
+generated/pandas.core.groupby.DataFrameGroupBy.quantile,../reference/api/pandas.core.groupby.DataFrameGroupBy.quantile
+generated/pandas.core.groupby.DataFrameGroupBy.rank,../reference/api/pandas.core.groupby.DataFrameGroupBy.rank
+generated/pandas.core.groupby.DataFrameGroupBy.resample,../reference/api/pandas.core.groupby.DataFrameGroupBy.resample
+generated/pandas.core.groupby.DataFrameGroupBy.shift,../reference/api/pandas.core.groupby.DataFrameGroupBy.shift
+generated/pandas.core.groupby.DataFrameGroupBy.size,../reference/api/pandas.core.groupby.DataFrameGroupBy.size
+generated/pandas.core.groupby.DataFrameGroupBy.skew,../reference/api/pandas.core.groupby.DataFrameGroupBy.skew
+generated/pandas.core.groupby.DataFrameGroupBy.take,../reference/api/pandas.core.groupby.DataFrameGroupBy.take
+generated/pandas.core.groupby.DataFrameGroupBy.tshift,../reference/api/pandas.core.groupby.DataFrameGroupBy.tshift
+generated/pandas.core.groupby.GroupBy.agg,../reference/api/pandas.core.groupby.GroupBy.agg
+generated/pandas.core.groupby.GroupBy.aggregate,../reference/api/pandas.core.groupby.GroupBy.aggregate
+generated/pandas.core.groupby.GroupBy.all,../reference/api/pandas.core.groupby.GroupBy.all
+generated/pandas.core.groupby.GroupBy.any,../reference/api/pandas.core.groupby.GroupBy.any
+generated/pandas.core.groupby.GroupBy.apply,../reference/api/pandas.core.groupby.GroupBy.apply
+generated/pandas.core.groupby.GroupBy.bfill,../reference/api/pandas.core.groupby.GroupBy.bfill
+generated/pandas.core.groupby.GroupBy.count,../reference/api/pandas.core.groupby.GroupBy.count
+generated/pandas.core.groupby.GroupBy.cumcount,../reference/api/pandas.core.groupby.GroupBy.cumcount
+generated/pandas.core.groupby.GroupBy.ffill,../reference/api/pandas.core.groupby.GroupBy.ffill
+generated/pandas.core.groupby.GroupBy.first,../reference/api/pandas.core.groupby.GroupBy.first
+generated/pandas.core.groupby.GroupBy.get_group,../reference/api/pandas.core.groupby.GroupBy.get_group
+generated/pandas.core.groupby.GroupBy.groups,../reference/api/pandas.core.groupby.GroupBy.groups
+generated/pandas.core.groupby.GroupBy.head,../reference/api/pandas.core.groupby.GroupBy.head
+generated/pandas.core.groupby.GroupBy.indices,../reference/api/pandas.core.groupby.GroupBy.indices
+generated/pandas.core.groupby.GroupBy.__iter__,../reference/api/pandas.core.groupby.GroupBy.__iter__
+generated/pandas.core.groupby.GroupBy.last,../reference/api/pandas.core.groupby.GroupBy.last
+generated/pandas.core.groupby.GroupBy.max,../reference/api/pandas.core.groupby.GroupBy.max
+generated/pandas.core.groupby.GroupBy.mean,../reference/api/pandas.core.groupby.GroupBy.mean
+generated/pandas.core.groupby.GroupBy.median,../reference/api/pandas.core.groupby.GroupBy.median
+generated/pandas.core.groupby.GroupBy.min,../reference/api/pandas.core.groupby.GroupBy.min
+generated/pandas.core.groupby.GroupBy.ngroup,../reference/api/pandas.core.groupby.GroupBy.ngroup
+generated/pandas.core.groupby.GroupBy.nth,../reference/api/pandas.core.groupby.GroupBy.nth
+generated/pandas.core.groupby.GroupBy.ohlc,../reference/api/pandas.core.groupby.GroupBy.ohlc
+generated/pandas.core.groupby.GroupBy.pct_change,../reference/api/pandas.core.groupby.GroupBy.pct_change
+generated/pandas.core.groupby.GroupBy.pipe,../reference/api/pandas.core.groupby.GroupBy.pipe
+generated/pandas.core.groupby.GroupBy.prod,../reference/api/pandas.core.groupby.GroupBy.prod
+generated/pandas.core.groupby.GroupBy.rank,../reference/api/pandas.core.groupby.GroupBy.rank
+generated/pandas.core.groupby.GroupBy.sem,../reference/api/pandas.core.groupby.GroupBy.sem
+generated/pandas.core.groupby.GroupBy.size,../reference/api/pandas.core.groupby.GroupBy.size
+generated/pandas.core.groupby.GroupBy.std,../reference/api/pandas.core.groupby.GroupBy.std
+generated/pandas.core.groupby.GroupBy.sum,../reference/api/pandas.core.groupby.GroupBy.sum
+generated/pandas.core.groupby.GroupBy.tail,../reference/api/pandas.core.groupby.GroupBy.tail
+generated/pandas.core.groupby.GroupBy.transform,../reference/api/pandas.core.groupby.GroupBy.transform
+generated/pandas.core.groupby.GroupBy.var,../reference/api/pandas.core.groupby.GroupBy.var
+generated/pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing,../reference/api/pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing
+generated/pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing,../reference/api/pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing
+generated/pandas.core.groupby.SeriesGroupBy.nlargest,../reference/api/pandas.core.groupby.SeriesGroupBy.nlargest
+generated/pandas.core.groupby.SeriesGroupBy.nsmallest,../reference/api/pandas.core.groupby.SeriesGroupBy.nsmallest
+generated/pandas.core.groupby.SeriesGroupBy.nunique,../reference/api/pandas.core.groupby.SeriesGroupBy.nunique
+generated/pandas.core.groupby.SeriesGroupBy.unique,../reference/api/pandas.core.groupby.SeriesGroupBy.unique
+generated/pandas.core.groupby.SeriesGroupBy.value_counts,../reference/api/pandas.core.groupby.SeriesGroupBy.value_counts
+generated/pandas.core.resample.Resampler.aggregate,../reference/api/pandas.core.resample.Resampler.aggregate
+generated/pandas.core.resample.Resampler.apply,../reference/api/pandas.core.resample.Resampler.apply
+generated/pandas.core.resample.Resampler.asfreq,../reference/api/pandas.core.resample.Resampler.asfreq
+generated/pandas.core.resample.Resampler.backfill,../reference/api/pandas.core.resample.Resampler.backfill
+generated/pandas.core.resample.Resampler.bfill,../reference/api/pandas.core.resample.Resampler.bfill
+generated/pandas.core.resample.Resampler.count,../reference/api/pandas.core.resample.Resampler.count
+generated/pandas.core.resample.Resampler.ffill,../reference/api/pandas.core.resample.Resampler.ffill
+generated/pandas.core.resample.Resampler.fillna,../reference/api/pandas.core.resample.Resampler.fillna
+generated/pandas.core.resample.Resampler.first,../reference/api/pandas.core.resample.Resampler.first
+generated/pandas.core.resample.Resampler.get_group,../reference/api/pandas.core.resample.Resampler.get_group
+generated/pandas.core.resample.Resampler.groups,../reference/api/pandas.core.resample.Resampler.groups
+generated/pandas.core.resample.Resampler.indices,../reference/api/pandas.core.resample.Resampler.indices
+generated/pandas.core.resample.Resampler.interpolate,../reference/api/pandas.core.resample.Resampler.interpolate
+generated/pandas.core.resample.Resampler.__iter__,../reference/api/pandas.core.resample.Resampler.__iter__
+generated/pandas.core.resample.Resampler.last,../reference/api/pandas.core.resample.Resampler.last
+generated/pandas.core.resample.Resampler.max,../reference/api/pandas.core.resample.Resampler.max
+generated/pandas.core.resample.Resampler.mean,../reference/api/pandas.core.resample.Resampler.mean
+generated/pandas.core.resample.Resampler.median,../reference/api/pandas.core.resample.Resampler.median
+generated/pandas.core.resample.Resampler.min,../reference/api/pandas.core.resample.Resampler.min
+generated/pandas.core.resample.Resampler.nearest,../reference/api/pandas.core.resample.Resampler.nearest
+generated/pandas.core.resample.Resampler.nunique,../reference/api/pandas.core.resample.Resampler.nunique
+generated/pandas.core.resample.Resampler.ohlc,../reference/api/pandas.core.resample.Resampler.ohlc
+generated/pandas.core.resample.Resampler.pad,../reference/api/pandas.core.resample.Resampler.pad
+generated/pandas.core.resample.Resampler.pipe,../reference/api/pandas.core.resample.Resampler.pipe
+generated/pandas.core.resample.Resampler.prod,../reference/api/pandas.core.resample.Resampler.prod
+generated/pandas.core.resample.Resampler.quantile,../reference/api/pandas.core.resample.Resampler.quantile
+generated/pandas.core.resample.Resampler.sem,../reference/api/pandas.core.resample.Resampler.sem
+generated/pandas.core.resample.Resampler.size,../reference/api/pandas.core.resample.Resampler.size
+generated/pandas.core.resample.Resampler.std,../reference/api/pandas.core.resample.Resampler.std
+generated/pandas.core.resample.Resampler.sum,../reference/api/pandas.core.resample.Resampler.sum
+generated/pandas.core.resample.Resampler.transform,../reference/api/pandas.core.resample.Resampler.transform
+generated/pandas.core.resample.Resampler.var,../reference/api/pandas.core.resample.Resampler.var
+generated/pandas.core.window.EWM.corr,../reference/api/pandas.core.window.EWM.corr
+generated/pandas.core.window.EWM.cov,../reference/api/pandas.core.window.EWM.cov
+generated/pandas.core.window.EWM.mean,../reference/api/pandas.core.window.EWM.mean
+generated/pandas.core.window.EWM.std,../reference/api/pandas.core.window.EWM.std
+generated/pandas.core.window.EWM.var,../reference/api/pandas.core.window.EWM.var
+generated/pandas.core.window.Expanding.aggregate,../reference/api/pandas.core.window.Expanding.aggregate
+generated/pandas.core.window.Expanding.apply,../reference/api/pandas.core.window.Expanding.apply
+generated/pandas.core.window.Expanding.corr,../reference/api/pandas.core.window.Expanding.corr
+generated/pandas.core.window.Expanding.count,../reference/api/pandas.core.window.Expanding.count
+generated/pandas.core.window.Expanding.cov,../reference/api/pandas.core.window.Expanding.cov
+generated/pandas.core.window.Expanding.kurt,../reference/api/pandas.core.window.Expanding.kurt
+generated/pandas.core.window.Expanding.max,../reference/api/pandas.core.window.Expanding.max
+generated/pandas.core.window.Expanding.mean,../reference/api/pandas.core.window.Expanding.mean
+generated/pandas.core.window.Expanding.median,../reference/api/pandas.core.window.Expanding.median
+generated/pandas.core.window.Expanding.min,../reference/api/pandas.core.window.Expanding.min
+generated/pandas.core.window.Expanding.quantile,../reference/api/pandas.core.window.Expanding.quantile
+generated/pandas.core.window.Expanding.skew,../reference/api/pandas.core.window.Expanding.skew
+generated/pandas.core.window.Expanding.std,../reference/api/pandas.core.window.Expanding.std
+generated/pandas.core.window.Expanding.sum,../reference/api/pandas.core.window.Expanding.sum
+generated/pandas.core.window.Expanding.var,../reference/api/pandas.core.window.Expanding.var
+generated/pandas.core.window.Rolling.aggregate,../reference/api/pandas.core.window.Rolling.aggregate
+generated/pandas.core.window.Rolling.apply,../reference/api/pandas.core.window.Rolling.apply
+generated/pandas.core.window.Rolling.corr,../reference/api/pandas.core.window.Rolling.corr
+generated/pandas.core.window.Rolling.count,../reference/api/pandas.core.window.Rolling.count
+generated/pandas.core.window.Rolling.cov,../reference/api/pandas.core.window.Rolling.cov
+generated/pandas.core.window.Rolling.kurt,../reference/api/pandas.core.window.Rolling.kurt
+generated/pandas.core.window.Rolling.max,../reference/api/pandas.core.window.Rolling.max
+generated/pandas.core.window.Rolling.mean,../reference/api/pandas.core.window.Rolling.mean
+generated/pandas.core.window.Rolling.median,../reference/api/pandas.core.window.Rolling.median
+generated/pandas.core.window.Rolling.min,../reference/api/pandas.core.window.Rolling.min
+generated/pandas.core.window.Rolling.quantile,../reference/api/pandas.core.window.Rolling.quantile
+generated/pandas.core.window.Rolling.skew,../reference/api/pandas.core.window.Rolling.skew
+generated/pandas.core.window.Rolling.std,../reference/api/pandas.core.window.Rolling.std
+generated/pandas.core.window.Rolling.sum,../reference/api/pandas.core.window.Rolling.sum
+generated/pandas.core.window.Rolling.var,../reference/api/pandas.core.window.Rolling.var
+generated/pandas.core.window.Window.mean,../reference/api/pandas.core.window.Window.mean
+generated/pandas.core.window.Window.sum,../reference/api/pandas.core.window.Window.sum
+generated/pandas.crosstab,../reference/api/pandas.crosstab
+generated/pandas.cut,../reference/api/pandas.cut
+generated/pandas.DataFrame.abs,../reference/api/pandas.DataFrame.abs
+generated/pandas.DataFrame.add,../reference/api/pandas.DataFrame.add
+generated/pandas.DataFrame.add_prefix,../reference/api/pandas.DataFrame.add_prefix
+generated/pandas.DataFrame.add_suffix,../reference/api/pandas.DataFrame.add_suffix
+generated/pandas.DataFrame.agg,../reference/api/pandas.DataFrame.agg
+generated/pandas.DataFrame.aggregate,../reference/api/pandas.DataFrame.aggregate
+generated/pandas.DataFrame.align,../reference/api/pandas.DataFrame.align
+generated/pandas.DataFrame.all,../reference/api/pandas.DataFrame.all
+generated/pandas.DataFrame.any,../reference/api/pandas.DataFrame.any
+generated/pandas.DataFrame.append,../reference/api/pandas.DataFrame.append
+generated/pandas.DataFrame.apply,../reference/api/pandas.DataFrame.apply
+generated/pandas.DataFrame.applymap,../reference/api/pandas.DataFrame.applymap
+generated/pandas.DataFrame.as_blocks,../reference/api/pandas.DataFrame.as_blocks
+generated/pandas.DataFrame.asfreq,../reference/api/pandas.DataFrame.asfreq
+generated/pandas.DataFrame.as_matrix,../reference/api/pandas.DataFrame.as_matrix
+generated/pandas.DataFrame.asof,../reference/api/pandas.DataFrame.asof
+generated/pandas.DataFrame.assign,../reference/api/pandas.DataFrame.assign
+generated/pandas.DataFrame.astype,../reference/api/pandas.DataFrame.astype
+generated/pandas.DataFrame.at,../reference/api/pandas.DataFrame.at
+generated/pandas.DataFrame.at_time,../reference/api/pandas.DataFrame.at_time
+generated/pandas.DataFrame.axes,../reference/api/pandas.DataFrame.axes
+generated/pandas.DataFrame.between_time,../reference/api/pandas.DataFrame.between_time
+generated/pandas.DataFrame.bfill,../reference/api/pandas.DataFrame.bfill
+generated/pandas.DataFrame.blocks,../reference/api/pandas.DataFrame.blocks
+generated/pandas.DataFrame.bool,../reference/api/pandas.DataFrame.bool
+generated/pandas.DataFrame.boxplot,../reference/api/pandas.DataFrame.boxplot
+generated/pandas.DataFrame.clip,../reference/api/pandas.DataFrame.clip
+generated/pandas.DataFrame.clip_lower,../reference/api/pandas.DataFrame.clip_lower
+generated/pandas.DataFrame.clip_upper,../reference/api/pandas.DataFrame.clip_upper
+generated/pandas.DataFrame.columns,../reference/api/pandas.DataFrame.columns
+generated/pandas.DataFrame.combine_first,../reference/api/pandas.DataFrame.combine_first
+generated/pandas.DataFrame.combine,../reference/api/pandas.DataFrame.combine
+generated/pandas.DataFrame.compound,../reference/api/pandas.DataFrame.compound
+generated/pandas.DataFrame.convert_objects,../reference/api/pandas.DataFrame.convert_objects
+generated/pandas.DataFrame.copy,../reference/api/pandas.DataFrame.copy
+generated/pandas.DataFrame.corr,../reference/api/pandas.DataFrame.corr
+generated/pandas.DataFrame.corrwith,../reference/api/pandas.DataFrame.corrwith
+generated/pandas.DataFrame.count,../reference/api/pandas.DataFrame.count
+generated/pandas.DataFrame.cov,../reference/api/pandas.DataFrame.cov
+generated/pandas.DataFrame.cummax,../reference/api/pandas.DataFrame.cummax
+generated/pandas.DataFrame.cummin,../reference/api/pandas.DataFrame.cummin
+generated/pandas.DataFrame.cumprod,../reference/api/pandas.DataFrame.cumprod
+generated/pandas.DataFrame.cumsum,../reference/api/pandas.DataFrame.cumsum
+generated/pandas.DataFrame.describe,../reference/api/pandas.DataFrame.describe
+generated/pandas.DataFrame.diff,../reference/api/pandas.DataFrame.diff
+generated/pandas.DataFrame.div,../reference/api/pandas.DataFrame.div
+generated/pandas.DataFrame.divide,../reference/api/pandas.DataFrame.divide
+generated/pandas.DataFrame.dot,../reference/api/pandas.DataFrame.dot
+generated/pandas.DataFrame.drop_duplicates,../reference/api/pandas.DataFrame.drop_duplicates
+generated/pandas.DataFrame.drop,../reference/api/pandas.DataFrame.drop
+generated/pandas.DataFrame.droplevel,../reference/api/pandas.DataFrame.droplevel
+generated/pandas.DataFrame.dropna,../reference/api/pandas.DataFrame.dropna
+generated/pandas.DataFrame.dtypes,../reference/api/pandas.DataFrame.dtypes
+generated/pandas.DataFrame.duplicated,../reference/api/pandas.DataFrame.duplicated
+generated/pandas.DataFrame.empty,../reference/api/pandas.DataFrame.empty
+generated/pandas.DataFrame.eq,../reference/api/pandas.DataFrame.eq
+generated/pandas.DataFrame.equals,../reference/api/pandas.DataFrame.equals
+generated/pandas.DataFrame.eval,../reference/api/pandas.DataFrame.eval
+generated/pandas.DataFrame.ewm,../reference/api/pandas.DataFrame.ewm
+generated/pandas.DataFrame.expanding,../reference/api/pandas.DataFrame.expanding
+generated/pandas.DataFrame.ffill,../reference/api/pandas.DataFrame.ffill
+generated/pandas.DataFrame.fillna,../reference/api/pandas.DataFrame.fillna
+generated/pandas.DataFrame.filter,../reference/api/pandas.DataFrame.filter
+generated/pandas.DataFrame.first,../reference/api/pandas.DataFrame.first
+generated/pandas.DataFrame.first_valid_index,../reference/api/pandas.DataFrame.first_valid_index
+generated/pandas.DataFrame.floordiv,../reference/api/pandas.DataFrame.floordiv
+generated/pandas.DataFrame.from_csv,../reference/api/pandas.DataFrame.from_csv
+generated/pandas.DataFrame.from_dict,../reference/api/pandas.DataFrame.from_dict
+generated/pandas.DataFrame.from_items,../reference/api/pandas.DataFrame.from_items
+generated/pandas.DataFrame.from_records,../reference/api/pandas.DataFrame.from_records
+generated/pandas.DataFrame.ftypes,../reference/api/pandas.DataFrame.ftypes
+generated/pandas.DataFrame.ge,../reference/api/pandas.DataFrame.ge
+generated/pandas.DataFrame.get_dtype_counts,../reference/api/pandas.DataFrame.get_dtype_counts
+generated/pandas.DataFrame.get_ftype_counts,../reference/api/pandas.DataFrame.get_ftype_counts
+generated/pandas.DataFrame.get,../reference/api/pandas.DataFrame.get
+generated/pandas.DataFrame.get_value,../reference/api/pandas.DataFrame.get_value
+generated/pandas.DataFrame.get_values,../reference/api/pandas.DataFrame.get_values
+generated/pandas.DataFrame.groupby,../reference/api/pandas.DataFrame.groupby
+generated/pandas.DataFrame.gt,../reference/api/pandas.DataFrame.gt
+generated/pandas.DataFrame.head,../reference/api/pandas.DataFrame.head
+generated/pandas.DataFrame.hist,../reference/api/pandas.DataFrame.hist
+generated/pandas.DataFrame,../reference/api/pandas.DataFrame
+generated/pandas.DataFrame.iat,../reference/api/pandas.DataFrame.iat
+generated/pandas.DataFrame.idxmax,../reference/api/pandas.DataFrame.idxmax
+generated/pandas.DataFrame.idxmin,../reference/api/pandas.DataFrame.idxmin
+generated/pandas.DataFrame.iloc,../reference/api/pandas.DataFrame.iloc
+generated/pandas.DataFrame.index,../reference/api/pandas.DataFrame.index
+generated/pandas.DataFrame.infer_objects,../reference/api/pandas.DataFrame.infer_objects
+generated/pandas.DataFrame.info,../reference/api/pandas.DataFrame.info
+generated/pandas.DataFrame.insert,../reference/api/pandas.DataFrame.insert
+generated/pandas.DataFrame.interpolate,../reference/api/pandas.DataFrame.interpolate
+generated/pandas.DataFrame.is_copy,../reference/api/pandas.DataFrame.is_copy
+generated/pandas.DataFrame.isin,../reference/api/pandas.DataFrame.isin
+generated/pandas.DataFrame.isna,../reference/api/pandas.DataFrame.isna
+generated/pandas.DataFrame.isnull,../reference/api/pandas.DataFrame.isnull
+generated/pandas.DataFrame.items,../reference/api/pandas.DataFrame.items
+generated/pandas.DataFrame.__iter__,../reference/api/pandas.DataFrame.__iter__
+generated/pandas.DataFrame.iteritems,../reference/api/pandas.DataFrame.iteritems
+generated/pandas.DataFrame.iterrows,../reference/api/pandas.DataFrame.iterrows
+generated/pandas.DataFrame.itertuples,../reference/api/pandas.DataFrame.itertuples
+generated/pandas.DataFrame.ix,../reference/api/pandas.DataFrame.ix
+generated/pandas.DataFrame.join,../reference/api/pandas.DataFrame.join
+generated/pandas.DataFrame.keys,../reference/api/pandas.DataFrame.keys
+generated/pandas.DataFrame.kurt,../reference/api/pandas.DataFrame.kurt
+generated/pandas.DataFrame.kurtosis,../reference/api/pandas.DataFrame.kurtosis
+generated/pandas.DataFrame.last,../reference/api/pandas.DataFrame.last
+generated/pandas.DataFrame.last_valid_index,../reference/api/pandas.DataFrame.last_valid_index
+generated/pandas.DataFrame.le,../reference/api/pandas.DataFrame.le
+generated/pandas.DataFrame.loc,../reference/api/pandas.DataFrame.loc
+generated/pandas.DataFrame.lookup,../reference/api/pandas.DataFrame.lookup
+generated/pandas.DataFrame.lt,../reference/api/pandas.DataFrame.lt
+generated/pandas.DataFrame.mad,../reference/api/pandas.DataFrame.mad
+generated/pandas.DataFrame.mask,../reference/api/pandas.DataFrame.mask
+generated/pandas.DataFrame.max,../reference/api/pandas.DataFrame.max
+generated/pandas.DataFrame.mean,../reference/api/pandas.DataFrame.mean
+generated/pandas.DataFrame.median,../reference/api/pandas.DataFrame.median
+generated/pandas.DataFrame.melt,../reference/api/pandas.DataFrame.melt
+generated/pandas.DataFrame.memory_usage,../reference/api/pandas.DataFrame.memory_usage
+generated/pandas.DataFrame.merge,../reference/api/pandas.DataFrame.merge
+generated/pandas.DataFrame.min,../reference/api/pandas.DataFrame.min
+generated/pandas.DataFrame.mode,../reference/api/pandas.DataFrame.mode
+generated/pandas.DataFrame.mod,../reference/api/pandas.DataFrame.mod
+generated/pandas.DataFrame.mul,../reference/api/pandas.DataFrame.mul
+generated/pandas.DataFrame.multiply,../reference/api/pandas.DataFrame.multiply
+generated/pandas.DataFrame.ndim,../reference/api/pandas.DataFrame.ndim
+generated/pandas.DataFrame.ne,../reference/api/pandas.DataFrame.ne
+generated/pandas.DataFrame.nlargest,../reference/api/pandas.DataFrame.nlargest
+generated/pandas.DataFrame.notna,../reference/api/pandas.DataFrame.notna
+generated/pandas.DataFrame.notnull,../reference/api/pandas.DataFrame.notnull
+generated/pandas.DataFrame.nsmallest,../reference/api/pandas.DataFrame.nsmallest
+generated/pandas.DataFrame.nunique,../reference/api/pandas.DataFrame.nunique
+generated/pandas.DataFrame.pct_change,../reference/api/pandas.DataFrame.pct_change
+generated/pandas.DataFrame.pipe,../reference/api/pandas.DataFrame.pipe
+generated/pandas.DataFrame.pivot,../reference/api/pandas.DataFrame.pivot
+generated/pandas.DataFrame.pivot_table,../reference/api/pandas.DataFrame.pivot_table
+generated/pandas.DataFrame.plot.barh,../reference/api/pandas.DataFrame.plot.barh
+generated/pandas.DataFrame.plot.bar,../reference/api/pandas.DataFrame.plot.bar
+generated/pandas.DataFrame.plot.box,../reference/api/pandas.DataFrame.plot.box
+generated/pandas.DataFrame.plot.density,../reference/api/pandas.DataFrame.plot.density
+generated/pandas.DataFrame.plot.hexbin,../reference/api/pandas.DataFrame.plot.hexbin
+generated/pandas.DataFrame.plot.hist,../reference/api/pandas.DataFrame.plot.hist
+generated/pandas.DataFrame.plot,../reference/api/pandas.DataFrame.plot
+generated/pandas.DataFrame.plot.kde,../reference/api/pandas.DataFrame.plot.kde
+generated/pandas.DataFrame.plot.line,../reference/api/pandas.DataFrame.plot.line
+generated/pandas.DataFrame.plot.pie,../reference/api/pandas.DataFrame.plot.pie
+generated/pandas.DataFrame.plot.scatter,../reference/api/pandas.DataFrame.plot.scatter
+generated/pandas.DataFrame.pop,../reference/api/pandas.DataFrame.pop
+generated/pandas.DataFrame.pow,../reference/api/pandas.DataFrame.pow
+generated/pandas.DataFrame.prod,../reference/api/pandas.DataFrame.prod
+generated/pandas.DataFrame.product,../reference/api/pandas.DataFrame.product
+generated/pandas.DataFrame.quantile,../reference/api/pandas.DataFrame.quantile
+generated/pandas.DataFrame.query,../reference/api/pandas.DataFrame.query
+generated/pandas.DataFrame.radd,../reference/api/pandas.DataFrame.radd
+generated/pandas.DataFrame.rank,../reference/api/pandas.DataFrame.rank
+generated/pandas.DataFrame.rdiv,../reference/api/pandas.DataFrame.rdiv
+generated/pandas.DataFrame.reindex_axis,../reference/api/pandas.DataFrame.reindex_axis
+generated/pandas.DataFrame.reindex,../reference/api/pandas.DataFrame.reindex
+generated/pandas.DataFrame.reindex_like,../reference/api/pandas.DataFrame.reindex_like
+generated/pandas.DataFrame.rename_axis,../reference/api/pandas.DataFrame.rename_axis
+generated/pandas.DataFrame.rename,../reference/api/pandas.DataFrame.rename
+generated/pandas.DataFrame.reorder_levels,../reference/api/pandas.DataFrame.reorder_levels
+generated/pandas.DataFrame.replace,../reference/api/pandas.DataFrame.replace
+generated/pandas.DataFrame.resample,../reference/api/pandas.DataFrame.resample
+generated/pandas.DataFrame.reset_index,../reference/api/pandas.DataFrame.reset_index
+generated/pandas.DataFrame.rfloordiv,../reference/api/pandas.DataFrame.rfloordiv
+generated/pandas.DataFrame.rmod,../reference/api/pandas.DataFrame.rmod
+generated/pandas.DataFrame.rmul,../reference/api/pandas.DataFrame.rmul
+generated/pandas.DataFrame.rolling,../reference/api/pandas.DataFrame.rolling
+generated/pandas.DataFrame.round,../reference/api/pandas.DataFrame.round
+generated/pandas.DataFrame.rpow,../reference/api/pandas.DataFrame.rpow
+generated/pandas.DataFrame.rsub,../reference/api/pandas.DataFrame.rsub
+generated/pandas.DataFrame.rtruediv,../reference/api/pandas.DataFrame.rtruediv
+generated/pandas.DataFrame.sample,../reference/api/pandas.DataFrame.sample
+generated/pandas.DataFrame.select_dtypes,../reference/api/pandas.DataFrame.select_dtypes
+generated/pandas.DataFrame.select,../reference/api/pandas.DataFrame.select
+generated/pandas.DataFrame.sem,../reference/api/pandas.DataFrame.sem
+generated/pandas.DataFrame.set_axis,../reference/api/pandas.DataFrame.set_axis
+generated/pandas.DataFrame.set_index,../reference/api/pandas.DataFrame.set_index
+generated/pandas.DataFrame.set_value,../reference/api/pandas.DataFrame.set_value
+generated/pandas.DataFrame.shape,../reference/api/pandas.DataFrame.shape
+generated/pandas.DataFrame.shift,../reference/api/pandas.DataFrame.shift
+generated/pandas.DataFrame.size,../reference/api/pandas.DataFrame.size
+generated/pandas.DataFrame.skew,../reference/api/pandas.DataFrame.skew
+generated/pandas.DataFrame.slice_shift,../reference/api/pandas.DataFrame.slice_shift
+generated/pandas.DataFrame.sort_index,../reference/api/pandas.DataFrame.sort_index
+generated/pandas.DataFrame.sort_values,../reference/api/pandas.DataFrame.sort_values
+generated/pandas.DataFrame.squeeze,../reference/api/pandas.DataFrame.squeeze
+generated/pandas.DataFrame.stack,../reference/api/pandas.DataFrame.stack
+generated/pandas.DataFrame.std,../reference/api/pandas.DataFrame.std
+generated/pandas.DataFrame.style,../reference/api/pandas.DataFrame.style
+generated/pandas.DataFrame.sub,../reference/api/pandas.DataFrame.sub
+generated/pandas.DataFrame.subtract,../reference/api/pandas.DataFrame.subtract
+generated/pandas.DataFrame.sum,../reference/api/pandas.DataFrame.sum
+generated/pandas.DataFrame.swapaxes,../reference/api/pandas.DataFrame.swapaxes
+generated/pandas.DataFrame.swaplevel,../reference/api/pandas.DataFrame.swaplevel
+generated/pandas.DataFrame.tail,../reference/api/pandas.DataFrame.tail
+generated/pandas.DataFrame.take,../reference/api/pandas.DataFrame.take
+generated/pandas.DataFrame.T,../reference/api/pandas.DataFrame.T
+generated/pandas.DataFrame.timetuple,../reference/api/pandas.DataFrame.timetuple
+generated/pandas.DataFrame.to_clipboard,../reference/api/pandas.DataFrame.to_clipboard
+generated/pandas.DataFrame.to_csv,../reference/api/pandas.DataFrame.to_csv
+generated/pandas.DataFrame.to_dense,../reference/api/pandas.DataFrame.to_dense
+generated/pandas.DataFrame.to_dict,../reference/api/pandas.DataFrame.to_dict
+generated/pandas.DataFrame.to_excel,../reference/api/pandas.DataFrame.to_excel
+generated/pandas.DataFrame.to_feather,../reference/api/pandas.DataFrame.to_feather
+generated/pandas.DataFrame.to_gbq,../reference/api/pandas.DataFrame.to_gbq
+generated/pandas.DataFrame.to_hdf,../reference/api/pandas.DataFrame.to_hdf
+generated/pandas.DataFrame.to_html,../reference/api/pandas.DataFrame.to_html
+generated/pandas.DataFrame.to_json,../reference/api/pandas.DataFrame.to_json
+generated/pandas.DataFrame.to_latex,../reference/api/pandas.DataFrame.to_latex
+generated/pandas.DataFrame.to_msgpack,../reference/api/pandas.DataFrame.to_msgpack
+generated/pandas.DataFrame.to_numpy,../reference/api/pandas.DataFrame.to_numpy
+generated/pandas.DataFrame.to_panel,../reference/api/pandas.DataFrame.to_panel
+generated/pandas.DataFrame.to_parquet,../reference/api/pandas.DataFrame.to_parquet
+generated/pandas.DataFrame.to_period,../reference/api/pandas.DataFrame.to_period
+generated/pandas.DataFrame.to_pickle,../reference/api/pandas.DataFrame.to_pickle
+generated/pandas.DataFrame.to_records,../reference/api/pandas.DataFrame.to_records
+generated/pandas.DataFrame.to_sparse,../reference/api/pandas.DataFrame.to_sparse
+generated/pandas.DataFrame.to_sql,../reference/api/pandas.DataFrame.to_sql
+generated/pandas.DataFrame.to_stata,../reference/api/pandas.DataFrame.to_stata
+generated/pandas.DataFrame.to_string,../reference/api/pandas.DataFrame.to_string
+generated/pandas.DataFrame.to_timestamp,../reference/api/pandas.DataFrame.to_timestamp
+generated/pandas.DataFrame.to_xarray,../reference/api/pandas.DataFrame.to_xarray
+generated/pandas.DataFrame.transform,../reference/api/pandas.DataFrame.transform
+generated/pandas.DataFrame.transpose,../reference/api/pandas.DataFrame.transpose
+generated/pandas.DataFrame.truediv,../reference/api/pandas.DataFrame.truediv
+generated/pandas.DataFrame.truncate,../reference/api/pandas.DataFrame.truncate
+generated/pandas.DataFrame.tshift,../reference/api/pandas.DataFrame.tshift
+generated/pandas.DataFrame.tz_convert,../reference/api/pandas.DataFrame.tz_convert
+generated/pandas.DataFrame.tz_localize,../reference/api/pandas.DataFrame.tz_localize
+generated/pandas.DataFrame.unstack,../reference/api/pandas.DataFrame.unstack
+generated/pandas.DataFrame.update,../reference/api/pandas.DataFrame.update
+generated/pandas.DataFrame.values,../reference/api/pandas.DataFrame.values
+generated/pandas.DataFrame.var,../reference/api/pandas.DataFrame.var
+generated/pandas.DataFrame.where,../reference/api/pandas.DataFrame.where
+generated/pandas.DataFrame.xs,../reference/api/pandas.DataFrame.xs
+generated/pandas.date_range,../reference/api/pandas.date_range
+generated/pandas.DatetimeIndex.ceil,../reference/api/pandas.DatetimeIndex.ceil
+generated/pandas.DatetimeIndex.date,../reference/api/pandas.DatetimeIndex.date
+generated/pandas.DatetimeIndex.day,../reference/api/pandas.DatetimeIndex.day
+generated/pandas.DatetimeIndex.day_name,../reference/api/pandas.DatetimeIndex.day_name
+generated/pandas.DatetimeIndex.dayofweek,../reference/api/pandas.DatetimeIndex.dayofweek
+generated/pandas.DatetimeIndex.dayofyear,../reference/api/pandas.DatetimeIndex.dayofyear
+generated/pandas.DatetimeIndex.floor,../reference/api/pandas.DatetimeIndex.floor
+generated/pandas.DatetimeIndex.freq,../reference/api/pandas.DatetimeIndex.freq
+generated/pandas.DatetimeIndex.freqstr,../reference/api/pandas.DatetimeIndex.freqstr
+generated/pandas.DatetimeIndex.hour,../reference/api/pandas.DatetimeIndex.hour
+generated/pandas.DatetimeIndex,../reference/api/pandas.DatetimeIndex
+generated/pandas.DatetimeIndex.indexer_at_time,../reference/api/pandas.DatetimeIndex.indexer_at_time
+generated/pandas.DatetimeIndex.indexer_between_time,../reference/api/pandas.DatetimeIndex.indexer_between_time
+generated/pandas.DatetimeIndex.inferred_freq,../reference/api/pandas.DatetimeIndex.inferred_freq
+generated/pandas.DatetimeIndex.is_leap_year,../reference/api/pandas.DatetimeIndex.is_leap_year
+generated/pandas.DatetimeIndex.is_month_end,../reference/api/pandas.DatetimeIndex.is_month_end
+generated/pandas.DatetimeIndex.is_month_start,../reference/api/pandas.DatetimeIndex.is_month_start
+generated/pandas.DatetimeIndex.is_quarter_end,../reference/api/pandas.DatetimeIndex.is_quarter_end
+generated/pandas.DatetimeIndex.is_quarter_start,../reference/api/pandas.DatetimeIndex.is_quarter_start
+generated/pandas.DatetimeIndex.is_year_end,../reference/api/pandas.DatetimeIndex.is_year_end
+generated/pandas.DatetimeIndex.is_year_start,../reference/api/pandas.DatetimeIndex.is_year_start
+generated/pandas.DatetimeIndex.microsecond,../reference/api/pandas.DatetimeIndex.microsecond
+generated/pandas.DatetimeIndex.minute,../reference/api/pandas.DatetimeIndex.minute
+generated/pandas.DatetimeIndex.month,../reference/api/pandas.DatetimeIndex.month
+generated/pandas.DatetimeIndex.month_name,../reference/api/pandas.DatetimeIndex.month_name
+generated/pandas.DatetimeIndex.nanosecond,../reference/api/pandas.DatetimeIndex.nanosecond
+generated/pandas.DatetimeIndex.normalize,../reference/api/pandas.DatetimeIndex.normalize
+generated/pandas.DatetimeIndex.quarter,../reference/api/pandas.DatetimeIndex.quarter
+generated/pandas.DatetimeIndex.round,../reference/api/pandas.DatetimeIndex.round
+generated/pandas.DatetimeIndex.second,../reference/api/pandas.DatetimeIndex.second
+generated/pandas.DatetimeIndex.snap,../reference/api/pandas.DatetimeIndex.snap
+generated/pandas.DatetimeIndex.strftime,../reference/api/pandas.DatetimeIndex.strftime
+generated/pandas.DatetimeIndex.time,../reference/api/pandas.DatetimeIndex.time
+generated/pandas.DatetimeIndex.timetz,../reference/api/pandas.DatetimeIndex.timetz
+generated/pandas.DatetimeIndex.to_frame,../reference/api/pandas.DatetimeIndex.to_frame
+generated/pandas.DatetimeIndex.to_perioddelta,../reference/api/pandas.DatetimeIndex.to_perioddelta
+generated/pandas.DatetimeIndex.to_period,../reference/api/pandas.DatetimeIndex.to_period
+generated/pandas.DatetimeIndex.to_pydatetime,../reference/api/pandas.DatetimeIndex.to_pydatetime
+generated/pandas.DatetimeIndex.to_series,../reference/api/pandas.DatetimeIndex.to_series
+generated/pandas.DatetimeIndex.tz_convert,../reference/api/pandas.DatetimeIndex.tz_convert
+generated/pandas.DatetimeIndex.tz,../reference/api/pandas.DatetimeIndex.tz
+generated/pandas.DatetimeIndex.tz_localize,../reference/api/pandas.DatetimeIndex.tz_localize
+generated/pandas.DatetimeIndex.weekday,../reference/api/pandas.DatetimeIndex.weekday
+generated/pandas.DatetimeIndex.week,../reference/api/pandas.DatetimeIndex.week
+generated/pandas.DatetimeIndex.weekofyear,../reference/api/pandas.DatetimeIndex.weekofyear
+generated/pandas.DatetimeIndex.year,../reference/api/pandas.DatetimeIndex.year
+generated/pandas.DatetimeTZDtype.base,../reference/api/pandas.DatetimeTZDtype.base
+generated/pandas.DatetimeTZDtype.construct_array_type,../reference/api/pandas.DatetimeTZDtype.construct_array_type
+generated/pandas.DatetimeTZDtype.construct_from_string,../reference/api/pandas.DatetimeTZDtype.construct_from_string
+generated/pandas.DatetimeTZDtype,../reference/api/pandas.DatetimeTZDtype
+generated/pandas.DatetimeTZDtype.isbuiltin,../reference/api/pandas.DatetimeTZDtype.isbuiltin
+generated/pandas.DatetimeTZDtype.is_dtype,../reference/api/pandas.DatetimeTZDtype.is_dtype
+generated/pandas.DatetimeTZDtype.isnative,../reference/api/pandas.DatetimeTZDtype.isnative
+generated/pandas.DatetimeTZDtype.itemsize,../reference/api/pandas.DatetimeTZDtype.itemsize
+generated/pandas.DatetimeTZDtype.kind,../reference/api/pandas.DatetimeTZDtype.kind
+generated/pandas.DatetimeTZDtype.name,../reference/api/pandas.DatetimeTZDtype.name
+generated/pandas.DatetimeTZDtype.names,../reference/api/pandas.DatetimeTZDtype.names
+generated/pandas.DatetimeTZDtype.na_value,../reference/api/pandas.DatetimeTZDtype.na_value
+generated/pandas.DatetimeTZDtype.num,../reference/api/pandas.DatetimeTZDtype.num
+generated/pandas.DatetimeTZDtype.reset_cache,../reference/api/pandas.DatetimeTZDtype.reset_cache
+generated/pandas.DatetimeTZDtype.shape,../reference/api/pandas.DatetimeTZDtype.shape
+generated/pandas.DatetimeTZDtype.str,../reference/api/pandas.DatetimeTZDtype.str
+generated/pandas.DatetimeTZDtype.subdtype,../reference/api/pandas.DatetimeTZDtype.subdtype
+generated/pandas.DatetimeTZDtype.tz,../reference/api/pandas.DatetimeTZDtype.tz
+generated/pandas.DatetimeTZDtype.unit,../reference/api/pandas.DatetimeTZDtype.unit
+generated/pandas.describe_option,../reference/api/pandas.describe_option
+generated/pandas.errors.DtypeWarning,../reference/api/pandas.errors.DtypeWarning
+generated/pandas.errors.EmptyDataError,../reference/api/pandas.errors.EmptyDataError
+generated/pandas.errors.OutOfBoundsDatetime,../reference/api/pandas.errors.OutOfBoundsDatetime
+generated/pandas.errors.ParserError,../reference/api/pandas.errors.ParserError
+generated/pandas.errors.ParserWarning,../reference/api/pandas.errors.ParserWarning
+generated/pandas.errors.PerformanceWarning,../reference/api/pandas.errors.PerformanceWarning
+generated/pandas.errors.UnsortedIndexError,../reference/api/pandas.errors.UnsortedIndexError
+generated/pandas.errors.UnsupportedFunctionCall,../reference/api/pandas.errors.UnsupportedFunctionCall
+generated/pandas.eval,../reference/api/pandas.eval
+generated/pandas.ExcelFile.parse,../reference/api/pandas.ExcelFile.parse
+generated/pandas.ExcelWriter,../reference/api/pandas.ExcelWriter
+generated/pandas.factorize,../reference/api/pandas.factorize
+generated/pandas.Float64Index,../reference/api/pandas.Float64Index
+generated/pandas.get_dummies,../reference/api/pandas.get_dummies
+generated/pandas.get_option,../reference/api/pandas.get_option
+generated/pandas.Grouper,../reference/api/pandas.Grouper
+generated/pandas.HDFStore.append,../reference/api/pandas.HDFStore.append
+generated/pandas.HDFStore.get,../reference/api/pandas.HDFStore.get
+generated/pandas.HDFStore.groups,../reference/api/pandas.HDFStore.groups
+generated/pandas.HDFStore.info,../reference/api/pandas.HDFStore.info
+generated/pandas.HDFStore.keys,../reference/api/pandas.HDFStore.keys
+generated/pandas.HDFStore.put,../reference/api/pandas.HDFStore.put
+generated/pandas.HDFStore.select,../reference/api/pandas.HDFStore.select
+generated/pandas.HDFStore.walk,../reference/api/pandas.HDFStore.walk
+generated/pandas.Index.all,../reference/api/pandas.Index.all
+generated/pandas.Index.any,../reference/api/pandas.Index.any
+generated/pandas.Index.append,../reference/api/pandas.Index.append
+generated/pandas.Index.argmax,../reference/api/pandas.Index.argmax
+generated/pandas.Index.argmin,../reference/api/pandas.Index.argmin
+generated/pandas.Index.argsort,../reference/api/pandas.Index.argsort
+generated/pandas.Index.array,../reference/api/pandas.Index.array
+generated/pandas.Index.asi8,../reference/api/pandas.Index.asi8
+generated/pandas.Index.asof,../reference/api/pandas.Index.asof
+generated/pandas.Index.asof_locs,../reference/api/pandas.Index.asof_locs
+generated/pandas.Index.astype,../reference/api/pandas.Index.astype
+generated/pandas.Index.base,../reference/api/pandas.Index.base
+generated/pandas.Index.contains,../reference/api/pandas.Index.contains
+generated/pandas.Index.copy,../reference/api/pandas.Index.copy
+generated/pandas.Index.data,../reference/api/pandas.Index.data
+generated/pandas.Index.delete,../reference/api/pandas.Index.delete
+generated/pandas.Index.difference,../reference/api/pandas.Index.difference
+generated/pandas.Index.drop_duplicates,../reference/api/pandas.Index.drop_duplicates
+generated/pandas.Index.drop,../reference/api/pandas.Index.drop
+generated/pandas.Index.droplevel,../reference/api/pandas.Index.droplevel
+generated/pandas.Index.dropna,../reference/api/pandas.Index.dropna
+generated/pandas.Index.dtype,../reference/api/pandas.Index.dtype
+generated/pandas.Index.dtype_str,../reference/api/pandas.Index.dtype_str
+generated/pandas.Index.duplicated,../reference/api/pandas.Index.duplicated
+generated/pandas.Index.empty,../reference/api/pandas.Index.empty
+generated/pandas.Index.equals,../reference/api/pandas.Index.equals
+generated/pandas.Index.factorize,../reference/api/pandas.Index.factorize
+generated/pandas.Index.fillna,../reference/api/pandas.Index.fillna
+generated/pandas.Index.flags,../reference/api/pandas.Index.flags
+generated/pandas.Index.format,../reference/api/pandas.Index.format
+generated/pandas.Index.get_duplicates,../reference/api/pandas.Index.get_duplicates
+generated/pandas.Index.get_indexer_for,../reference/api/pandas.Index.get_indexer_for
+generated/pandas.Index.get_indexer,../reference/api/pandas.Index.get_indexer
+generated/pandas.Index.get_indexer_non_unique,../reference/api/pandas.Index.get_indexer_non_unique
+generated/pandas.Index.get_level_values,../reference/api/pandas.Index.get_level_values
+generated/pandas.Index.get_loc,../reference/api/pandas.Index.get_loc
+generated/pandas.Index.get_slice_bound,../reference/api/pandas.Index.get_slice_bound
+generated/pandas.Index.get_value,../reference/api/pandas.Index.get_value
+generated/pandas.Index.get_values,../reference/api/pandas.Index.get_values
+generated/pandas.Index.groupby,../reference/api/pandas.Index.groupby
+generated/pandas.Index.has_duplicates,../reference/api/pandas.Index.has_duplicates
+generated/pandas.Index.hasnans,../reference/api/pandas.Index.hasnans
+generated/pandas.Index.holds_integer,../reference/api/pandas.Index.holds_integer
+generated/pandas.Index,../reference/api/pandas.Index
+generated/pandas.Index.identical,../reference/api/pandas.Index.identical
+generated/pandas.Index.inferred_type,../reference/api/pandas.Index.inferred_type
+generated/pandas.Index.insert,../reference/api/pandas.Index.insert
+generated/pandas.Index.intersection,../reference/api/pandas.Index.intersection
+generated/pandas.Index.is_all_dates,../reference/api/pandas.Index.is_all_dates
+generated/pandas.Index.is_boolean,../reference/api/pandas.Index.is_boolean
+generated/pandas.Index.is_categorical,../reference/api/pandas.Index.is_categorical
+generated/pandas.Index.is_floating,../reference/api/pandas.Index.is_floating
+generated/pandas.Index.is_,../reference/api/pandas.Index.is_
+generated/pandas.Index.isin,../reference/api/pandas.Index.isin
+generated/pandas.Index.is_integer,../reference/api/pandas.Index.is_integer
+generated/pandas.Index.is_interval,../reference/api/pandas.Index.is_interval
+generated/pandas.Index.is_lexsorted_for_tuple,../reference/api/pandas.Index.is_lexsorted_for_tuple
+generated/pandas.Index.is_mixed,../reference/api/pandas.Index.is_mixed
+generated/pandas.Index.is_monotonic_decreasing,../reference/api/pandas.Index.is_monotonic_decreasing
+generated/pandas.Index.is_monotonic,../reference/api/pandas.Index.is_monotonic
+generated/pandas.Index.is_monotonic_increasing,../reference/api/pandas.Index.is_monotonic_increasing
+generated/pandas.Index.isna,../reference/api/pandas.Index.isna
+generated/pandas.Index.isnull,../reference/api/pandas.Index.isnull
+generated/pandas.Index.is_numeric,../reference/api/pandas.Index.is_numeric
+generated/pandas.Index.is_object,../reference/api/pandas.Index.is_object
+generated/pandas.Index.is_type_compatible,../reference/api/pandas.Index.is_type_compatible
+generated/pandas.Index.is_unique,../reference/api/pandas.Index.is_unique
+generated/pandas.Index.item,../reference/api/pandas.Index.item
+generated/pandas.Index.itemsize,../reference/api/pandas.Index.itemsize
+generated/pandas.Index.join,../reference/api/pandas.Index.join
+generated/pandas.Index.map,../reference/api/pandas.Index.map
+generated/pandas.Index.max,../reference/api/pandas.Index.max
+generated/pandas.Index.memory_usage,../reference/api/pandas.Index.memory_usage
+generated/pandas.Index.min,../reference/api/pandas.Index.min
+generated/pandas.Index.name,../reference/api/pandas.Index.name
+generated/pandas.Index.names,../reference/api/pandas.Index.names
+generated/pandas.Index.nbytes,../reference/api/pandas.Index.nbytes
+generated/pandas.Index.ndim,../reference/api/pandas.Index.ndim
+generated/pandas.Index.nlevels,../reference/api/pandas.Index.nlevels
+generated/pandas.Index.notna,../reference/api/pandas.Index.notna
+generated/pandas.Index.notnull,../reference/api/pandas.Index.notnull
+generated/pandas.Index.nunique,../reference/api/pandas.Index.nunique
+generated/pandas.Index.putmask,../reference/api/pandas.Index.putmask
+generated/pandas.Index.ravel,../reference/api/pandas.Index.ravel
+generated/pandas.Index.reindex,../reference/api/pandas.Index.reindex
+generated/pandas.Index.rename,../reference/api/pandas.Index.rename
+generated/pandas.Index.repeat,../reference/api/pandas.Index.repeat
+generated/pandas.Index.searchsorted,../reference/api/pandas.Index.searchsorted
+generated/pandas.Index.set_names,../reference/api/pandas.Index.set_names
+generated/pandas.Index.set_value,../reference/api/pandas.Index.set_value
+generated/pandas.Index.shape,../reference/api/pandas.Index.shape
+generated/pandas.Index.shift,../reference/api/pandas.Index.shift
+generated/pandas.Index.size,../reference/api/pandas.Index.size
+generated/pandas.IndexSlice,../reference/api/pandas.IndexSlice
+generated/pandas.Index.slice_indexer,../reference/api/pandas.Index.slice_indexer
+generated/pandas.Index.slice_locs,../reference/api/pandas.Index.slice_locs
+generated/pandas.Index.sort,../reference/api/pandas.Index.sort
+generated/pandas.Index.sortlevel,../reference/api/pandas.Index.sortlevel
+generated/pandas.Index.sort_values,../reference/api/pandas.Index.sort_values
+generated/pandas.Index.str,../reference/api/pandas.Index.str
+generated/pandas.Index.strides,../reference/api/pandas.Index.strides
+generated/pandas.Index.summary,../reference/api/pandas.Index.summary
+generated/pandas.Index.symmetric_difference,../reference/api/pandas.Index.symmetric_difference
+generated/pandas.Index.take,../reference/api/pandas.Index.take
+generated/pandas.Index.T,../reference/api/pandas.Index.T
+generated/pandas.Index.to_flat_index,../reference/api/pandas.Index.to_flat_index
+generated/pandas.Index.to_frame,../reference/api/pandas.Index.to_frame
+generated/pandas.Index.to_list,../reference/api/pandas.Index.to_list
+generated/pandas.Index.tolist,../reference/api/pandas.Index.tolist
+generated/pandas.Index.to_native_types,../reference/api/pandas.Index.to_native_types
+generated/pandas.Index.to_numpy,../reference/api/pandas.Index.to_numpy
+generated/pandas.Index.to_series,../reference/api/pandas.Index.to_series
+generated/pandas.Index.transpose,../reference/api/pandas.Index.transpose
+generated/pandas.Index.union,../reference/api/pandas.Index.union
+generated/pandas.Index.unique,../reference/api/pandas.Index.unique
+generated/pandas.Index.value_counts,../reference/api/pandas.Index.value_counts
+generated/pandas.Index.values,../reference/api/pandas.Index.values
+generated/pandas.Index.view,../reference/api/pandas.Index.view
+generated/pandas.Index.where,../reference/api/pandas.Index.where
+generated/pandas.infer_freq,../reference/api/pandas.infer_freq
+generated/pandas.Interval.closed,../reference/api/pandas.Interval.closed
+generated/pandas.Interval.closed_left,../reference/api/pandas.Interval.closed_left
+generated/pandas.Interval.closed_right,../reference/api/pandas.Interval.closed_right
+generated/pandas.Interval,../reference/api/pandas.Interval
+generated/pandas.IntervalIndex.closed,../reference/api/pandas.IntervalIndex.closed
+generated/pandas.IntervalIndex.contains,../reference/api/pandas.IntervalIndex.contains
+generated/pandas.IntervalIndex.from_arrays,../reference/api/pandas.IntervalIndex.from_arrays
+generated/pandas.IntervalIndex.from_breaks,../reference/api/pandas.IntervalIndex.from_breaks
+generated/pandas.IntervalIndex.from_tuples,../reference/api/pandas.IntervalIndex.from_tuples
+generated/pandas.IntervalIndex.get_indexer,../reference/api/pandas.IntervalIndex.get_indexer
+generated/pandas.IntervalIndex.get_loc,../reference/api/pandas.IntervalIndex.get_loc
+generated/pandas.IntervalIndex,../reference/api/pandas.IntervalIndex
+generated/pandas.IntervalIndex.is_non_overlapping_monotonic,../reference/api/pandas.IntervalIndex.is_non_overlapping_monotonic
+generated/pandas.IntervalIndex.is_overlapping,../reference/api/pandas.IntervalIndex.is_overlapping
+generated/pandas.IntervalIndex.left,../reference/api/pandas.IntervalIndex.left
+generated/pandas.IntervalIndex.length,../reference/api/pandas.IntervalIndex.length
+generated/pandas.IntervalIndex.mid,../reference/api/pandas.IntervalIndex.mid
+generated/pandas.IntervalIndex.overlaps,../reference/api/pandas.IntervalIndex.overlaps
+generated/pandas.IntervalIndex.right,../reference/api/pandas.IntervalIndex.right
+generated/pandas.IntervalIndex.set_closed,../reference/api/pandas.IntervalIndex.set_closed
+generated/pandas.IntervalIndex.to_tuples,../reference/api/pandas.IntervalIndex.to_tuples
+generated/pandas.IntervalIndex.values,../reference/api/pandas.IntervalIndex.values
+generated/pandas.Interval.left,../reference/api/pandas.Interval.left
+generated/pandas.Interval.length,../reference/api/pandas.Interval.length
+generated/pandas.Interval.mid,../reference/api/pandas.Interval.mid
+generated/pandas.Interval.open_left,../reference/api/pandas.Interval.open_left
+generated/pandas.Interval.open_right,../reference/api/pandas.Interval.open_right
+generated/pandas.Interval.overlaps,../reference/api/pandas.Interval.overlaps
+generated/pandas.interval_range,../reference/api/pandas.interval_range
+generated/pandas.Interval.right,../reference/api/pandas.Interval.right
+generated/pandas.io.formats.style.Styler.apply,../reference/api/pandas.io.formats.style.Styler.apply
+generated/pandas.io.formats.style.Styler.applymap,../reference/api/pandas.io.formats.style.Styler.applymap
+generated/pandas.io.formats.style.Styler.background_gradient,../reference/api/pandas.io.formats.style.Styler.background_gradient
+generated/pandas.io.formats.style.Styler.bar,../reference/api/pandas.io.formats.style.Styler.bar
+generated/pandas.io.formats.style.Styler.clear,../reference/api/pandas.io.formats.style.Styler.clear
+generated/pandas.io.formats.style.Styler.env,../reference/api/pandas.io.formats.style.Styler.env
+generated/pandas.io.formats.style.Styler.export,../reference/api/pandas.io.formats.style.Styler.export
+generated/pandas.io.formats.style.Styler.format,../reference/api/pandas.io.formats.style.Styler.format
+generated/pandas.io.formats.style.Styler.from_custom_template,../reference/api/pandas.io.formats.style.Styler.from_custom_template
+generated/pandas.io.formats.style.Styler.hide_columns,../reference/api/pandas.io.formats.style.Styler.hide_columns
+generated/pandas.io.formats.style.Styler.hide_index,../reference/api/pandas.io.formats.style.Styler.hide_index
+generated/pandas.io.formats.style.Styler.highlight_max,../reference/api/pandas.io.formats.style.Styler.highlight_max
+generated/pandas.io.formats.style.Styler.highlight_min,../reference/api/pandas.io.formats.style.Styler.highlight_min
+generated/pandas.io.formats.style.Styler.highlight_null,../reference/api/pandas.io.formats.style.Styler.highlight_null
+generated/pandas.io.formats.style.Styler,../reference/api/pandas.io.formats.style.Styler
+generated/pandas.io.formats.style.Styler.loader,../reference/api/pandas.io.formats.style.Styler.loader
+generated/pandas.io.formats.style.Styler.pipe,../reference/api/pandas.io.formats.style.Styler.pipe
+generated/pandas.io.formats.style.Styler.render,../reference/api/pandas.io.formats.style.Styler.render
+generated/pandas.io.formats.style.Styler.set_caption,../reference/api/pandas.io.formats.style.Styler.set_caption
+generated/pandas.io.formats.style.Styler.set_precision,../reference/api/pandas.io.formats.style.Styler.set_precision
+generated/pandas.io.formats.style.Styler.set_properties,../reference/api/pandas.io.formats.style.Styler.set_properties
+generated/pandas.io.formats.style.Styler.set_table_attributes,../reference/api/pandas.io.formats.style.Styler.set_table_attributes
+generated/pandas.io.formats.style.Styler.set_table_styles,../reference/api/pandas.io.formats.style.Styler.set_table_styles
+generated/pandas.io.formats.style.Styler.set_uuid,../reference/api/pandas.io.formats.style.Styler.set_uuid
+generated/pandas.io.formats.style.Styler.template,../reference/api/pandas.io.formats.style.Styler.template
+generated/pandas.io.formats.style.Styler.to_excel,../reference/api/pandas.io.formats.style.Styler.to_excel
+generated/pandas.io.formats.style.Styler.use,../reference/api/pandas.io.formats.style.Styler.use
+generated/pandas.io.formats.style.Styler.where,../reference/api/pandas.io.formats.style.Styler.where
+generated/pandas.io.json.build_table_schema,../reference/api/pandas.io.json.build_table_schema
+generated/pandas.io.json.json_normalize,../reference/api/pandas.io.json.json_normalize
+generated/pandas.io.stata.StataReader.data,../reference/api/pandas.io.stata.StataReader.data
+generated/pandas.io.stata.StataReader.data_label,../reference/api/pandas.io.stata.StataReader.data_label
+generated/pandas.io.stata.StataReader.value_labels,../reference/api/pandas.io.stata.StataReader.value_labels
+generated/pandas.io.stata.StataReader.variable_labels,../reference/api/pandas.io.stata.StataReader.variable_labels
+generated/pandas.io.stata.StataWriter.write_file,../reference/api/pandas.io.stata.StataWriter.write_file
+generated/pandas.isna,../reference/api/pandas.isna
+generated/pandas.isnull,../reference/api/pandas.isnull
+generated/pandas.melt,../reference/api/pandas.melt
+generated/pandas.merge_asof,../reference/api/pandas.merge_asof
+generated/pandas.merge,../reference/api/pandas.merge
+generated/pandas.merge_ordered,../reference/api/pandas.merge_ordered
+generated/pandas.MultiIndex.codes,../reference/api/pandas.MultiIndex.codes
+generated/pandas.MultiIndex.droplevel,../reference/api/pandas.MultiIndex.droplevel
+generated/pandas.MultiIndex.from_arrays,../reference/api/pandas.MultiIndex.from_arrays
+generated/pandas.MultiIndex.from_frame,../reference/api/pandas.MultiIndex.from_frame
+generated/pandas.MultiIndex.from_product,../reference/api/pandas.MultiIndex.from_product
+generated/pandas.MultiIndex.from_tuples,../reference/api/pandas.MultiIndex.from_tuples
+generated/pandas.MultiIndex.get_indexer,../reference/api/pandas.MultiIndex.get_indexer
+generated/pandas.MultiIndex.get_level_values,../reference/api/pandas.MultiIndex.get_level_values
+generated/pandas.MultiIndex.get_loc,../reference/api/pandas.MultiIndex.get_loc
+generated/pandas.MultiIndex.get_loc_level,../reference/api/pandas.MultiIndex.get_loc_level
+generated/pandas.MultiIndex,../reference/api/pandas.MultiIndex
+generated/pandas.MultiIndex.is_lexsorted,../reference/api/pandas.MultiIndex.is_lexsorted
+generated/pandas.MultiIndex.levels,../reference/api/pandas.MultiIndex.levels
+generated/pandas.MultiIndex.levshape,../reference/api/pandas.MultiIndex.levshape
+generated/pandas.MultiIndex.names,../reference/api/pandas.MultiIndex.names
+generated/pandas.MultiIndex.nlevels,../reference/api/pandas.MultiIndex.nlevels
+generated/pandas.MultiIndex.remove_unused_levels,../reference/api/pandas.MultiIndex.remove_unused_levels
+generated/pandas.MultiIndex.reorder_levels,../reference/api/pandas.MultiIndex.reorder_levels
+generated/pandas.MultiIndex.set_codes,../reference/api/pandas.MultiIndex.set_codes
+generated/pandas.MultiIndex.set_levels,../reference/api/pandas.MultiIndex.set_levels
+generated/pandas.MultiIndex.sortlevel,../reference/api/pandas.MultiIndex.sortlevel
+generated/pandas.MultiIndex.swaplevel,../reference/api/pandas.MultiIndex.swaplevel
+generated/pandas.MultiIndex.to_flat_index,../reference/api/pandas.MultiIndex.to_flat_index
+generated/pandas.MultiIndex.to_frame,../reference/api/pandas.MultiIndex.to_frame
+generated/pandas.MultiIndex.to_hierarchical,../reference/api/pandas.MultiIndex.to_hierarchical
+generated/pandas.notna,../reference/api/pandas.notna
+generated/pandas.notnull,../reference/api/pandas.notnull
+generated/pandas.option_context,../reference/api/pandas.option_context
+generated/pandas.Panel.abs,../reference/api/pandas.Panel.abs
+generated/pandas.Panel.add,../reference/api/pandas.Panel.add
+generated/pandas.Panel.add_prefix,../reference/api/pandas.Panel.add_prefix
+generated/pandas.Panel.add_suffix,../reference/api/pandas.Panel.add_suffix
+generated/pandas.Panel.agg,../reference/api/pandas.Panel.agg
+generated/pandas.Panel.aggregate,../reference/api/pandas.Panel.aggregate
+generated/pandas.Panel.align,../reference/api/pandas.Panel.align
+generated/pandas.Panel.all,../reference/api/pandas.Panel.all
+generated/pandas.Panel.any,../reference/api/pandas.Panel.any
+generated/pandas.Panel.apply,../reference/api/pandas.Panel.apply
+generated/pandas.Panel.as_blocks,../reference/api/pandas.Panel.as_blocks
+generated/pandas.Panel.asfreq,../reference/api/pandas.Panel.asfreq
+generated/pandas.Panel.as_matrix,../reference/api/pandas.Panel.as_matrix
+generated/pandas.Panel.asof,../reference/api/pandas.Panel.asof
+generated/pandas.Panel.astype,../reference/api/pandas.Panel.astype
+generated/pandas.Panel.at,../reference/api/pandas.Panel.at
+generated/pandas.Panel.at_time,../reference/api/pandas.Panel.at_time
+generated/pandas.Panel.axes,../reference/api/pandas.Panel.axes
+generated/pandas.Panel.between_time,../reference/api/pandas.Panel.between_time
+generated/pandas.Panel.bfill,../reference/api/pandas.Panel.bfill
+generated/pandas.Panel.blocks,../reference/api/pandas.Panel.blocks
+generated/pandas.Panel.bool,../reference/api/pandas.Panel.bool
+generated/pandas.Panel.clip,../reference/api/pandas.Panel.clip
+generated/pandas.Panel.clip_lower,../reference/api/pandas.Panel.clip_lower
+generated/pandas.Panel.clip_upper,../reference/api/pandas.Panel.clip_upper
+generated/pandas.Panel.compound,../reference/api/pandas.Panel.compound
+generated/pandas.Panel.conform,../reference/api/pandas.Panel.conform
+generated/pandas.Panel.convert_objects,../reference/api/pandas.Panel.convert_objects
+generated/pandas.Panel.copy,../reference/api/pandas.Panel.copy
+generated/pandas.Panel.count,../reference/api/pandas.Panel.count
+generated/pandas.Panel.cummax,../reference/api/pandas.Panel.cummax
+generated/pandas.Panel.cummin,../reference/api/pandas.Panel.cummin
+generated/pandas.Panel.cumprod,../reference/api/pandas.Panel.cumprod
+generated/pandas.Panel.cumsum,../reference/api/pandas.Panel.cumsum
+generated/pandas.Panel.describe,../reference/api/pandas.Panel.describe
+generated/pandas.Panel.div,../reference/api/pandas.Panel.div
+generated/pandas.Panel.divide,../reference/api/pandas.Panel.divide
+generated/pandas.Panel.drop,../reference/api/pandas.Panel.drop
+generated/pandas.Panel.droplevel,../reference/api/pandas.Panel.droplevel
+generated/pandas.Panel.dropna,../reference/api/pandas.Panel.dropna
+generated/pandas.Panel.dtypes,../reference/api/pandas.Panel.dtypes
+generated/pandas.Panel.empty,../reference/api/pandas.Panel.empty
+generated/pandas.Panel.eq,../reference/api/pandas.Panel.eq
+generated/pandas.Panel.equals,../reference/api/pandas.Panel.equals
+generated/pandas.Panel.ffill,../reference/api/pandas.Panel.ffill
+generated/pandas.Panel.fillna,../reference/api/pandas.Panel.fillna
+generated/pandas.Panel.filter,../reference/api/pandas.Panel.filter
+generated/pandas.Panel.first,../reference/api/pandas.Panel.first
+generated/pandas.Panel.first_valid_index,../reference/api/pandas.Panel.first_valid_index
+generated/pandas.Panel.floordiv,../reference/api/pandas.Panel.floordiv
+generated/pandas.Panel.from_dict,../reference/api/pandas.Panel.from_dict
+generated/pandas.Panel.fromDict,../reference/api/pandas.Panel.fromDict
+generated/pandas.Panel.ftypes,../reference/api/pandas.Panel.ftypes
+generated/pandas.Panel.ge,../reference/api/pandas.Panel.ge
+generated/pandas.Panel.get_dtype_counts,../reference/api/pandas.Panel.get_dtype_counts
+generated/pandas.Panel.get_ftype_counts,../reference/api/pandas.Panel.get_ftype_counts
+generated/pandas.Panel.get,../reference/api/pandas.Panel.get
+generated/pandas.Panel.get_value,../reference/api/pandas.Panel.get_value
+generated/pandas.Panel.get_values,../reference/api/pandas.Panel.get_values
+generated/pandas.Panel.groupby,../reference/api/pandas.Panel.groupby
+generated/pandas.Panel.gt,../reference/api/pandas.Panel.gt
+generated/pandas.Panel.head,../reference/api/pandas.Panel.head
+generated/pandas.Panel,../reference/api/pandas.Panel
+generated/pandas.Panel.iat,../reference/api/pandas.Panel.iat
+generated/pandas.Panel.iloc,../reference/api/pandas.Panel.iloc
+generated/pandas.Panel.infer_objects,../reference/api/pandas.Panel.infer_objects
+generated/pandas.Panel.interpolate,../reference/api/pandas.Panel.interpolate
+generated/pandas.Panel.is_copy,../reference/api/pandas.Panel.is_copy
+generated/pandas.Panel.isna,../reference/api/pandas.Panel.isna
+generated/pandas.Panel.isnull,../reference/api/pandas.Panel.isnull
+generated/pandas.Panel.items,../reference/api/pandas.Panel.items
+generated/pandas.Panel.__iter__,../reference/api/pandas.Panel.__iter__
+generated/pandas.Panel.iteritems,../reference/api/pandas.Panel.iteritems
+generated/pandas.Panel.ix,../reference/api/pandas.Panel.ix
+generated/pandas.Panel.join,../reference/api/pandas.Panel.join
+generated/pandas.Panel.keys,../reference/api/pandas.Panel.keys
+generated/pandas.Panel.kurt,../reference/api/pandas.Panel.kurt
+generated/pandas.Panel.kurtosis,../reference/api/pandas.Panel.kurtosis
+generated/pandas.Panel.last,../reference/api/pandas.Panel.last
+generated/pandas.Panel.last_valid_index,../reference/api/pandas.Panel.last_valid_index
+generated/pandas.Panel.le,../reference/api/pandas.Panel.le
+generated/pandas.Panel.loc,../reference/api/pandas.Panel.loc
+generated/pandas.Panel.lt,../reference/api/pandas.Panel.lt
+generated/pandas.Panel.mad,../reference/api/pandas.Panel.mad
+generated/pandas.Panel.major_axis,../reference/api/pandas.Panel.major_axis
+generated/pandas.Panel.major_xs,../reference/api/pandas.Panel.major_xs
+generated/pandas.Panel.mask,../reference/api/pandas.Panel.mask
+generated/pandas.Panel.max,../reference/api/pandas.Panel.max
+generated/pandas.Panel.mean,../reference/api/pandas.Panel.mean
+generated/pandas.Panel.median,../reference/api/pandas.Panel.median
+generated/pandas.Panel.min,../reference/api/pandas.Panel.min
+generated/pandas.Panel.minor_axis,../reference/api/pandas.Panel.minor_axis
+generated/pandas.Panel.minor_xs,../reference/api/pandas.Panel.minor_xs
+generated/pandas.Panel.mod,../reference/api/pandas.Panel.mod
+generated/pandas.Panel.mul,../reference/api/pandas.Panel.mul
+generated/pandas.Panel.multiply,../reference/api/pandas.Panel.multiply
+generated/pandas.Panel.ndim,../reference/api/pandas.Panel.ndim
+generated/pandas.Panel.ne,../reference/api/pandas.Panel.ne
+generated/pandas.Panel.notna,../reference/api/pandas.Panel.notna
+generated/pandas.Panel.notnull,../reference/api/pandas.Panel.notnull
+generated/pandas.Panel.pct_change,../reference/api/pandas.Panel.pct_change
+generated/pandas.Panel.pipe,../reference/api/pandas.Panel.pipe
+generated/pandas.Panel.pop,../reference/api/pandas.Panel.pop
+generated/pandas.Panel.pow,../reference/api/pandas.Panel.pow
+generated/pandas.Panel.prod,../reference/api/pandas.Panel.prod
+generated/pandas.Panel.product,../reference/api/pandas.Panel.product
+generated/pandas.Panel.radd,../reference/api/pandas.Panel.radd
+generated/pandas.Panel.rank,../reference/api/pandas.Panel.rank
+generated/pandas.Panel.rdiv,../reference/api/pandas.Panel.rdiv
+generated/pandas.Panel.reindex_axis,../reference/api/pandas.Panel.reindex_axis
+generated/pandas.Panel.reindex,../reference/api/pandas.Panel.reindex
+generated/pandas.Panel.reindex_like,../reference/api/pandas.Panel.reindex_like
+generated/pandas.Panel.rename_axis,../reference/api/pandas.Panel.rename_axis
+generated/pandas.Panel.rename,../reference/api/pandas.Panel.rename
+generated/pandas.Panel.replace,../reference/api/pandas.Panel.replace
+generated/pandas.Panel.resample,../reference/api/pandas.Panel.resample
+generated/pandas.Panel.rfloordiv,../reference/api/pandas.Panel.rfloordiv
+generated/pandas.Panel.rmod,../reference/api/pandas.Panel.rmod
+generated/pandas.Panel.rmul,../reference/api/pandas.Panel.rmul
+generated/pandas.Panel.round,../reference/api/pandas.Panel.round
+generated/pandas.Panel.rpow,../reference/api/pandas.Panel.rpow
+generated/pandas.Panel.rsub,../reference/api/pandas.Panel.rsub
+generated/pandas.Panel.rtruediv,../reference/api/pandas.Panel.rtruediv
+generated/pandas.Panel.sample,../reference/api/pandas.Panel.sample
+generated/pandas.Panel.select,../reference/api/pandas.Panel.select
+generated/pandas.Panel.sem,../reference/api/pandas.Panel.sem
+generated/pandas.Panel.set_axis,../reference/api/pandas.Panel.set_axis
+generated/pandas.Panel.set_value,../reference/api/pandas.Panel.set_value
+generated/pandas.Panel.shape,../reference/api/pandas.Panel.shape
+generated/pandas.Panel.shift,../reference/api/pandas.Panel.shift
+generated/pandas.Panel.size,../reference/api/pandas.Panel.size
+generated/pandas.Panel.skew,../reference/api/pandas.Panel.skew
+generated/pandas.Panel.slice_shift,../reference/api/pandas.Panel.slice_shift
+generated/pandas.Panel.sort_index,../reference/api/pandas.Panel.sort_index
+generated/pandas.Panel.sort_values,../reference/api/pandas.Panel.sort_values
+generated/pandas.Panel.squeeze,../reference/api/pandas.Panel.squeeze
+generated/pandas.Panel.std,../reference/api/pandas.Panel.std
+generated/pandas.Panel.sub,../reference/api/pandas.Panel.sub
+generated/pandas.Panel.subtract,../reference/api/pandas.Panel.subtract
+generated/pandas.Panel.sum,../reference/api/pandas.Panel.sum
+generated/pandas.Panel.swapaxes,../reference/api/pandas.Panel.swapaxes
+generated/pandas.Panel.swaplevel,../reference/api/pandas.Panel.swaplevel
+generated/pandas.Panel.tail,../reference/api/pandas.Panel.tail
+generated/pandas.Panel.take,../reference/api/pandas.Panel.take
+generated/pandas.Panel.timetuple,../reference/api/pandas.Panel.timetuple
+generated/pandas.Panel.to_clipboard,../reference/api/pandas.Panel.to_clipboard
+generated/pandas.Panel.to_csv,../reference/api/pandas.Panel.to_csv
+generated/pandas.Panel.to_dense,../reference/api/pandas.Panel.to_dense
+generated/pandas.Panel.to_excel,../reference/api/pandas.Panel.to_excel
+generated/pandas.Panel.to_frame,../reference/api/pandas.Panel.to_frame
+generated/pandas.Panel.to_hdf,../reference/api/pandas.Panel.to_hdf
+generated/pandas.Panel.to_json,../reference/api/pandas.Panel.to_json
+generated/pandas.Panel.to_latex,../reference/api/pandas.Panel.to_latex
+generated/pandas.Panel.to_msgpack,../reference/api/pandas.Panel.to_msgpack
+generated/pandas.Panel.to_pickle,../reference/api/pandas.Panel.to_pickle
+generated/pandas.Panel.to_sparse,../reference/api/pandas.Panel.to_sparse
+generated/pandas.Panel.to_sql,../reference/api/pandas.Panel.to_sql
+generated/pandas.Panel.to_xarray,../reference/api/pandas.Panel.to_xarray
+generated/pandas.Panel.transform,../reference/api/pandas.Panel.transform
+generated/pandas.Panel.transpose,../reference/api/pandas.Panel.transpose
+generated/pandas.Panel.truediv,../reference/api/pandas.Panel.truediv
+generated/pandas.Panel.truncate,../reference/api/pandas.Panel.truncate
+generated/pandas.Panel.tshift,../reference/api/pandas.Panel.tshift
+generated/pandas.Panel.tz_convert,../reference/api/pandas.Panel.tz_convert
+generated/pandas.Panel.tz_localize,../reference/api/pandas.Panel.tz_localize
+generated/pandas.Panel.update,../reference/api/pandas.Panel.update
+generated/pandas.Panel.values,../reference/api/pandas.Panel.values
+generated/pandas.Panel.var,../reference/api/pandas.Panel.var
+generated/pandas.Panel.where,../reference/api/pandas.Panel.where
+generated/pandas.Panel.xs,../reference/api/pandas.Panel.xs
+generated/pandas.Period.asfreq,../reference/api/pandas.Period.asfreq
+generated/pandas.Period.day,../reference/api/pandas.Period.day
+generated/pandas.Period.dayofweek,../reference/api/pandas.Period.dayofweek
+generated/pandas.Period.dayofyear,../reference/api/pandas.Period.dayofyear
+generated/pandas.Period.days_in_month,../reference/api/pandas.Period.days_in_month
+generated/pandas.Period.daysinmonth,../reference/api/pandas.Period.daysinmonth
+generated/pandas.Period.end_time,../reference/api/pandas.Period.end_time
+generated/pandas.Period.freq,../reference/api/pandas.Period.freq
+generated/pandas.Period.freqstr,../reference/api/pandas.Period.freqstr
+generated/pandas.Period.hour,../reference/api/pandas.Period.hour
+generated/pandas.Period,../reference/api/pandas.Period
+generated/pandas.PeriodIndex.asfreq,../reference/api/pandas.PeriodIndex.asfreq
+generated/pandas.PeriodIndex.day,../reference/api/pandas.PeriodIndex.day
+generated/pandas.PeriodIndex.dayofweek,../reference/api/pandas.PeriodIndex.dayofweek
+generated/pandas.PeriodIndex.dayofyear,../reference/api/pandas.PeriodIndex.dayofyear
+generated/pandas.PeriodIndex.days_in_month,../reference/api/pandas.PeriodIndex.days_in_month
+generated/pandas.PeriodIndex.daysinmonth,../reference/api/pandas.PeriodIndex.daysinmonth
+generated/pandas.PeriodIndex.end_time,../reference/api/pandas.PeriodIndex.end_time
+generated/pandas.PeriodIndex.freq,../reference/api/pandas.PeriodIndex.freq
+generated/pandas.PeriodIndex.freqstr,../reference/api/pandas.PeriodIndex.freqstr
+generated/pandas.PeriodIndex.hour,../reference/api/pandas.PeriodIndex.hour
+generated/pandas.PeriodIndex,../reference/api/pandas.PeriodIndex
+generated/pandas.PeriodIndex.is_leap_year,../reference/api/pandas.PeriodIndex.is_leap_year
+generated/pandas.PeriodIndex.minute,../reference/api/pandas.PeriodIndex.minute
+generated/pandas.PeriodIndex.month,../reference/api/pandas.PeriodIndex.month
+generated/pandas.PeriodIndex.quarter,../reference/api/pandas.PeriodIndex.quarter
+generated/pandas.PeriodIndex.qyear,../reference/api/pandas.PeriodIndex.qyear
+generated/pandas.PeriodIndex.second,../reference/api/pandas.PeriodIndex.second
+generated/pandas.PeriodIndex.start_time,../reference/api/pandas.PeriodIndex.start_time
+generated/pandas.PeriodIndex.strftime,../reference/api/pandas.PeriodIndex.strftime
+generated/pandas.PeriodIndex.to_timestamp,../reference/api/pandas.PeriodIndex.to_timestamp
+generated/pandas.PeriodIndex.weekday,../reference/api/pandas.PeriodIndex.weekday
+generated/pandas.PeriodIndex.week,../reference/api/pandas.PeriodIndex.week
+generated/pandas.PeriodIndex.weekofyear,../reference/api/pandas.PeriodIndex.weekofyear
+generated/pandas.PeriodIndex.year,../reference/api/pandas.PeriodIndex.year
+generated/pandas.Period.is_leap_year,../reference/api/pandas.Period.is_leap_year
+generated/pandas.Period.minute,../reference/api/pandas.Period.minute
+generated/pandas.Period.month,../reference/api/pandas.Period.month
+generated/pandas.Period.now,../reference/api/pandas.Period.now
+generated/pandas.Period.ordinal,../reference/api/pandas.Period.ordinal
+generated/pandas.Period.quarter,../reference/api/pandas.Period.quarter
+generated/pandas.Period.qyear,../reference/api/pandas.Period.qyear
+generated/pandas.period_range,../reference/api/pandas.period_range
+generated/pandas.Period.second,../reference/api/pandas.Period.second
+generated/pandas.Period.start_time,../reference/api/pandas.Period.start_time
+generated/pandas.Period.strftime,../reference/api/pandas.Period.strftime
+generated/pandas.Period.to_timestamp,../reference/api/pandas.Period.to_timestamp
+generated/pandas.Period.weekday,../reference/api/pandas.Period.weekday
+generated/pandas.Period.week,../reference/api/pandas.Period.week
+generated/pandas.Period.weekofyear,../reference/api/pandas.Period.weekofyear
+generated/pandas.Period.year,../reference/api/pandas.Period.year
+generated/pandas.pivot,../reference/api/pandas.pivot
+generated/pandas.pivot_table,../reference/api/pandas.pivot_table
+generated/pandas.plotting.andrews_curves,../reference/api/pandas.plotting.andrews_curves
+generated/pandas.plotting.bootstrap_plot,../reference/api/pandas.plotting.bootstrap_plot
+generated/pandas.plotting.deregister_matplotlib_converters,../reference/api/pandas.plotting.deregister_matplotlib_converters
+generated/pandas.plotting.lag_plot,../reference/api/pandas.plotting.lag_plot
+generated/pandas.plotting.parallel_coordinates,../reference/api/pandas.plotting.parallel_coordinates
+generated/pandas.plotting.radviz,../reference/api/pandas.plotting.radviz
+generated/pandas.plotting.register_matplotlib_converters,../reference/api/pandas.plotting.register_matplotlib_converters
+generated/pandas.plotting.scatter_matrix,../reference/api/pandas.plotting.scatter_matrix
+generated/pandas.qcut,../reference/api/pandas.qcut
+generated/pandas.RangeIndex.from_range,../reference/api/pandas.RangeIndex.from_range
+generated/pandas.RangeIndex,../reference/api/pandas.RangeIndex
+generated/pandas.read_clipboard,../reference/api/pandas.read_clipboard
+generated/pandas.read_csv,../reference/api/pandas.read_csv
+generated/pandas.read_excel,../reference/api/pandas.read_excel
+generated/pandas.read_feather,../reference/api/pandas.read_feather
+generated/pandas.read_fwf,../reference/api/pandas.read_fwf
+generated/pandas.read_gbq,../reference/api/pandas.read_gbq
+generated/pandas.read_hdf,../reference/api/pandas.read_hdf
+generated/pandas.read_html,../reference/api/pandas.read_html
+generated/pandas.read_json,../reference/api/pandas.read_json
+generated/pandas.read_msgpack,../reference/api/pandas.read_msgpack
+generated/pandas.read_parquet,../reference/api/pandas.read_parquet
+generated/pandas.read_pickle,../reference/api/pandas.read_pickle
+generated/pandas.read_sas,../reference/api/pandas.read_sas
+generated/pandas.read_sql,../reference/api/pandas.read_sql
+generated/pandas.read_sql_query,../reference/api/pandas.read_sql_query
+generated/pandas.read_sql_table,../reference/api/pandas.read_sql_table
+generated/pandas.read_stata,../reference/api/pandas.read_stata
+generated/pandas.read_table,../reference/api/pandas.read_table
+generated/pandas.reset_option,../reference/api/pandas.reset_option
+generated/pandas.Series.abs,../reference/api/pandas.Series.abs
+generated/pandas.Series.add,../reference/api/pandas.Series.add
+generated/pandas.Series.add_prefix,../reference/api/pandas.Series.add_prefix
+generated/pandas.Series.add_suffix,../reference/api/pandas.Series.add_suffix
+generated/pandas.Series.agg,../reference/api/pandas.Series.agg
+generated/pandas.Series.aggregate,../reference/api/pandas.Series.aggregate
+generated/pandas.Series.align,../reference/api/pandas.Series.align
+generated/pandas.Series.all,../reference/api/pandas.Series.all
+generated/pandas.Series.any,../reference/api/pandas.Series.any
+generated/pandas.Series.append,../reference/api/pandas.Series.append
+generated/pandas.Series.apply,../reference/api/pandas.Series.apply
+generated/pandas.Series.argmax,../reference/api/pandas.Series.argmax
+generated/pandas.Series.argmin,../reference/api/pandas.Series.argmin
+generated/pandas.Series.argsort,../reference/api/pandas.Series.argsort
+generated/pandas.Series.__array__,../reference/api/pandas.Series.__array__
+generated/pandas.Series.array,../reference/api/pandas.Series.array
+generated/pandas.Series.as_blocks,../reference/api/pandas.Series.as_blocks
+generated/pandas.Series.asfreq,../reference/api/pandas.Series.asfreq
+generated/pandas.Series.as_matrix,../reference/api/pandas.Series.as_matrix
+generated/pandas.Series.asobject,../reference/api/pandas.Series.asobject
+generated/pandas.Series.asof,../reference/api/pandas.Series.asof
+generated/pandas.Series.astype,../reference/api/pandas.Series.astype
+generated/pandas.Series.at,../reference/api/pandas.Series.at
+generated/pandas.Series.at_time,../reference/api/pandas.Series.at_time
+generated/pandas.Series.autocorr,../reference/api/pandas.Series.autocorr
+generated/pandas.Series.axes,../reference/api/pandas.Series.axes
+generated/pandas.Series.base,../reference/api/pandas.Series.base
+generated/pandas.Series.between,../reference/api/pandas.Series.between
+generated/pandas.Series.between_time,../reference/api/pandas.Series.between_time
+generated/pandas.Series.bfill,../reference/api/pandas.Series.bfill
+generated/pandas.Series.blocks,../reference/api/pandas.Series.blocks
+generated/pandas.Series.bool,../reference/api/pandas.Series.bool
+generated/pandas.Series.cat.add_categories,../reference/api/pandas.Series.cat.add_categories
+generated/pandas.Series.cat.as_ordered,../reference/api/pandas.Series.cat.as_ordered
+generated/pandas.Series.cat.as_unordered,../reference/api/pandas.Series.cat.as_unordered
+generated/pandas.Series.cat.categories,../reference/api/pandas.Series.cat.categories
+generated/pandas.Series.cat.codes,../reference/api/pandas.Series.cat.codes
+generated/pandas.Series.cat,../reference/api/pandas.Series.cat
+generated/pandas.Series.cat.ordered,../reference/api/pandas.Series.cat.ordered
+generated/pandas.Series.cat.remove_categories,../reference/api/pandas.Series.cat.remove_categories
+generated/pandas.Series.cat.remove_unused_categories,../reference/api/pandas.Series.cat.remove_unused_categories
+generated/pandas.Series.cat.rename_categories,../reference/api/pandas.Series.cat.rename_categories
+generated/pandas.Series.cat.reorder_categories,../reference/api/pandas.Series.cat.reorder_categories
+generated/pandas.Series.cat.set_categories,../reference/api/pandas.Series.cat.set_categories
+generated/pandas.Series.clip,../reference/api/pandas.Series.clip
+generated/pandas.Series.clip_lower,../reference/api/pandas.Series.clip_lower
+generated/pandas.Series.clip_upper,../reference/api/pandas.Series.clip_upper
+generated/pandas.Series.combine_first,../reference/api/pandas.Series.combine_first
+generated/pandas.Series.combine,../reference/api/pandas.Series.combine
+generated/pandas.Series.compound,../reference/api/pandas.Series.compound
+generated/pandas.Series.compress,../reference/api/pandas.Series.compress
+generated/pandas.Series.convert_objects,../reference/api/pandas.Series.convert_objects
+generated/pandas.Series.copy,../reference/api/pandas.Series.copy
+generated/pandas.Series.corr,../reference/api/pandas.Series.corr
+generated/pandas.Series.count,../reference/api/pandas.Series.count
+generated/pandas.Series.cov,../reference/api/pandas.Series.cov
+generated/pandas.Series.cummax,../reference/api/pandas.Series.cummax
+generated/pandas.Series.cummin,../reference/api/pandas.Series.cummin
+generated/pandas.Series.cumprod,../reference/api/pandas.Series.cumprod
+generated/pandas.Series.cumsum,../reference/api/pandas.Series.cumsum
+generated/pandas.Series.data,../reference/api/pandas.Series.data
+generated/pandas.Series.describe,../reference/api/pandas.Series.describe
+generated/pandas.Series.diff,../reference/api/pandas.Series.diff
+generated/pandas.Series.div,../reference/api/pandas.Series.div
+generated/pandas.Series.divide,../reference/api/pandas.Series.divide
+generated/pandas.Series.divmod,../reference/api/pandas.Series.divmod
+generated/pandas.Series.dot,../reference/api/pandas.Series.dot
+generated/pandas.Series.drop_duplicates,../reference/api/pandas.Series.drop_duplicates
+generated/pandas.Series.drop,../reference/api/pandas.Series.drop
+generated/pandas.Series.droplevel,../reference/api/pandas.Series.droplevel
+generated/pandas.Series.dropna,../reference/api/pandas.Series.dropna
+generated/pandas.Series.dt.ceil,../reference/api/pandas.Series.dt.ceil
+generated/pandas.Series.dt.components,../reference/api/pandas.Series.dt.components
+generated/pandas.Series.dt.date,../reference/api/pandas.Series.dt.date
+generated/pandas.Series.dt.day,../reference/api/pandas.Series.dt.day
+generated/pandas.Series.dt.day_name,../reference/api/pandas.Series.dt.day_name
+generated/pandas.Series.dt.dayofweek,../reference/api/pandas.Series.dt.dayofweek
+generated/pandas.Series.dt.dayofyear,../reference/api/pandas.Series.dt.dayofyear
+generated/pandas.Series.dt.days,../reference/api/pandas.Series.dt.days
+generated/pandas.Series.dt.days_in_month,../reference/api/pandas.Series.dt.days_in_month
+generated/pandas.Series.dt.daysinmonth,../reference/api/pandas.Series.dt.daysinmonth
+generated/pandas.Series.dt.end_time,../reference/api/pandas.Series.dt.end_time
+generated/pandas.Series.dt.floor,../reference/api/pandas.Series.dt.floor
+generated/pandas.Series.dt.freq,../reference/api/pandas.Series.dt.freq
+generated/pandas.Series.dt.hour,../reference/api/pandas.Series.dt.hour
+generated/pandas.Series.dt,../reference/api/pandas.Series.dt
+generated/pandas.Series.dt.is_leap_year,../reference/api/pandas.Series.dt.is_leap_year
+generated/pandas.Series.dt.is_month_end,../reference/api/pandas.Series.dt.is_month_end
+generated/pandas.Series.dt.is_month_start,../reference/api/pandas.Series.dt.is_month_start
+generated/pandas.Series.dt.is_quarter_end,../reference/api/pandas.Series.dt.is_quarter_end
+generated/pandas.Series.dt.is_quarter_start,../reference/api/pandas.Series.dt.is_quarter_start
+generated/pandas.Series.dt.is_year_end,../reference/api/pandas.Series.dt.is_year_end
+generated/pandas.Series.dt.is_year_start,../reference/api/pandas.Series.dt.is_year_start
+generated/pandas.Series.dt.microsecond,../reference/api/pandas.Series.dt.microsecond
+generated/pandas.Series.dt.microseconds,../reference/api/pandas.Series.dt.microseconds
+generated/pandas.Series.dt.minute,../reference/api/pandas.Series.dt.minute
+generated/pandas.Series.dt.month,../reference/api/pandas.Series.dt.month
+generated/pandas.Series.dt.month_name,../reference/api/pandas.Series.dt.month_name
+generated/pandas.Series.dt.nanosecond,../reference/api/pandas.Series.dt.nanosecond
+generated/pandas.Series.dt.nanoseconds,../reference/api/pandas.Series.dt.nanoseconds
+generated/pandas.Series.dt.normalize,../reference/api/pandas.Series.dt.normalize
+generated/pandas.Series.dt.quarter,../reference/api/pandas.Series.dt.quarter
+generated/pandas.Series.dt.qyear,../reference/api/pandas.Series.dt.qyear
+generated/pandas.Series.dt.round,../reference/api/pandas.Series.dt.round
+generated/pandas.Series.dt.second,../reference/api/pandas.Series.dt.second
+generated/pandas.Series.dt.seconds,../reference/api/pandas.Series.dt.seconds
+generated/pandas.Series.dt.start_time,../reference/api/pandas.Series.dt.start_time
+generated/pandas.Series.dt.strftime,../reference/api/pandas.Series.dt.strftime
+generated/pandas.Series.dt.time,../reference/api/pandas.Series.dt.time
+generated/pandas.Series.dt.timetz,../reference/api/pandas.Series.dt.timetz
+generated/pandas.Series.dt.to_period,../reference/api/pandas.Series.dt.to_period
+generated/pandas.Series.dt.to_pydatetime,../reference/api/pandas.Series.dt.to_pydatetime
+generated/pandas.Series.dt.to_pytimedelta,../reference/api/pandas.Series.dt.to_pytimedelta
+generated/pandas.Series.dt.total_seconds,../reference/api/pandas.Series.dt.total_seconds
+generated/pandas.Series.dt.tz_convert,../reference/api/pandas.Series.dt.tz_convert
+generated/pandas.Series.dt.tz,../reference/api/pandas.Series.dt.tz
+generated/pandas.Series.dt.tz_localize,../reference/api/pandas.Series.dt.tz_localize
+generated/pandas.Series.dt.weekday,../reference/api/pandas.Series.dt.weekday
+generated/pandas.Series.dt.week,../reference/api/pandas.Series.dt.week
+generated/pandas.Series.dt.weekofyear,../reference/api/pandas.Series.dt.weekofyear
+generated/pandas.Series.dt.year,../reference/api/pandas.Series.dt.year
+generated/pandas.Series.dtype,../reference/api/pandas.Series.dtype
+generated/pandas.Series.dtypes,../reference/api/pandas.Series.dtypes
+generated/pandas.Series.duplicated,../reference/api/pandas.Series.duplicated
+generated/pandas.Series.empty,../reference/api/pandas.Series.empty
+generated/pandas.Series.eq,../reference/api/pandas.Series.eq
+generated/pandas.Series.equals,../reference/api/pandas.Series.equals
+generated/pandas.Series.ewm,../reference/api/pandas.Series.ewm
+generated/pandas.Series.expanding,../reference/api/pandas.Series.expanding
+generated/pandas.Series.factorize,../reference/api/pandas.Series.factorize
+generated/pandas.Series.ffill,../reference/api/pandas.Series.ffill
+generated/pandas.Series.fillna,../reference/api/pandas.Series.fillna
+generated/pandas.Series.filter,../reference/api/pandas.Series.filter
+generated/pandas.Series.first,../reference/api/pandas.Series.first
+generated/pandas.Series.first_valid_index,../reference/api/pandas.Series.first_valid_index
+generated/pandas.Series.flags,../reference/api/pandas.Series.flags
+generated/pandas.Series.floordiv,../reference/api/pandas.Series.floordiv
+generated/pandas.Series.from_array,../reference/api/pandas.Series.from_array
+generated/pandas.Series.from_csv,../reference/api/pandas.Series.from_csv
+generated/pandas.Series.ftype,../reference/api/pandas.Series.ftype
+generated/pandas.Series.ftypes,../reference/api/pandas.Series.ftypes
+generated/pandas.Series.ge,../reference/api/pandas.Series.ge
+generated/pandas.Series.get_dtype_counts,../reference/api/pandas.Series.get_dtype_counts
+generated/pandas.Series.get_ftype_counts,../reference/api/pandas.Series.get_ftype_counts
+generated/pandas.Series.get,../reference/api/pandas.Series.get
+generated/pandas.Series.get_value,../reference/api/pandas.Series.get_value
+generated/pandas.Series.get_values,../reference/api/pandas.Series.get_values
+generated/pandas.Series.groupby,../reference/api/pandas.Series.groupby
+generated/pandas.Series.gt,../reference/api/pandas.Series.gt
+generated/pandas.Series.hasnans,../reference/api/pandas.Series.hasnans
+generated/pandas.Series.head,../reference/api/pandas.Series.head
+generated/pandas.Series.hist,../reference/api/pandas.Series.hist
+generated/pandas.Series,../reference/api/pandas.Series
+generated/pandas.Series.iat,../reference/api/pandas.Series.iat
+generated/pandas.Series.idxmax,../reference/api/pandas.Series.idxmax
+generated/pandas.Series.idxmin,../reference/api/pandas.Series.idxmin
+generated/pandas.Series.iloc,../reference/api/pandas.Series.iloc
+generated/pandas.Series.imag,../reference/api/pandas.Series.imag
+generated/pandas.Series.index,../reference/api/pandas.Series.index
+generated/pandas.Series.infer_objects,../reference/api/pandas.Series.infer_objects
+generated/pandas.Series.interpolate,../reference/api/pandas.Series.interpolate
+generated/pandas.Series.is_copy,../reference/api/pandas.Series.is_copy
+generated/pandas.Series.isin,../reference/api/pandas.Series.isin
+generated/pandas.Series.is_monotonic_decreasing,../reference/api/pandas.Series.is_monotonic_decreasing
+generated/pandas.Series.is_monotonic,../reference/api/pandas.Series.is_monotonic
+generated/pandas.Series.is_monotonic_increasing,../reference/api/pandas.Series.is_monotonic_increasing
+generated/pandas.Series.isna,../reference/api/pandas.Series.isna
+generated/pandas.Series.isnull,../reference/api/pandas.Series.isnull
+generated/pandas.Series.is_unique,../reference/api/pandas.Series.is_unique
+generated/pandas.Series.item,../reference/api/pandas.Series.item
+generated/pandas.Series.items,../reference/api/pandas.Series.items
+generated/pandas.Series.itemsize,../reference/api/pandas.Series.itemsize
+generated/pandas.Series.__iter__,../reference/api/pandas.Series.__iter__
+generated/pandas.Series.iteritems,../reference/api/pandas.Series.iteritems
+generated/pandas.Series.ix,../reference/api/pandas.Series.ix
+generated/pandas.Series.keys,../reference/api/pandas.Series.keys
+generated/pandas.Series.kurt,../reference/api/pandas.Series.kurt
+generated/pandas.Series.kurtosis,../reference/api/pandas.Series.kurtosis
+generated/pandas.Series.last,../reference/api/pandas.Series.last
+generated/pandas.Series.last_valid_index,../reference/api/pandas.Series.last_valid_index
+generated/pandas.Series.le,../reference/api/pandas.Series.le
+generated/pandas.Series.loc,../reference/api/pandas.Series.loc
+generated/pandas.Series.lt,../reference/api/pandas.Series.lt
+generated/pandas.Series.mad,../reference/api/pandas.Series.mad
+generated/pandas.Series.map,../reference/api/pandas.Series.map
+generated/pandas.Series.mask,../reference/api/pandas.Series.mask
+generated/pandas.Series.max,../reference/api/pandas.Series.max
+generated/pandas.Series.mean,../reference/api/pandas.Series.mean
+generated/pandas.Series.median,../reference/api/pandas.Series.median
+generated/pandas.Series.memory_usage,../reference/api/pandas.Series.memory_usage
+generated/pandas.Series.min,../reference/api/pandas.Series.min
+generated/pandas.Series.mode,../reference/api/pandas.Series.mode
+generated/pandas.Series.mod,../reference/api/pandas.Series.mod
+generated/pandas.Series.mul,../reference/api/pandas.Series.mul
+generated/pandas.Series.multiply,../reference/api/pandas.Series.multiply
+generated/pandas.Series.name,../reference/api/pandas.Series.name
+generated/pandas.Series.nbytes,../reference/api/pandas.Series.nbytes
+generated/pandas.Series.ndim,../reference/api/pandas.Series.ndim
+generated/pandas.Series.ne,../reference/api/pandas.Series.ne
+generated/pandas.Series.nlargest,../reference/api/pandas.Series.nlargest
+generated/pandas.Series.nonzero,../reference/api/pandas.Series.nonzero
+generated/pandas.Series.notna,../reference/api/pandas.Series.notna
+generated/pandas.Series.notnull,../reference/api/pandas.Series.notnull
+generated/pandas.Series.nsmallest,../reference/api/pandas.Series.nsmallest
+generated/pandas.Series.nunique,../reference/api/pandas.Series.nunique
+generated/pandas.Series.pct_change,../reference/api/pandas.Series.pct_change
+generated/pandas.Series.pipe,../reference/api/pandas.Series.pipe
+generated/pandas.Series.plot.area,../reference/api/pandas.Series.plot.area
+generated/pandas.Series.plot.barh,../reference/api/pandas.Series.plot.barh
+generated/pandas.Series.plot.bar,../reference/api/pandas.Series.plot.bar
+generated/pandas.Series.plot.box,../reference/api/pandas.Series.plot.box
+generated/pandas.Series.plot.density,../reference/api/pandas.Series.plot.density
+generated/pandas.Series.plot.hist,../reference/api/pandas.Series.plot.hist
+generated/pandas.Series.plot,../reference/api/pandas.Series.plot
+generated/pandas.Series.plot.kde,../reference/api/pandas.Series.plot.kde
+generated/pandas.Series.plot.line,../reference/api/pandas.Series.plot.line
+generated/pandas.Series.plot.pie,../reference/api/pandas.Series.plot.pie
+generated/pandas.Series.pop,../reference/api/pandas.Series.pop
+generated/pandas.Series.pow,../reference/api/pandas.Series.pow
+generated/pandas.Series.prod,../reference/api/pandas.Series.prod
+generated/pandas.Series.product,../reference/api/pandas.Series.product
+generated/pandas.Series.ptp,../reference/api/pandas.Series.ptp
+generated/pandas.Series.put,../reference/api/pandas.Series.put
+generated/pandas.Series.quantile,../reference/api/pandas.Series.quantile
+generated/pandas.Series.radd,../reference/api/pandas.Series.radd
+generated/pandas.Series.rank,../reference/api/pandas.Series.rank
+generated/pandas.Series.ravel,../reference/api/pandas.Series.ravel
+generated/pandas.Series.rdiv,../reference/api/pandas.Series.rdiv
+generated/pandas.Series.rdivmod,../reference/api/pandas.Series.rdivmod
+generated/pandas.Series.real,../reference/api/pandas.Series.real
+generated/pandas.Series.reindex_axis,../reference/api/pandas.Series.reindex_axis
+generated/pandas.Series.reindex,../reference/api/pandas.Series.reindex
+generated/pandas.Series.reindex_like,../reference/api/pandas.Series.reindex_like
+generated/pandas.Series.rename_axis,../reference/api/pandas.Series.rename_axis
+generated/pandas.Series.rename,../reference/api/pandas.Series.rename
+generated/pandas.Series.reorder_levels,../reference/api/pandas.Series.reorder_levels
+generated/pandas.Series.repeat,../reference/api/pandas.Series.repeat
+generated/pandas.Series.replace,../reference/api/pandas.Series.replace
+generated/pandas.Series.resample,../reference/api/pandas.Series.resample
+generated/pandas.Series.reset_index,../reference/api/pandas.Series.reset_index
+generated/pandas.Series.rfloordiv,../reference/api/pandas.Series.rfloordiv
+generated/pandas.Series.rmod,../reference/api/pandas.Series.rmod
+generated/pandas.Series.rmul,../reference/api/pandas.Series.rmul
+generated/pandas.Series.rolling,../reference/api/pandas.Series.rolling
+generated/pandas.Series.round,../reference/api/pandas.Series.round
+generated/pandas.Series.rpow,../reference/api/pandas.Series.rpow
+generated/pandas.Series.rsub,../reference/api/pandas.Series.rsub
+generated/pandas.Series.rtruediv,../reference/api/pandas.Series.rtruediv
+generated/pandas.Series.sample,../reference/api/pandas.Series.sample
+generated/pandas.Series.searchsorted,../reference/api/pandas.Series.searchsorted
+generated/pandas.Series.select,../reference/api/pandas.Series.select
+generated/pandas.Series.sem,../reference/api/pandas.Series.sem
+generated/pandas.Series.set_axis,../reference/api/pandas.Series.set_axis
+generated/pandas.Series.set_value,../reference/api/pandas.Series.set_value
+generated/pandas.Series.shape,../reference/api/pandas.Series.shape
+generated/pandas.Series.shift,../reference/api/pandas.Series.shift
+generated/pandas.Series.size,../reference/api/pandas.Series.size
+generated/pandas.Series.skew,../reference/api/pandas.Series.skew
+generated/pandas.Series.slice_shift,../reference/api/pandas.Series.slice_shift
+generated/pandas.Series.sort_index,../reference/api/pandas.Series.sort_index
+generated/pandas.Series.sort_values,../reference/api/pandas.Series.sort_values
+generated/pandas.Series.sparse.density,../reference/api/pandas.Series.sparse.density
+generated/pandas.Series.sparse.fill_value,../reference/api/pandas.Series.sparse.fill_value
+generated/pandas.Series.sparse.from_coo,../reference/api/pandas.Series.sparse.from_coo
+generated/pandas.Series.sparse.npoints,../reference/api/pandas.Series.sparse.npoints
+generated/pandas.Series.sparse.sp_values,../reference/api/pandas.Series.sparse.sp_values
+generated/pandas.Series.sparse.to_coo,../reference/api/pandas.Series.sparse.to_coo
+generated/pandas.Series.squeeze,../reference/api/pandas.Series.squeeze
+generated/pandas.Series.std,../reference/api/pandas.Series.std
+generated/pandas.Series.str.capitalize,../reference/api/pandas.Series.str.capitalize
+generated/pandas.Series.str.cat,../reference/api/pandas.Series.str.cat
+generated/pandas.Series.str.center,../reference/api/pandas.Series.str.center
+generated/pandas.Series.str.contains,../reference/api/pandas.Series.str.contains
+generated/pandas.Series.str.count,../reference/api/pandas.Series.str.count
+generated/pandas.Series.str.decode,../reference/api/pandas.Series.str.decode
+generated/pandas.Series.str.encode,../reference/api/pandas.Series.str.encode
+generated/pandas.Series.str.endswith,../reference/api/pandas.Series.str.endswith
+generated/pandas.Series.str.extractall,../reference/api/pandas.Series.str.extractall
+generated/pandas.Series.str.extract,../reference/api/pandas.Series.str.extract
+generated/pandas.Series.str.findall,../reference/api/pandas.Series.str.findall
+generated/pandas.Series.str.find,../reference/api/pandas.Series.str.find
+generated/pandas.Series.str.get_dummies,../reference/api/pandas.Series.str.get_dummies
+generated/pandas.Series.str.get,../reference/api/pandas.Series.str.get
+generated/pandas.Series.str,../reference/api/pandas.Series.str
+generated/pandas.Series.strides,../reference/api/pandas.Series.strides
+generated/pandas.Series.str.index,../reference/api/pandas.Series.str.index
+generated/pandas.Series.str.isalnum,../reference/api/pandas.Series.str.isalnum
+generated/pandas.Series.str.isalpha,../reference/api/pandas.Series.str.isalpha
+generated/pandas.Series.str.isdecimal,../reference/api/pandas.Series.str.isdecimal
+generated/pandas.Series.str.isdigit,../reference/api/pandas.Series.str.isdigit
+generated/pandas.Series.str.islower,../reference/api/pandas.Series.str.islower
+generated/pandas.Series.str.isnumeric,../reference/api/pandas.Series.str.isnumeric
+generated/pandas.Series.str.isspace,../reference/api/pandas.Series.str.isspace
+generated/pandas.Series.str.istitle,../reference/api/pandas.Series.str.istitle
+generated/pandas.Series.str.isupper,../reference/api/pandas.Series.str.isupper
+generated/pandas.Series.str.join,../reference/api/pandas.Series.str.join
+generated/pandas.Series.str.len,../reference/api/pandas.Series.str.len
+generated/pandas.Series.str.ljust,../reference/api/pandas.Series.str.ljust
+generated/pandas.Series.str.lower,../reference/api/pandas.Series.str.lower
+generated/pandas.Series.str.lstrip,../reference/api/pandas.Series.str.lstrip
+generated/pandas.Series.str.match,../reference/api/pandas.Series.str.match
+generated/pandas.Series.str.normalize,../reference/api/pandas.Series.str.normalize
+generated/pandas.Series.str.pad,../reference/api/pandas.Series.str.pad
+generated/pandas.Series.str.partition,../reference/api/pandas.Series.str.partition
+generated/pandas.Series.str.repeat,../reference/api/pandas.Series.str.repeat
+generated/pandas.Series.str.replace,../reference/api/pandas.Series.str.replace
+generated/pandas.Series.str.rfind,../reference/api/pandas.Series.str.rfind
+generated/pandas.Series.str.rindex,../reference/api/pandas.Series.str.rindex
+generated/pandas.Series.str.rjust,../reference/api/pandas.Series.str.rjust
+generated/pandas.Series.str.rpartition,../reference/api/pandas.Series.str.rpartition
+generated/pandas.Series.str.rsplit,../reference/api/pandas.Series.str.rsplit
+generated/pandas.Series.str.rstrip,../reference/api/pandas.Series.str.rstrip
+generated/pandas.Series.str.slice,../reference/api/pandas.Series.str.slice
+generated/pandas.Series.str.slice_replace,../reference/api/pandas.Series.str.slice_replace
+generated/pandas.Series.str.split,../reference/api/pandas.Series.str.split
+generated/pandas.Series.str.startswith,../reference/api/pandas.Series.str.startswith
+generated/pandas.Series.str.strip,../reference/api/pandas.Series.str.strip
+generated/pandas.Series.str.swapcase,../reference/api/pandas.Series.str.swapcase
+generated/pandas.Series.str.title,../reference/api/pandas.Series.str.title
+generated/pandas.Series.str.translate,../reference/api/pandas.Series.str.translate
+generated/pandas.Series.str.upper,../reference/api/pandas.Series.str.upper
+generated/pandas.Series.str.wrap,../reference/api/pandas.Series.str.wrap
+generated/pandas.Series.str.zfill,../reference/api/pandas.Series.str.zfill
+generated/pandas.Series.sub,../reference/api/pandas.Series.sub
+generated/pandas.Series.subtract,../reference/api/pandas.Series.subtract
+generated/pandas.Series.sum,../reference/api/pandas.Series.sum
+generated/pandas.Series.swapaxes,../reference/api/pandas.Series.swapaxes
+generated/pandas.Series.swaplevel,../reference/api/pandas.Series.swaplevel
+generated/pandas.Series.tail,../reference/api/pandas.Series.tail
+generated/pandas.Series.take,../reference/api/pandas.Series.take
+generated/pandas.Series.T,../reference/api/pandas.Series.T
+generated/pandas.Series.timetuple,../reference/api/pandas.Series.timetuple
+generated/pandas.Series.to_clipboard,../reference/api/pandas.Series.to_clipboard
+generated/pandas.Series.to_csv,../reference/api/pandas.Series.to_csv
+generated/pandas.Series.to_dense,../reference/api/pandas.Series.to_dense
+generated/pandas.Series.to_dict,../reference/api/pandas.Series.to_dict
+generated/pandas.Series.to_excel,../reference/api/pandas.Series.to_excel
+generated/pandas.Series.to_frame,../reference/api/pandas.Series.to_frame
+generated/pandas.Series.to_hdf,../reference/api/pandas.Series.to_hdf
+generated/pandas.Series.to_json,../reference/api/pandas.Series.to_json
+generated/pandas.Series.to_latex,../reference/api/pandas.Series.to_latex
+generated/pandas.Series.to_list,../reference/api/pandas.Series.to_list
+generated/pandas.Series.tolist,../reference/api/pandas.Series.tolist
+generated/pandas.Series.to_msgpack,../reference/api/pandas.Series.to_msgpack
+generated/pandas.Series.to_numpy,../reference/api/pandas.Series.to_numpy
+generated/pandas.Series.to_period,../reference/api/pandas.Series.to_period
+generated/pandas.Series.to_pickle,../reference/api/pandas.Series.to_pickle
+generated/pandas.Series.to_sparse,../reference/api/pandas.Series.to_sparse
+generated/pandas.Series.to_sql,../reference/api/pandas.Series.to_sql
+generated/pandas.Series.to_string,../reference/api/pandas.Series.to_string
+generated/pandas.Series.to_timestamp,../reference/api/pandas.Series.to_timestamp
+generated/pandas.Series.to_xarray,../reference/api/pandas.Series.to_xarray
+generated/pandas.Series.transform,../reference/api/pandas.Series.transform
+generated/pandas.Series.transpose,../reference/api/pandas.Series.transpose
+generated/pandas.Series.truediv,../reference/api/pandas.Series.truediv
+generated/pandas.Series.truncate,../reference/api/pandas.Series.truncate
+generated/pandas.Series.tshift,../reference/api/pandas.Series.tshift
+generated/pandas.Series.tz_convert,../reference/api/pandas.Series.tz_convert
+generated/pandas.Series.tz_localize,../reference/api/pandas.Series.tz_localize
+generated/pandas.Series.unique,../reference/api/pandas.Series.unique
+generated/pandas.Series.unstack,../reference/api/pandas.Series.unstack
+generated/pandas.Series.update,../reference/api/pandas.Series.update
+generated/pandas.Series.valid,../reference/api/pandas.Series.valid
+generated/pandas.Series.value_counts,../reference/api/pandas.Series.value_counts
+generated/pandas.Series.values,../reference/api/pandas.Series.values
+generated/pandas.Series.var,../reference/api/pandas.Series.var
+generated/pandas.Series.view,../reference/api/pandas.Series.view
+generated/pandas.Series.where,../reference/api/pandas.Series.where
+generated/pandas.Series.xs,../reference/api/pandas.Series.xs
+generated/pandas.set_option,../reference/api/pandas.set_option
+generated/pandas.SparseDataFrame.to_coo,../reference/api/pandas.SparseDataFrame.to_coo
+generated/pandas.SparseSeries.from_coo,../reference/api/pandas.SparseSeries.from_coo
+generated/pandas.SparseSeries.to_coo,../reference/api/pandas.SparseSeries.to_coo
+generated/pandas.test,../reference/api/pandas.test
+generated/pandas.testing.assert_frame_equal,../reference/api/pandas.testing.assert_frame_equal
+generated/pandas.testing.assert_index_equal,../reference/api/pandas.testing.assert_index_equal
+generated/pandas.testing.assert_series_equal,../reference/api/pandas.testing.assert_series_equal
+generated/pandas.Timedelta.asm8,../reference/api/pandas.Timedelta.asm8
+generated/pandas.Timedelta.ceil,../reference/api/pandas.Timedelta.ceil
+generated/pandas.Timedelta.components,../reference/api/pandas.Timedelta.components
+generated/pandas.Timedelta.days,../reference/api/pandas.Timedelta.days
+generated/pandas.Timedelta.delta,../reference/api/pandas.Timedelta.delta
+generated/pandas.Timedelta.floor,../reference/api/pandas.Timedelta.floor
+generated/pandas.Timedelta.freq,../reference/api/pandas.Timedelta.freq
+generated/pandas.Timedelta,../reference/api/pandas.Timedelta
+generated/pandas.TimedeltaIndex.ceil,../reference/api/pandas.TimedeltaIndex.ceil
+generated/pandas.TimedeltaIndex.components,../reference/api/pandas.TimedeltaIndex.components
+generated/pandas.TimedeltaIndex.days,../reference/api/pandas.TimedeltaIndex.days
+generated/pandas.TimedeltaIndex.floor,../reference/api/pandas.TimedeltaIndex.floor
+generated/pandas.TimedeltaIndex,../reference/api/pandas.TimedeltaIndex
+generated/pandas.TimedeltaIndex.inferred_freq,../reference/api/pandas.TimedeltaIndex.inferred_freq
+generated/pandas.TimedeltaIndex.microseconds,../reference/api/pandas.TimedeltaIndex.microseconds
+generated/pandas.TimedeltaIndex.nanoseconds,../reference/api/pandas.TimedeltaIndex.nanoseconds
+generated/pandas.TimedeltaIndex.round,../reference/api/pandas.TimedeltaIndex.round
+generated/pandas.TimedeltaIndex.seconds,../reference/api/pandas.TimedeltaIndex.seconds
+generated/pandas.TimedeltaIndex.to_frame,../reference/api/pandas.TimedeltaIndex.to_frame
+generated/pandas.TimedeltaIndex.to_pytimedelta,../reference/api/pandas.TimedeltaIndex.to_pytimedelta
+generated/pandas.TimedeltaIndex.to_series,../reference/api/pandas.TimedeltaIndex.to_series
+generated/pandas.Timedelta.isoformat,../reference/api/pandas.Timedelta.isoformat
+generated/pandas.Timedelta.is_populated,../reference/api/pandas.Timedelta.is_populated
+generated/pandas.Timedelta.max,../reference/api/pandas.Timedelta.max
+generated/pandas.Timedelta.microseconds,../reference/api/pandas.Timedelta.microseconds
+generated/pandas.Timedelta.min,../reference/api/pandas.Timedelta.min
+generated/pandas.Timedelta.nanoseconds,../reference/api/pandas.Timedelta.nanoseconds
+generated/pandas.timedelta_range,../reference/api/pandas.timedelta_range
+generated/pandas.Timedelta.resolution,../reference/api/pandas.Timedelta.resolution
+generated/pandas.Timedelta.round,../reference/api/pandas.Timedelta.round
+generated/pandas.Timedelta.seconds,../reference/api/pandas.Timedelta.seconds
+generated/pandas.Timedelta.to_pytimedelta,../reference/api/pandas.Timedelta.to_pytimedelta
+generated/pandas.Timedelta.total_seconds,../reference/api/pandas.Timedelta.total_seconds
+generated/pandas.Timedelta.to_timedelta64,../reference/api/pandas.Timedelta.to_timedelta64
+generated/pandas.Timedelta.value,../reference/api/pandas.Timedelta.value
+generated/pandas.Timedelta.view,../reference/api/pandas.Timedelta.view
+generated/pandas.Timestamp.asm8,../reference/api/pandas.Timestamp.asm8
+generated/pandas.Timestamp.astimezone,../reference/api/pandas.Timestamp.astimezone
+generated/pandas.Timestamp.ceil,../reference/api/pandas.Timestamp.ceil
+generated/pandas.Timestamp.combine,../reference/api/pandas.Timestamp.combine
+generated/pandas.Timestamp.ctime,../reference/api/pandas.Timestamp.ctime
+generated/pandas.Timestamp.date,../reference/api/pandas.Timestamp.date
+generated/pandas.Timestamp.day,../reference/api/pandas.Timestamp.day
+generated/pandas.Timestamp.day_name,../reference/api/pandas.Timestamp.day_name
+generated/pandas.Timestamp.dayofweek,../reference/api/pandas.Timestamp.dayofweek
+generated/pandas.Timestamp.dayofyear,../reference/api/pandas.Timestamp.dayofyear
+generated/pandas.Timestamp.days_in_month,../reference/api/pandas.Timestamp.days_in_month
+generated/pandas.Timestamp.daysinmonth,../reference/api/pandas.Timestamp.daysinmonth
+generated/pandas.Timestamp.dst,../reference/api/pandas.Timestamp.dst
+generated/pandas.Timestamp.floor,../reference/api/pandas.Timestamp.floor
+generated/pandas.Timestamp.fold,../reference/api/pandas.Timestamp.fold
+generated/pandas.Timestamp.freq,../reference/api/pandas.Timestamp.freq
+generated/pandas.Timestamp.freqstr,../reference/api/pandas.Timestamp.freqstr
+generated/pandas.Timestamp.fromisoformat,../reference/api/pandas.Timestamp.fromisoformat
+generated/pandas.Timestamp.fromordinal,../reference/api/pandas.Timestamp.fromordinal
+generated/pandas.Timestamp.fromtimestamp,../reference/api/pandas.Timestamp.fromtimestamp
+generated/pandas.Timestamp.hour,../reference/api/pandas.Timestamp.hour
+generated/pandas.Timestamp,../reference/api/pandas.Timestamp
+generated/pandas.Timestamp.is_leap_year,../reference/api/pandas.Timestamp.is_leap_year
+generated/pandas.Timestamp.is_month_end,../reference/api/pandas.Timestamp.is_month_end
+generated/pandas.Timestamp.is_month_start,../reference/api/pandas.Timestamp.is_month_start
+generated/pandas.Timestamp.isocalendar,../reference/api/pandas.Timestamp.isocalendar
+generated/pandas.Timestamp.isoformat,../reference/api/pandas.Timestamp.isoformat
+generated/pandas.Timestamp.isoweekday,../reference/api/pandas.Timestamp.isoweekday
+generated/pandas.Timestamp.is_quarter_end,../reference/api/pandas.Timestamp.is_quarter_end
+generated/pandas.Timestamp.is_quarter_start,../reference/api/pandas.Timestamp.is_quarter_start
+generated/pandas.Timestamp.is_year_end,../reference/api/pandas.Timestamp.is_year_end
+generated/pandas.Timestamp.is_year_start,../reference/api/pandas.Timestamp.is_year_start
+generated/pandas.Timestamp.max,../reference/api/pandas.Timestamp.max
+generated/pandas.Timestamp.microsecond,../reference/api/pandas.Timestamp.microsecond
+generated/pandas.Timestamp.min,../reference/api/pandas.Timestamp.min
+generated/pandas.Timestamp.minute,../reference/api/pandas.Timestamp.minute
+generated/pandas.Timestamp.month,../reference/api/pandas.Timestamp.month
+generated/pandas.Timestamp.month_name,../reference/api/pandas.Timestamp.month_name
+generated/pandas.Timestamp.nanosecond,../reference/api/pandas.Timestamp.nanosecond
+generated/pandas.Timestamp.normalize,../reference/api/pandas.Timestamp.normalize
+generated/pandas.Timestamp.now,../reference/api/pandas.Timestamp.now
+generated/pandas.Timestamp.quarter,../reference/api/pandas.Timestamp.quarter
+generated/pandas.Timestamp.replace,../reference/api/pandas.Timestamp.replace
+generated/pandas.Timestamp.resolution,../reference/api/pandas.Timestamp.resolution
+generated/pandas.Timestamp.round,../reference/api/pandas.Timestamp.round
+generated/pandas.Timestamp.second,../reference/api/pandas.Timestamp.second
+generated/pandas.Timestamp.strftime,../reference/api/pandas.Timestamp.strftime
+generated/pandas.Timestamp.strptime,../reference/api/pandas.Timestamp.strptime
+generated/pandas.Timestamp.time,../reference/api/pandas.Timestamp.time
+generated/pandas.Timestamp.timestamp,../reference/api/pandas.Timestamp.timestamp
+generated/pandas.Timestamp.timetuple,../reference/api/pandas.Timestamp.timetuple
+generated/pandas.Timestamp.timetz,../reference/api/pandas.Timestamp.timetz
+generated/pandas.Timestamp.to_datetime64,../reference/api/pandas.Timestamp.to_datetime64
+generated/pandas.Timestamp.today,../reference/api/pandas.Timestamp.today
+generated/pandas.Timestamp.to_julian_date,../reference/api/pandas.Timestamp.to_julian_date
+generated/pandas.Timestamp.toordinal,../reference/api/pandas.Timestamp.toordinal
+generated/pandas.Timestamp.to_period,../reference/api/pandas.Timestamp.to_period
+generated/pandas.Timestamp.to_pydatetime,../reference/api/pandas.Timestamp.to_pydatetime
+generated/pandas.Timestamp.tz_convert,../reference/api/pandas.Timestamp.tz_convert
+generated/pandas.Timestamp.tz,../reference/api/pandas.Timestamp.tz
+generated/pandas.Timestamp.tzinfo,../reference/api/pandas.Timestamp.tzinfo
+generated/pandas.Timestamp.tz_localize,../reference/api/pandas.Timestamp.tz_localize
+generated/pandas.Timestamp.tzname,../reference/api/pandas.Timestamp.tzname
+generated/pandas.Timestamp.utcfromtimestamp,../reference/api/pandas.Timestamp.utcfromtimestamp
+generated/pandas.Timestamp.utcnow,../reference/api/pandas.Timestamp.utcnow
+generated/pandas.Timestamp.utcoffset,../reference/api/pandas.Timestamp.utcoffset
+generated/pandas.Timestamp.utctimetuple,../reference/api/pandas.Timestamp.utctimetuple
+generated/pandas.Timestamp.value,../reference/api/pandas.Timestamp.value
+generated/pandas.Timestamp.weekday,../reference/api/pandas.Timestamp.weekday
+generated/pandas.Timestamp.weekday_name,../reference/api/pandas.Timestamp.weekday_name
+generated/pandas.Timestamp.week,../reference/api/pandas.Timestamp.week
+generated/pandas.Timestamp.weekofyear,../reference/api/pandas.Timestamp.weekofyear
+generated/pandas.Timestamp.year,../reference/api/pandas.Timestamp.year
+generated/pandas.to_datetime,../reference/api/pandas.to_datetime
+generated/pandas.to_numeric,../reference/api/pandas.to_numeric
+generated/pandas.to_timedelta,../reference/api/pandas.to_timedelta
+generated/pandas.tseries.frequencies.to_offset,../reference/api/pandas.tseries.frequencies.to_offset
+generated/pandas.unique,../reference/api/pandas.unique
+generated/pandas.util.hash_array,../reference/api/pandas.util.hash_array
+generated/pandas.util.hash_pandas_object,../reference/api/pandas.util.hash_pandas_object
+generated/pandas.wide_to_long,../reference/api/pandas.wide_to_long
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index ab51911a610e3..45ebbbc870ca3 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -35,7 +35,7 @@ See the :ref:`overview` for more detail about what's in the library.
     {{ single_doc[:-4] }}
 {% elif single_doc %}
 .. autosummary::
-    :toctree: api/generated/
+    :toctree: reference/api/
 
     {{ single_doc }}
 {% else -%}
@@ -51,7 +51,7 @@ See the :ref:`overview` for more detail about what's in the library.
     ecosystem
     {% endif -%}
     {% if include_api -%}
-    api/index
+    reference/index
     {% endif -%}
     {% if not single_doc -%}
     development/index
diff --git a/doc/source/api/arrays.rst b/doc/source/reference/arrays.rst
similarity index 93%
rename from doc/source/api/arrays.rst
rename to doc/source/reference/arrays.rst
index 5ecc5181af22c..7281f4f748d6f 100644
--- a/doc/source/api/arrays.rst
+++ b/doc/source/reference/arrays.rst
@@ -31,7 +31,7 @@ The top-level :meth:`array` method can be used to create a new array, which may
 stored in a :class:`Series`, :class:`Index`, or as a column in a :class:`DataFrame`.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    array
 
@@ -48,14 +48,14 @@ or timezone-aware values.
 scalar type for timezone-naive or timezone-aware datetime data.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Timestamp
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Timestamp.asm8
    Timestamp.day
@@ -91,7 +91,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Timestamp.astimezone
    Timestamp.ceil
@@ -142,7 +142,7 @@ is used.
 If the data are tz-aware, then every value in the array must have the same timezone.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    arrays.DatetimeArray
    DatetimeTZDtype
@@ -156,14 +156,14 @@ NumPy can natively represent timedeltas. Pandas provides :class:`Timedelta`
 for symmetry with :class:`Timestamp`.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Timedelta
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Timedelta.asm8
    Timedelta.components
@@ -183,7 +183,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Timedelta.ceil
    Timedelta.floor
@@ -196,7 +196,7 @@ Methods
 A collection of timedeltas may be stored in a :class:`TimedeltaArray`.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    arrays.TimedeltaArray
 
@@ -210,14 +210,14 @@ Pandas represents spans of times as :class:`Period` objects.
 Period
 ------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Period
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Period.day
    Period.dayofweek
@@ -244,7 +244,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Period.asfreq
    Period.now
@@ -255,7 +255,7 @@ A collection of timedeltas may be stored in a :class:`arrays.PeriodArray`.
 Every period in a ``PeriodArray`` must have the same ``freq``.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    arrays.DatetimeArray
    PeriodDtype
@@ -268,14 +268,14 @@ Interval Data
 Arbitrary intervals can be represented as :class:`Interval` objects.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Interval
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Interval.closed
    Interval.closed_left
@@ -291,7 +291,7 @@ Properties
 A collection of intervals may be stored in an :class:`IntervalArray`.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    IntervalArray
    IntervalDtype
@@ -305,7 +305,7 @@ Nullable Integer
 Pandas provides this through :class:`arrays.IntegerArray`.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    arrays.IntegerArray
    Int8Dtype
@@ -327,13 +327,13 @@ limited, fixed set of values. The dtype of a ``Categorical`` can be described by
 a :class:`pandas.api.types.CategoricalDtype`.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    CategoricalDtype
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    CategoricalDtype.categories
    CategoricalDtype.ordered
@@ -341,7 +341,7 @@ a :class:`pandas.api.types.CategoricalDtype`.
 Categorical data can be stored in a :class:`pandas.Categorical`
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    Categorical
@@ -350,14 +350,14 @@ The alternative :meth:`Categorical.from_codes` constructor can be used when you
 have the categories and integer codes already:
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Categorical.from_codes
 
 The dtype information is available on the ``Categorical``
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Categorical.dtype
    Categorical.categories
@@ -368,7 +368,7 @@ The dtype information is available on the ``Categorical``
 the Categorical back to a NumPy array, so categories and order information is not preserved!
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Categorical.__array__
 
@@ -391,7 +391,7 @@ Data where a single value is repeated many times (e.g. ``0`` or ``NaN``) may
 be stored efficiently as a :class:`SparseArray`.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    SparseArray
    SparseDtype
diff --git a/doc/source/api/extensions.rst b/doc/source/reference/extensions.rst
similarity index 95%
rename from doc/source/api/extensions.rst
rename to doc/source/reference/extensions.rst
index 3972354ff9651..6146e34fab274 100644
--- a/doc/source/api/extensions.rst
+++ b/doc/source/reference/extensions.rst
@@ -11,7 +11,7 @@ These are primarily intended for library authors looking to extend pandas
 objects.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    api.extensions.register_extension_dtype
    api.extensions.register_dataframe_accessor
diff --git a/doc/source/api/frame.rst b/doc/source/reference/frame.rst
similarity index 93%
rename from doc/source/api/frame.rst
rename to doc/source/reference/frame.rst
index de16d59fe7c40..568acd5207bd1 100644
--- a/doc/source/api/frame.rst
+++ b/doc/source/reference/frame.rst
@@ -10,7 +10,7 @@ DataFrame
 Constructor
 ~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame
 
@@ -19,13 +19,13 @@ Attributes and underlying data
 **Axes**
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.index
    DataFrame.columns
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.dtypes
    DataFrame.ftypes
@@ -45,7 +45,7 @@ Attributes and underlying data
 Conversion
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.astype
    DataFrame.convert_objects
@@ -58,7 +58,7 @@ Conversion
 Indexing, iteration
 ~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.head
    DataFrame.at
@@ -88,7 +88,7 @@ For more information on ``.at``, ``.iat``, ``.loc``, and
 Binary operator functions
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.add
    DataFrame.sub
@@ -119,7 +119,7 @@ Binary operator functions
 Function application, GroupBy & Window
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.apply
    DataFrame.applymap
@@ -137,7 +137,7 @@ Function application, GroupBy & Window
 Computations / Descriptive Stats
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.abs
    DataFrame.all
@@ -181,7 +181,7 @@ Computations / Descriptive Stats
 Reindexing / Selection / Label manipulation
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.add_prefix
    DataFrame.add_suffix
@@ -217,7 +217,7 @@ Reindexing / Selection / Label manipulation
 Missing data handling
 ~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.dropna
    DataFrame.fillna
@@ -227,7 +227,7 @@ Missing data handling
 Reshaping, sorting, transposing
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.droplevel
    DataFrame.pivot
@@ -251,7 +251,7 @@ Reshaping, sorting, transposing
 Combining / joining / merging
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.append
    DataFrame.assign
@@ -262,7 +262,7 @@ Combining / joining / merging
 Time series-related
 ~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.asfreq
    DataFrame.asof
@@ -285,13 +285,13 @@ Plotting
 specific plotting methods of the form ``DataFrame.plot.<kind>``.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_callable.rst
 
    DataFrame.plot
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_method.rst
 
    DataFrame.plot.area
@@ -307,7 +307,7 @@ specific plotting methods of the form ``DataFrame.plot.<kind>``.
    DataFrame.plot.scatter
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.boxplot
    DataFrame.hist
@@ -315,7 +315,7 @@ specific plotting methods of the form ``DataFrame.plot.<kind>``.
 Serialization / IO / Conversion
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrame.from_csv
    DataFrame.from_dict
@@ -346,6 +346,6 @@ Serialization / IO / Conversion
 Sparse
 ~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    SparseDataFrame.to_coo
diff --git a/doc/source/api/general_functions.rst b/doc/source/reference/general_functions.rst
similarity index 84%
rename from doc/source/api/general_functions.rst
rename to doc/source/reference/general_functions.rst
index cef5d8cac6abc..b5832cb8aa591 100644
--- a/doc/source/api/general_functions.rst
+++ b/doc/source/reference/general_functions.rst
@@ -10,7 +10,7 @@ General functions
 Data manipulations
 ~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    melt
    pivot
@@ -30,7 +30,7 @@ Data manipulations
 Top-level missing data
 ~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    isna
    isnull
@@ -40,14 +40,14 @@ Top-level missing data
 Top-level conversions
 ~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    to_numeric
 
 Top-level dealing with datetimelike
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    to_datetime
    to_timedelta
@@ -60,21 +60,21 @@ Top-level dealing with datetimelike
 Top-level dealing with intervals
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    interval_range
 
 Top-level evaluation
 ~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    eval
 
 Hashing
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    util.hash_array
    util.hash_pandas_object
@@ -82,6 +82,6 @@ Hashing
 Testing
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    test
diff --git a/doc/source/api/general_utility_functions.rst b/doc/source/reference/general_utility_functions.rst
similarity index 93%
rename from doc/source/api/general_utility_functions.rst
rename to doc/source/reference/general_utility_functions.rst
index e151f8f57ed5e..9c69770c0f1b7 100644
--- a/doc/source/api/general_utility_functions.rst
+++ b/doc/source/reference/general_utility_functions.rst
@@ -10,7 +10,7 @@ General utility functions
 Working with options
 --------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    describe_option
    reset_option
@@ -21,7 +21,7 @@ Working with options
 Testing functions
 -----------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    testing.assert_frame_equal
    testing.assert_series_equal
@@ -30,7 +30,7 @@ Testing functions
 Exceptions and warnings
 -----------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    errors.DtypeWarning
    errors.EmptyDataError
@@ -44,7 +44,7 @@ Exceptions and warnings
 Data types related functionality
 --------------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    api.types.union_categoricals
    api.types.infer_dtype
@@ -53,7 +53,7 @@ Data types related functionality
 Dtype introspection
 ~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     api.types.is_bool_dtype
     api.types.is_categorical_dtype
@@ -81,7 +81,7 @@ Dtype introspection
 Iterable introspection
 ~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     api.types.is_dict_like
     api.types.is_file_like
@@ -92,7 +92,7 @@ Iterable introspection
 Scalar introspection
 ~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     api.types.is_bool
     api.types.is_categorical
diff --git a/doc/source/api/groupby.rst b/doc/source/reference/groupby.rst
similarity index 94%
rename from doc/source/api/groupby.rst
rename to doc/source/reference/groupby.rst
index d67c7e0889522..6ed85ff2fac43 100644
--- a/doc/source/api/groupby.rst
+++ b/doc/source/reference/groupby.rst
@@ -12,7 +12,7 @@ GroupBy objects are returned by groupby calls: :func:`pandas.DataFrame.groupby`,
 Indexing, iteration
 -------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    GroupBy.__iter__
    GroupBy.groups
@@ -22,7 +22,7 @@ Indexing, iteration
 .. currentmodule:: pandas
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    Grouper
@@ -32,7 +32,7 @@ Indexing, iteration
 Function application
 --------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    GroupBy.apply
    GroupBy.agg
@@ -43,7 +43,7 @@ Function application
 Computations / Descriptive Stats
 --------------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    GroupBy.all
    GroupBy.any
@@ -78,7 +78,7 @@ axis argument, and often an argument indicating whether to restrict
 application to columns of a specific data type.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrameGroupBy.all
    DataFrameGroupBy.any
@@ -113,7 +113,7 @@ application to columns of a specific data type.
 The following methods are available only for ``SeriesGroupBy`` objects.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    SeriesGroupBy.nlargest
    SeriesGroupBy.nsmallest
@@ -126,7 +126,7 @@ The following methods are available only for ``SeriesGroupBy`` objects.
 The following methods are available only for ``DataFrameGroupBy`` objects.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DataFrameGroupBy.corrwith
    DataFrameGroupBy.boxplot
diff --git a/doc/source/api/index.rst b/doc/source/reference/index.rst
similarity index 56%
rename from doc/source/api/index.rst
rename to doc/source/reference/index.rst
index e4d118e278128..ef4676054473a 100644
--- a/doc/source/api/index.rst
+++ b/doc/source/reference/index.rst
@@ -44,31 +44,31 @@ public functions related to data types in pandas.
 .. toctree::
    :hidden:
 
-   generated/pandas.DataFrame.blocks
-   generated/pandas.DataFrame.as_matrix
-   generated/pandas.DataFrame.ix
-   generated/pandas.Index.asi8
-   generated/pandas.Index.data
-   generated/pandas.Index.flags
-   generated/pandas.Index.holds_integer
-   generated/pandas.Index.is_type_compatible
-   generated/pandas.Index.nlevels
-   generated/pandas.Index.sort
-   generated/pandas.Panel.agg
-   generated/pandas.Panel.aggregate
-   generated/pandas.Panel.blocks
-   generated/pandas.Panel.empty
-   generated/pandas.Panel.is_copy
-   generated/pandas.Panel.items
-   generated/pandas.Panel.ix
-   generated/pandas.Panel.major_axis
-   generated/pandas.Panel.minor_axis
-   generated/pandas.Series.asobject
-   generated/pandas.Series.blocks
-   generated/pandas.Series.from_array
-   generated/pandas.Series.ix
-   generated/pandas.Series.imag
-   generated/pandas.Series.real
+   api/pandas.DataFrame.blocks
+   api/pandas.DataFrame.as_matrix
+   api/pandas.DataFrame.ix
+   api/pandas.Index.asi8
+   api/pandas.Index.data
+   api/pandas.Index.flags
+   api/pandas.Index.holds_integer
+   api/pandas.Index.is_type_compatible
+   api/pandas.Index.nlevels
+   api/pandas.Index.sort
+   api/pandas.Panel.agg
+   api/pandas.Panel.aggregate
+   api/pandas.Panel.blocks
+   api/pandas.Panel.empty
+   api/pandas.Panel.is_copy
+   api/pandas.Panel.items
+   api/pandas.Panel.ix
+   api/pandas.Panel.major_axis
+   api/pandas.Panel.minor_axis
+   api/pandas.Series.asobject
+   api/pandas.Series.blocks
+   api/pandas.Series.from_array
+   api/pandas.Series.ix
+   api/pandas.Series.imag
+   api/pandas.Series.real
 
 
 .. Can't convince sphinx to generate toctree for this class attribute.
@@ -77,4 +77,4 @@ public functions related to data types in pandas.
 .. toctree::
    :hidden:
 
-   generated/pandas.api.extensions.ExtensionDtype.na_value
+   api/pandas.api.extensions.ExtensionDtype.na_value
diff --git a/doc/source/api/indexing.rst b/doc/source/reference/indexing.rst
similarity index 91%
rename from doc/source/api/indexing.rst
rename to doc/source/reference/indexing.rst
index d27b05322c1f2..680cb7e3dac91 100644
--- a/doc/source/api/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -15,14 +15,14 @@ that contain an index (Series/DataFrame) and those should most likely be
 used before calling these methods directly.**
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Index
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Index.values
    Index.is_monotonic
@@ -51,7 +51,7 @@ Properties
 Modifying and Computations
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Index.all
    Index.any
@@ -90,7 +90,7 @@ Modifying and Computations
 Compatibility with MultiIndex
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Index.set_names
    Index.is_lexsorted_for_tuple
@@ -99,7 +99,7 @@ Compatibility with MultiIndex
 Missing Values
 ~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Index.fillna
    Index.dropna
@@ -109,7 +109,7 @@ Missing Values
 Conversion
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Index.astype
    Index.item
@@ -124,7 +124,7 @@ Conversion
 Sorting
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Index.argsort
    Index.searchsorted
@@ -133,14 +133,14 @@ Sorting
 Time-specific operations
 ~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Index.shift
 
 Combining / joining / set operations
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Index.append
    Index.join
@@ -152,7 +152,7 @@ Combining / joining / set operations
 Selecting
 ~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Index.asof
    Index.asof_locs
@@ -176,7 +176,7 @@ Selecting
 Numeric Index
 -------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    RangeIndex
@@ -188,7 +188,7 @@ Numeric Index
 .. Separate block, since they aren't classes.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    RangeIndex.from_range
 
@@ -197,7 +197,7 @@ Numeric Index
 CategoricalIndex
 ----------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    CategoricalIndex
@@ -205,7 +205,7 @@ CategoricalIndex
 Categorical Components
 ~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    CategoricalIndex.codes
    CategoricalIndex.categories
@@ -222,7 +222,7 @@ Categorical Components
 Modifying and Computations
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    CategoricalIndex.map
    CategoricalIndex.equals
@@ -232,7 +232,7 @@ Modifying and Computations
 IntervalIndex
 -------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    IntervalIndex
@@ -240,7 +240,7 @@ IntervalIndex
 IntervalIndex Components
 ~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    IntervalIndex.from_arrays
    IntervalIndex.from_tuples
@@ -265,20 +265,20 @@ IntervalIndex Components
 MultiIndex
 ----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    MultiIndex
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    IndexSlice
 
 MultiIndex Constructors
 ~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    MultiIndex.from_arrays
    MultiIndex.from_tuples
@@ -288,7 +288,7 @@ MultiIndex Constructors
 MultiIndex Properties
 ~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    MultiIndex.names
    MultiIndex.levels
@@ -299,7 +299,7 @@ MultiIndex Properties
 MultiIndex Components
 ~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    MultiIndex.set_levels
    MultiIndex.set_codes
@@ -316,7 +316,7 @@ MultiIndex Components
 MultiIndex Selecting
 ~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    MultiIndex.get_loc
    MultiIndex.get_loc_level
@@ -328,7 +328,7 @@ MultiIndex Selecting
 DatetimeIndex
 -------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    DatetimeIndex
@@ -336,7 +336,7 @@ DatetimeIndex
 Time/Date Components
 ~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DatetimeIndex.year
    DatetimeIndex.month
@@ -370,7 +370,7 @@ Time/Date Components
 Selecting
 ~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DatetimeIndex.indexer_at_time
    DatetimeIndex.indexer_between_time
@@ -379,7 +379,7 @@ Selecting
 Time-specific operations
 ~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DatetimeIndex.normalize
    DatetimeIndex.strftime
@@ -395,7 +395,7 @@ Time-specific operations
 Conversion
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    DatetimeIndex.to_period
    DatetimeIndex.to_perioddelta
@@ -406,7 +406,7 @@ Conversion
 TimedeltaIndex
 --------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    TimedeltaIndex
@@ -414,7 +414,7 @@ TimedeltaIndex
 Components
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    TimedeltaIndex.days
    TimedeltaIndex.seconds
@@ -426,7 +426,7 @@ Components
 Conversion
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    TimedeltaIndex.to_pytimedelta
    TimedeltaIndex.to_series
@@ -440,7 +440,7 @@ Conversion
 PeriodIndex
 -----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    PeriodIndex
@@ -448,7 +448,7 @@ PeriodIndex
 Properties
 ~~~~~~~~~~
 .. autosummary::
-    :toctree: generated/
+    :toctree: api/
 
     PeriodIndex.day
     PeriodIndex.dayofweek
@@ -474,7 +474,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-    :toctree: generated/
+    :toctree: api/
 
     PeriodIndex.asfreq
     PeriodIndex.strftime
diff --git a/doc/source/api/io.rst b/doc/source/reference/io.rst
similarity index 78%
rename from doc/source/api/io.rst
rename to doc/source/reference/io.rst
index f2060b7c05413..9c776e3ff8a82 100644
--- a/doc/source/api/io.rst
+++ b/doc/source/reference/io.rst
@@ -10,14 +10,14 @@ Input/Output
 Pickling
 ~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_pickle
 
 Flat File
 ~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_table
    read_csv
@@ -27,20 +27,20 @@ Flat File
 Clipboard
 ~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_clipboard
 
 Excel
 ~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_excel
    ExcelFile.parse
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/class_without_autosummary.rst
 
    ExcelWriter
@@ -48,14 +48,14 @@ Excel
 JSON
 ~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_json
 
 .. currentmodule:: pandas.io.json
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    json_normalize
    build_table_schema
@@ -65,14 +65,14 @@ JSON
 HTML
 ~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_html
 
 HDFStore: PyTables (HDF5)
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_hdf
    HDFStore.put
@@ -87,28 +87,28 @@ HDFStore: PyTables (HDF5)
 Feather
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_feather
 
 Parquet
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_parquet
 
 SAS
 ~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_sas
 
 SQL
 ~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_sql_table
    read_sql_query
@@ -117,21 +117,21 @@ SQL
 Google BigQuery
 ~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_gbq
 
 STATA
 ~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    read_stata
 
 .. currentmodule:: pandas.io.stata
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    StataReader.data
    StataReader.data_label
diff --git a/doc/source/api/offset_frequency.rst b/doc/source/reference/offset_frequency.rst
similarity index 84%
rename from doc/source/api/offset_frequency.rst
rename to doc/source/reference/offset_frequency.rst
index 42894fe8d7f2f..ccc1c7e171d22 100644
--- a/doc/source/api/offset_frequency.rst
+++ b/doc/source/reference/offset_frequency.rst
@@ -10,14 +10,14 @@ Date Offsets
 DateOffset
 ----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     DateOffset
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     DateOffset.freqstr
     DateOffset.kwds
@@ -29,7 +29,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     DateOffset.apply
     DateOffset.copy
@@ -39,14 +39,14 @@ Methods
 BusinessDay
 -----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessDay
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessDay.freqstr
     BusinessDay.kwds
@@ -58,7 +58,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessDay.apply
     BusinessDay.apply_index
@@ -69,14 +69,14 @@ Methods
 BusinessHour
 ------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessHour
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessHour.freqstr
     BusinessHour.kwds
@@ -88,7 +88,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessHour.apply
     BusinessHour.copy
@@ -98,14 +98,14 @@ Methods
 CustomBusinessDay
 -----------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessDay
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessDay.freqstr
     CustomBusinessDay.kwds
@@ -117,7 +117,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessDay.apply
     CustomBusinessDay.copy
@@ -127,14 +127,14 @@ Methods
 CustomBusinessHour
 ------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessHour
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessHour.freqstr
     CustomBusinessHour.kwds
@@ -146,7 +146,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessHour.apply
     CustomBusinessHour.copy
@@ -156,14 +156,14 @@ Methods
 MonthOffset
 -----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     MonthOffset
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     MonthOffset.freqstr
     MonthOffset.kwds
@@ -175,7 +175,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     MonthOffset.apply
     MonthOffset.apply_index
@@ -186,14 +186,14 @@ Methods
 MonthEnd
 --------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     MonthEnd
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     MonthEnd.freqstr
     MonthEnd.kwds
@@ -205,7 +205,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     MonthEnd.apply
     MonthEnd.apply_index
@@ -216,14 +216,14 @@ Methods
 MonthBegin
 ----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     MonthBegin
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     MonthBegin.freqstr
     MonthBegin.kwds
@@ -235,7 +235,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     MonthBegin.apply
     MonthBegin.apply_index
@@ -246,14 +246,14 @@ Methods
 BusinessMonthEnd
 ----------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessMonthEnd
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessMonthEnd.freqstr
     BusinessMonthEnd.kwds
@@ -265,7 +265,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessMonthEnd.apply
     BusinessMonthEnd.apply_index
@@ -276,14 +276,14 @@ Methods
 BusinessMonthBegin
 ------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessMonthBegin
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessMonthBegin.freqstr
     BusinessMonthBegin.kwds
@@ -295,7 +295,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BusinessMonthBegin.apply
     BusinessMonthBegin.apply_index
@@ -306,14 +306,14 @@ Methods
 CustomBusinessMonthEnd
 ----------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessMonthEnd
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessMonthEnd.freqstr
     CustomBusinessMonthEnd.kwds
@@ -326,7 +326,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessMonthEnd.apply
     CustomBusinessMonthEnd.copy
@@ -336,14 +336,14 @@ Methods
 CustomBusinessMonthBegin
 ------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessMonthBegin
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessMonthBegin.freqstr
     CustomBusinessMonthBegin.kwds
@@ -356,7 +356,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CustomBusinessMonthBegin.apply
     CustomBusinessMonthBegin.copy
@@ -366,14 +366,14 @@ Methods
 SemiMonthOffset
 ---------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     SemiMonthOffset
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     SemiMonthOffset.freqstr
     SemiMonthOffset.kwds
@@ -385,7 +385,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     SemiMonthOffset.apply
     SemiMonthOffset.apply_index
@@ -396,14 +396,14 @@ Methods
 SemiMonthEnd
 ------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     SemiMonthEnd
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     SemiMonthEnd.freqstr
     SemiMonthEnd.kwds
@@ -415,7 +415,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     SemiMonthEnd.apply
     SemiMonthEnd.apply_index
@@ -426,14 +426,14 @@ Methods
 SemiMonthBegin
 --------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     SemiMonthBegin
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     SemiMonthBegin.freqstr
     SemiMonthBegin.kwds
@@ -445,7 +445,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     SemiMonthBegin.apply
     SemiMonthBegin.apply_index
@@ -456,14 +456,14 @@ Methods
 Week
 ----
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Week
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Week.freqstr
     Week.kwds
@@ -475,7 +475,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Week.apply
     Week.apply_index
@@ -486,14 +486,14 @@ Methods
 WeekOfMonth
 -----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     WeekOfMonth
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     WeekOfMonth.freqstr
     WeekOfMonth.kwds
@@ -505,7 +505,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     WeekOfMonth.apply
     WeekOfMonth.copy
@@ -515,14 +515,14 @@ Methods
 LastWeekOfMonth
 ---------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     LastWeekOfMonth
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     LastWeekOfMonth.freqstr
     LastWeekOfMonth.kwds
@@ -534,7 +534,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     LastWeekOfMonth.apply
     LastWeekOfMonth.copy
@@ -544,14 +544,14 @@ Methods
 QuarterOffset
 -------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     QuarterOffset
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     QuarterOffset.freqstr
     QuarterOffset.kwds
@@ -563,7 +563,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     QuarterOffset.apply
     QuarterOffset.apply_index
@@ -574,14 +574,14 @@ Methods
 BQuarterEnd
 -----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BQuarterEnd
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BQuarterEnd.freqstr
     BQuarterEnd.kwds
@@ -593,7 +593,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BQuarterEnd.apply
     BQuarterEnd.apply_index
@@ -604,14 +604,14 @@ Methods
 BQuarterBegin
 -------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BQuarterBegin
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BQuarterBegin.freqstr
     BQuarterBegin.kwds
@@ -623,7 +623,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BQuarterBegin.apply
     BQuarterBegin.apply_index
@@ -634,14 +634,14 @@ Methods
 QuarterEnd
 ----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     QuarterEnd
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     QuarterEnd.freqstr
     QuarterEnd.kwds
@@ -653,7 +653,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     QuarterEnd.apply
     QuarterEnd.apply_index
@@ -664,14 +664,14 @@ Methods
 QuarterBegin
 ------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     QuarterBegin
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     QuarterBegin.freqstr
     QuarterBegin.kwds
@@ -683,7 +683,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     QuarterBegin.apply
     QuarterBegin.apply_index
@@ -694,14 +694,14 @@ Methods
 YearOffset
 ----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     YearOffset
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     YearOffset.freqstr
     YearOffset.kwds
@@ -713,7 +713,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     YearOffset.apply
     YearOffset.apply_index
@@ -724,14 +724,14 @@ Methods
 BYearEnd
 --------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BYearEnd
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BYearEnd.freqstr
     BYearEnd.kwds
@@ -743,7 +743,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BYearEnd.apply
     BYearEnd.apply_index
@@ -754,14 +754,14 @@ Methods
 BYearBegin
 ----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BYearBegin
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BYearBegin.freqstr
     BYearBegin.kwds
@@ -773,7 +773,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BYearBegin.apply
     BYearBegin.apply_index
@@ -784,14 +784,14 @@ Methods
 YearEnd
 -------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     YearEnd
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     YearEnd.freqstr
     YearEnd.kwds
@@ -803,7 +803,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     YearEnd.apply
     YearEnd.apply_index
@@ -814,14 +814,14 @@ Methods
 YearBegin
 ---------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     YearBegin
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     YearBegin.freqstr
     YearBegin.kwds
@@ -833,7 +833,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     YearBegin.apply
     YearBegin.apply_index
@@ -844,14 +844,14 @@ Methods
 FY5253
 ------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     FY5253
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     FY5253.freqstr
     FY5253.kwds
@@ -863,7 +863,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     FY5253.apply
     FY5253.copy
@@ -875,14 +875,14 @@ Methods
 FY5253Quarter
 -------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     FY5253Quarter
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     FY5253Quarter.freqstr
     FY5253Quarter.kwds
@@ -894,7 +894,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     FY5253Quarter.apply
     FY5253Quarter.copy
@@ -906,14 +906,14 @@ Methods
 Easter
 ------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Easter
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Easter.freqstr
     Easter.kwds
@@ -925,7 +925,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Easter.apply
     Easter.copy
@@ -935,14 +935,14 @@ Methods
 Tick
 ----
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Tick
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Tick.delta
     Tick.freqstr
@@ -955,7 +955,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Tick.copy
     Tick.isAnchored
@@ -964,14 +964,14 @@ Methods
 Day
 ---
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Day
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Day.delta
     Day.freqstr
@@ -984,7 +984,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Day.copy
     Day.isAnchored
@@ -993,14 +993,14 @@ Methods
 Hour
 ----
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Hour
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Hour.delta
     Hour.freqstr
@@ -1013,7 +1013,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Hour.copy
     Hour.isAnchored
@@ -1022,14 +1022,14 @@ Methods
 Minute
 ------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Minute
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Minute.delta
     Minute.freqstr
@@ -1042,7 +1042,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Minute.copy
     Minute.isAnchored
@@ -1051,14 +1051,14 @@ Methods
 Second
 ------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Second
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Second.delta
     Second.freqstr
@@ -1071,7 +1071,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Second.copy
     Second.isAnchored
@@ -1080,14 +1080,14 @@ Methods
 Milli
 -----
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Milli
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Milli.delta
     Milli.freqstr
@@ -1100,7 +1100,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Milli.copy
     Milli.isAnchored
@@ -1109,14 +1109,14 @@ Methods
 Micro
 -----
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Micro
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Micro.delta
     Micro.freqstr
@@ -1129,7 +1129,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Micro.copy
     Micro.isAnchored
@@ -1138,14 +1138,14 @@ Methods
 Nano
 ----
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Nano
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Nano.delta
     Nano.freqstr
@@ -1158,7 +1158,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     Nano.copy
     Nano.isAnchored
@@ -1167,14 +1167,14 @@ Methods
 BDay
 ----
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BDay
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BDay.base
     BDay.freqstr
@@ -1188,7 +1188,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BDay.apply
     BDay.apply_index
@@ -1201,14 +1201,14 @@ Methods
 BMonthEnd
 ---------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BMonthEnd
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BMonthEnd.base
     BMonthEnd.freqstr
@@ -1221,7 +1221,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BMonthEnd.apply
     BMonthEnd.apply_index
@@ -1234,14 +1234,14 @@ Methods
 BMonthBegin
 -----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BMonthBegin
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BMonthBegin.base
     BMonthBegin.freqstr
@@ -1254,7 +1254,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     BMonthBegin.apply
     BMonthBegin.apply_index
@@ -1267,14 +1267,14 @@ Methods
 CBMonthEnd
 ----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CBMonthEnd
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CBMonthEnd.base
     CBMonthEnd.cbday_roll
@@ -1291,7 +1291,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CBMonthEnd.apply
     CBMonthEnd.apply_index
@@ -1304,14 +1304,14 @@ Methods
 CBMonthBegin
 ------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CBMonthBegin
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CBMonthBegin.base
     CBMonthBegin.cbday_roll
@@ -1328,7 +1328,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CBMonthBegin.apply
     CBMonthBegin.apply_index
@@ -1341,14 +1341,14 @@ Methods
 CDay
 ----
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CDay
 
 Properties
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CDay.base
     CDay.freqstr
@@ -1362,7 +1362,7 @@ Properties
 Methods
 ~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
     CDay.apply
     CDay.apply_index
@@ -1382,6 +1382,6 @@ Frequencies
 .. _api.offsets:
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    to_offset
diff --git a/doc/source/api/panel.rst b/doc/source/reference/panel.rst
similarity index 90%
rename from doc/source/api/panel.rst
rename to doc/source/reference/panel.rst
index 4edcd22d2685d..39c8ba0828859 100644
--- a/doc/source/api/panel.rst
+++ b/doc/source/reference/panel.rst
@@ -10,7 +10,7 @@ Panel
 Constructor
 ~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel
 
@@ -23,7 +23,7 @@ Properties and underlying data
 * **minor_axis**: axis 2; the columns of each of the DataFrames
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.values
    Panel.axes
@@ -38,7 +38,7 @@ Properties and underlying data
 Conversion
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.astype
    Panel.copy
@@ -48,7 +48,7 @@ Conversion
 Getting and setting
 ~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.get_value
    Panel.set_value
@@ -56,7 +56,7 @@ Getting and setting
 Indexing, iteration, slicing
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.at
    Panel.iat
@@ -75,7 +75,7 @@ For more information on ``.at``, ``.iat``, ``.loc``, and
 Binary operator functions
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.add
    Panel.sub
@@ -103,7 +103,7 @@ Binary operator functions
 Function application, GroupBy
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.apply
    Panel.groupby
@@ -113,7 +113,7 @@ Function application, GroupBy
 Computations / Descriptive Stats
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.abs
    Panel.clip
@@ -139,7 +139,7 @@ Computations / Descriptive Stats
 Reindexing / Selection / Label manipulation
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.add_prefix
    Panel.add_suffix
@@ -160,14 +160,14 @@ Reindexing / Selection / Label manipulation
 Missing data handling
 ~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.dropna
 
 Reshaping, sorting, transposing
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.sort_index
    Panel.swaplevel
@@ -178,7 +178,7 @@ Reshaping, sorting, transposing
 Combining / joining / merging
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.join
    Panel.update
@@ -186,7 +186,7 @@ Combining / joining / merging
 Time series-related
 ~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.asfreq
    Panel.shift
@@ -197,7 +197,7 @@ Time series-related
 Serialization / IO / Conversion
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Panel.from_dict
    Panel.to_pickle
diff --git a/doc/source/api/plotting.rst b/doc/source/reference/plotting.rst
similarity index 93%
rename from doc/source/api/plotting.rst
rename to doc/source/reference/plotting.rst
index c4e6333ebda37..7615e1d20f5e2 100644
--- a/doc/source/api/plotting.rst
+++ b/doc/source/reference/plotting.rst
@@ -10,7 +10,7 @@ Plotting
 The following functions are contained in the `pandas.plotting` module.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    andrews_curves
    bootstrap_plot
diff --git a/doc/source/api/resampling.rst b/doc/source/reference/resampling.rst
similarity index 91%
rename from doc/source/api/resampling.rst
rename to doc/source/reference/resampling.rst
index f5c6ccce3cdd7..2a52defa3c68f 100644
--- a/doc/source/api/resampling.rst
+++ b/doc/source/reference/resampling.rst
@@ -12,7 +12,7 @@ Resampler objects are returned by resample calls: :func:`pandas.DataFrame.resamp
 Indexing, iteration
 ~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Resampler.__iter__
    Resampler.groups
@@ -22,7 +22,7 @@ Indexing, iteration
 Function application
 ~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Resampler.apply
    Resampler.aggregate
@@ -32,7 +32,7 @@ Function application
 Upsampling
 ~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Resampler.ffill
    Resampler.backfill
@@ -46,7 +46,7 @@ Upsampling
 Computations / Descriptive Stats
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Resampler.count
    Resampler.nunique
diff --git a/doc/source/api/series.rst b/doc/source/reference/series.rst
similarity index 93%
rename from doc/source/api/series.rst
rename to doc/source/reference/series.rst
index aa43c8b643d44..a6ac40b5203bf 100644
--- a/doc/source/api/series.rst
+++ b/doc/source/reference/series.rst
@@ -10,7 +10,7 @@ Series
 Constructor
 -----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series
 
@@ -19,12 +19,12 @@ Attributes
 **Axes**
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.index
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.array
    Series.values
@@ -52,7 +52,7 @@ Attributes
 Conversion
 ----------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.astype
    Series.infer_objects
@@ -69,7 +69,7 @@ Conversion
 Indexing, iteration
 -------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.get
    Series.at
@@ -90,7 +90,7 @@ For more information on ``.at``, ``.iat``, ``.loc``, and
 Binary operator functions
 -------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.add
    Series.sub
@@ -123,7 +123,7 @@ Binary operator functions
 Function application, GroupBy & Window
 --------------------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.apply
    Series.agg
@@ -141,7 +141,7 @@ Function application, GroupBy & Window
 Computations / Descriptive Stats
 --------------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.abs
    Series.all
@@ -192,7 +192,7 @@ Computations / Descriptive Stats
 Reindexing / Selection / Label manipulation
 -------------------------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.align
    Series.drop
@@ -226,7 +226,7 @@ Reindexing / Selection / Label manipulation
 Missing data handling
 ---------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.isna
    Series.notna
@@ -237,7 +237,7 @@ Missing data handling
 Reshaping, sorting
 ------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.argsort
    Series.argmin
@@ -256,7 +256,7 @@ Reshaping, sorting
 Combining / joining / merging
 -----------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.append
    Series.replace
@@ -265,7 +265,7 @@ Combining / joining / merging
 Time series-related
 -------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.asfreq
    Series.asof
@@ -309,7 +309,7 @@ Datetime Properties
 ^^^^^^^^^^^^^^^^^^^
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_attribute.rst
 
    Series.dt.date
@@ -345,7 +345,7 @@ Datetime Methods
 ^^^^^^^^^^^^^^^^
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_method.rst
 
    Series.dt.to_period
@@ -364,7 +364,7 @@ Period Properties
 ^^^^^^^^^^^^^^^^^
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_attribute.rst
 
    Series.dt.qyear
@@ -375,7 +375,7 @@ Timedelta Properties
 ^^^^^^^^^^^^^^^^^^^^
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_attribute.rst
 
    Series.dt.days
@@ -388,7 +388,7 @@ Timedelta Methods
 ^^^^^^^^^^^^^^^^^
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_method.rst
 
    Series.dt.to_pytimedelta
@@ -405,7 +405,7 @@ strings and apply several methods to it. These can be accessed like
 ``Series.str.<function/property>``.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_method.rst
 
    Series.str.capitalize
@@ -467,7 +467,7 @@ strings and apply several methods to it. These can be accessed like
 
 ..
     .. autosummary::
-       :toctree: generated/
+       :toctree: api/
        :template: autosummary/accessor.rst
 
        Series.str
@@ -484,7 +484,7 @@ Categorical-dtype specific methods and attributes are available under
 the ``Series.cat`` accessor.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_attribute.rst
 
    Series.cat.categories
@@ -492,7 +492,7 @@ the ``Series.cat`` accessor.
    Series.cat.codes
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_method.rst
 
    Series.cat.rename_categories
@@ -514,7 +514,7 @@ Sparse-dtype specific methods and attributes are provided under the
 ``Series.sparse`` accessor.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_attribute.rst
 
    Series.sparse.npoints
@@ -523,7 +523,7 @@ Sparse-dtype specific methods and attributes are provided under the
    Series.sparse.sp_values
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.sparse.from_coo
    Series.sparse.to_coo
@@ -535,13 +535,13 @@ Plotting
 specific plotting methods of the form ``Series.plot.<kind>``.
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_callable.rst
 
    Series.plot
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
    :template: autosummary/accessor_method.rst
 
    Series.plot.area
@@ -555,14 +555,14 @@ specific plotting methods of the form ``Series.plot.<kind>``.
    Series.plot.pie
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.hist
 
 Serialization / IO / Conversion
 -------------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Series.to_pickle
    Series.to_csv
@@ -585,7 +585,7 @@ Sparse
 ------
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    SparseSeries.to_coo
    SparseSeries.from_coo
diff --git a/doc/source/api/style.rst b/doc/source/reference/style.rst
similarity index 88%
rename from doc/source/api/style.rst
rename to doc/source/reference/style.rst
index 70913bbec410d..bd9635b41e343 100644
--- a/doc/source/api/style.rst
+++ b/doc/source/reference/style.rst
@@ -12,7 +12,7 @@ Style
 Styler Constructor
 ------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Styler
    Styler.from_custom_template
@@ -20,7 +20,7 @@ Styler Constructor
 Styler Properties
 -----------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Styler.env
    Styler.template
@@ -29,7 +29,7 @@ Styler Properties
 Style Application
 -----------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Styler.apply
    Styler.applymap
@@ -47,7 +47,7 @@ Style Application
 Builtin Styles
 --------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Styler.highlight_max
    Styler.highlight_min
@@ -58,7 +58,7 @@ Builtin Styles
 Style Export and Import
 -----------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Styler.render
    Styler.export
diff --git a/doc/source/api/window.rst b/doc/source/reference/window.rst
similarity index 95%
rename from doc/source/api/window.rst
rename to doc/source/reference/window.rst
index 3245f5f831688..9e1374a3bd8e4 100644
--- a/doc/source/api/window.rst
+++ b/doc/source/reference/window.rst
@@ -14,7 +14,7 @@ EWM objects are returned by ``.ewm`` calls: :func:`pandas.DataFrame.ewm`, :func:
 Standard moving window functions
 --------------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Rolling.count
    Rolling.sum
@@ -39,7 +39,7 @@ Standard moving window functions
 Standard expanding window functions
 -----------------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    Expanding.count
    Expanding.sum
@@ -60,7 +60,7 @@ Standard expanding window functions
 Exponentially-weighted moving window functions
 ----------------------------------------------
 .. autosummary::
-   :toctree: generated/
+   :toctree: api/
 
    EWM.mean
    EWM.std
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 0132392aacaff..58e1b2370c7c8 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -4850,7 +4850,7 @@ See also some :ref:`cookbook examples <cookbook.sql>` for some advanced strategi
 The key functions are:
 
 .. autosummary::
-    :toctree: generated/
+    :toctree: ../reference/api/
 
     read_sql_table
     read_sql_query
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 4e389aed2b0d2..bce33f7e78daa 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -796,7 +796,8 @@ def validate_all(prefix, ignore_deprecated=False):
     seen = {}
 
     # functions from the API docs
-    api_doc_fnames = os.path.join(BASE_PATH, 'doc', 'source', 'api', '*.rst')
+    api_doc_fnames = os.path.join(
+        BASE_PATH, 'doc', 'source', 'reference', '*.rst')
     api_items = []
     for api_doc_fname in glob.glob(api_doc_fnames):
         with open(api_doc_fname) as f:

From 0bc2d3e0bf81019ece6003113e8e2ef0e24382b4 Mon Sep 17 00:00:00 2001
From: Marc Garcia <garcia.marc@gmail.com>
Date: Fri, 25 Jan 2019 13:19:32 +0000
Subject: [PATCH 25/48] DOC: Making home page links more compact and clearer
 (#24928)

---
 doc/source/index.rst.template | 23 +++++++++--------------
 1 file changed, 9 insertions(+), 14 deletions(-)

diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index 45ebbbc870ca3..55b95868c01dd 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -1,26 +1,21 @@
 .. pandas documentation master file, created by
 
+.. module:: pandas
+
 *********************************************
 pandas: powerful Python data analysis toolkit
 *********************************************
 
-`PDF Version <pandas.pdf>`__
-
-`Zipped HTML <pandas.zip>`__
-
-.. module:: pandas
-
 **Date**: |today| **Version**: |version|
 
-**Binary Installers:** https://pypi.org/project/pandas
-
-**Source Repository:** https://github.com/pandas-dev/pandas
-
-**Issues & Ideas:** https://github.com/pandas-dev/pandas/issues
-
-**Q&A Support:** https://stackoverflow.com/questions/tagged/pandas
+**Download documentation**: `PDF Version <pandas.pdf>`__ | `Zipped HTML <pandas.zip>`__
 
-**Developer Mailing List:** https://groups.google.com/forum/#!forum/pydata
+**Useful links**:
+`Binary Installers <https://pypi.org/project/pandas>`__ |
+`Source Repository <https://github.com/pandas-dev/pandas>`__ |
+`Issues & Ideas <https://github.com/pandas-dev/pandas/issues>`__ |
+`Q&A Support <https://stackoverflow.com/questions/tagged/pandas>`__ |
+`Mailing List <https://groups.google.com/forum/#!forum/pydata>`__
 
 :mod:`pandas` is an open source, BSD-licensed library providing high-performance,
 easy-to-use data structures and data analysis tools for the `Python <https://www.python.org/>`__

From 14b68eabb4b7750c04ccd3ed17247504aceae35a Mon Sep 17 00:00:00 2001
From: Tom Augspurger <TomAugspurger@users.noreply.github.com>
Date: Fri, 25 Jan 2019 08:23:07 -0600
Subject: [PATCH 26/48] DOC: 0.24 release date (#24930)

---
 .gitignore                      | 2 +-
 doc/source/whatsnew/v0.24.0.rst | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.gitignore b/.gitignore
index 9891883879cf1..816aff376fc83 100644
--- a/.gitignore
+++ b/.gitignore
@@ -101,6 +101,7 @@ asv_bench/pandas/
 # Documentation generated files #
 #################################
 doc/source/generated
+doc/source/user_guide/styled.xlsx
 doc/source/reference/api
 doc/source/_static
 doc/source/vbench
@@ -109,6 +110,5 @@ doc/source/index.rst
 doc/build/html/index.html
 # Windows specific leftover:
 doc/tmp.sv
-doc/source/styled.xlsx
 env/
 doc/source/savefig/
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index f0f99d2def136..489d505cb8f67 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1,6 +1,6 @@
 .. _whatsnew_0240:
 
-What's New in 0.24.0 (January XX, 2019)
+What's New in 0.24.0 (January 25, 2019)
 ---------------------------------------
 
 .. warning::

From 8a8a0830f5a021f7c8d272fabdfa699b4502edd3 Mon Sep 17 00:00:00 2001
From: Marc Garcia <garcia.marc@gmail.com>
Date: Fri, 25 Jan 2019 14:54:36 +0000
Subject: [PATCH 27/48] DOC: Adding version to the whatsnew section in the home
 page (#24929)

---
 doc/source/index.rst.template | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index 55b95868c01dd..51487c0d325b5 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -39,7 +39,7 @@ See the :ref:`overview` for more detail about what's in the library.
 {% endif %}
 
     {% if not single_doc -%}
-    What's New <whatsnew/v0.24.0>
+    What's New in 0.24.0 <whatsnew/v0.24.0>
     install
     getting_started/index
     user_guide/index

From c8aae3540e4d22d2581e66843740fba10e9ea0b1 Mon Sep 17 00:00:00 2001
From: Tom Augspurger <TomAugspurger@users.noreply.github.com>
Date: Fri, 25 Jan 2019 08:55:49 -0600
Subject: [PATCH 28/48] API: Remove IntervalArray from top-level (#24926)

---
 doc/source/reference/arrays.rst   |  4 ++--
 doc/source/whatsnew/v0.24.0.rst   |  4 ++--
 pandas/core/api.py                |  1 -
 pandas/core/arrays/array_.py      |  2 +-
 pandas/core/arrays/interval.py    | 29 +++++++++++++++--------------
 pandas/core/indexes/interval.py   | 23 ++++++++++++-----------
 pandas/tests/api/test_api.py      |  1 -
 pandas/tests/arrays/test_array.py |  4 ++--
 8 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst
index 7281f4f748d6f..1dc74ad83b7e6 100644
--- a/doc/source/reference/arrays.rst
+++ b/doc/source/reference/arrays.rst
@@ -288,12 +288,12 @@ Properties
    Interval.overlaps
    Interval.right
 
-A collection of intervals may be stored in an :class:`IntervalArray`.
+A collection of intervals may be stored in an :class:`arrays.IntervalArray`.
 
 .. autosummary::
    :toctree: api/
 
-   IntervalArray
+   arrays.IntervalArray
    IntervalDtype
 
 .. _api.arrays.integer_na:
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 489d505cb8f67..fc963fce37a5b 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -225,7 +225,7 @@ from the ``Series``:
    ser.array
    pser.array
 
-These return an instance of :class:`IntervalArray` or :class:`arrays.PeriodArray`,
+These return an instance of :class:`arrays.IntervalArray` or :class:`arrays.PeriodArray`,
 the new extension arrays that back interval and period data.
 
 .. warning::
@@ -411,7 +411,7 @@ Other Enhancements
 - :meth:`Categorical.from_codes` now can take a ``dtype`` parameter as an alternative to passing ``categories`` and ``ordered`` (:issue:`24398`).
 - New attribute ``__git_version__`` will return git commit sha of current build (:issue:`21295`).
 - Compatibility with Matplotlib 3.0 (:issue:`22790`).
-- Added :meth:`Interval.overlaps`, :meth:`IntervalArray.overlaps`, and :meth:`IntervalIndex.overlaps` for determining overlaps between interval-like objects (:issue:`21998`)
+- Added :meth:`Interval.overlaps`, :meth:`arrays.IntervalArray.overlaps`, and :meth:`IntervalIndex.overlaps` for determining overlaps between interval-like objects (:issue:`21998`)
 - :func:`read_fwf` now accepts keyword ``infer_nrows`` (:issue:`15138`).
 - :func:`~DataFrame.to_parquet` now supports writing a ``DataFrame`` as a directory of parquet files partitioned by a subset of the columns when ``engine = 'pyarrow'`` (:issue:`23283`)
 - :meth:`Timestamp.tz_localize`, :meth:`DatetimeIndex.tz_localize`, and :meth:`Series.tz_localize` have gained the ``nonexistent`` argument for alternative handling of nonexistent times. See :ref:`timeseries.timezone_nonexistent` (:issue:`8917`, :issue:`24466`)
diff --git a/pandas/core/api.py b/pandas/core/api.py
index afc929c39086c..8c92287e212a6 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 
-from pandas.core.arrays import IntervalArray
 from pandas.core.arrays.integer import (
     Int8Dtype,
     Int16Dtype,
diff --git a/pandas/core/arrays/array_.py b/pandas/core/arrays/array_.py
index c7be8e3f745c4..41d623c7efd9c 100644
--- a/pandas/core/arrays/array_.py
+++ b/pandas/core/arrays/array_.py
@@ -50,7 +50,7 @@ def array(data,         # type: Sequence[object]
         ============================== =====================================
         Scalar Type                    Array Type
         ============================== =====================================
-        :class:`pandas.Interval`       :class:`pandas.IntervalArray`
+        :class:`pandas.Interval`       :class:`pandas.arrays.IntervalArray`
         :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
         :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
         :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 45470e03c041a..1e671c7bd956a 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -32,6 +32,7 @@
 
 _shared_docs_kwargs = dict(
     klass='IntervalArray',
+    qualname='arrays.IntervalArray',
     name=''
 )
 
@@ -115,7 +116,7 @@
     A new ``IntervalArray`` can be constructed directly from an array-like of
     ``Interval`` objects:
 
-    >>> pd.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
+    >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
     IntervalArray([(0, 1], (1, 5]],
                   closed='right',
                   dtype='interval[int64]')
@@ -248,8 +249,8 @@ def _from_factorized(cls, values, original):
 
     Examples
     --------
-    >>> pd.%(klass)s.from_breaks([0, 1, 2, 3])
-    %(klass)s([(0, 1], (1, 2], (2, 3]]
+    >>> pd.%(qualname)s.from_breaks([0, 1, 2, 3])
+    %(klass)s([(0, 1], (1, 2], (2, 3]],
                   closed='right',
                   dtype='interval[int64]')
     """
@@ -311,7 +312,7 @@ def from_breaks(cls, breaks, closed='right', copy=False, dtype=None):
         Examples
         --------
         >>> %(klass)s.from_arrays([0, 1, 2], [1, 2, 3])
-        %(klass)s([(0, 1], (1, 2], (2, 3]]
+        %(klass)s([(0, 1], (1, 2], (2, 3]],
                      closed='right',
                      dtype='interval[int64]')
         """
@@ -354,16 +355,16 @@ def from_arrays(cls, left, right, closed='right', copy=False, dtype=None):
 
     Examples
     --------
-    >>> pd.%(klass)s.from_intervals([pd.Interval(0, 1),
+    >>> pd.%(qualname)s.from_intervals([pd.Interval(0, 1),
     ...                                  pd.Interval(1, 2)])
-    %(klass)s([(0, 1], (1, 2]]
+    %(klass)s([(0, 1], (1, 2]],
                   closed='right', dtype='interval[int64]')
 
     The generic Index constructor work identically when it infers an array
     of all intervals:
 
     >>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)])
-    %(klass)s([(0, 1], (1, 2]]
+    %(klass)s([(0, 1], (1, 2]],
                   closed='right', dtype='interval[int64]')
     """
 
@@ -394,7 +395,7 @@ def from_arrays(cls, left, right, closed='right', copy=False, dtype=None):
 
     Examples
     --------
-    >>>  pd.%(klass)s.from_tuples([(0, 1), (1, 2)])
+    >>> pd.%(qualname)s.from_tuples([(0, 1), (1, 2)])
     %(klass)s([(0, 1], (1, 2]],
                 closed='right', dtype='interval[int64]')
     """
@@ -891,13 +892,13 @@ def closed(self):
 
         Examples
         --------
-        >>>  index = pd.interval_range(0, 3)
-        >>>  index
-        %(klass)s([(0, 1], (1, 2], (2, 3]]
+        >>> index = pd.interval_range(0, 3)
+        >>> index
+        IntervalIndex([(0, 1], (1, 2], (2, 3]],
               closed='right',
               dtype='interval[int64]')
-        >>>  index.set_closed('both')
-        %(klass)s([[0, 1], [1, 2], [2, 3]]
+        >>> index.set_closed('both')
+        IntervalIndex([[0, 1], [1, 2], [2, 3]],
               closed='both',
               dtype='interval[int64]')
         """
@@ -1039,7 +1040,7 @@ def repeat(self, repeats, axis=None):
 
         Examples
         --------
-        >>> intervals = pd.%(klass)s.from_tuples([(0, 1), (1, 3), (2, 4)])
+        >>> intervals = pd.%(qualname)s.from_tuples([(0, 1), (1, 3), (2, 4)])
         >>> intervals
         %(klass)s([(0, 1], (1, 3], (2, 4]],
               closed='right',
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 2a6044fb0a08b..0210560aaa21f 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -38,6 +38,7 @@
 
 _index_doc_kwargs.update(
     dict(klass='IntervalIndex',
+         qualname="IntervalIndex",
          target_klass='IntervalIndex or list of Intervals',
          name=textwrap.dedent("""\
          name : object, optional
@@ -282,10 +283,10 @@ def contains(self, key):
         examples="""
         Examples
         --------
-        >>>  idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])
-        >>>  idx.to_tuples()
+        >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])
+        >>> idx.to_tuples()
         Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')
-        >>>  idx.to_tuples(na_tuple=False)
+        >>> idx.to_tuples(na_tuple=False)
         Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""",
     ))
     def to_tuples(self, na_tuple=True):
@@ -1201,15 +1202,15 @@ def interval_range(start=None, end=None, periods=None, freq=None,
     Numeric ``start`` and  ``end`` is supported.
 
     >>> pd.interval_range(start=0, end=5)
-    IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
+    IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
                   closed='right', dtype='interval[int64]')
 
     Additionally, datetime-like input is also supported.
 
     >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
-                          end=pd.Timestamp('2017-01-04'))
+    ...                   end=pd.Timestamp('2017-01-04'))
     IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
-                   (2017-01-03, 2017-01-04]]
+                   (2017-01-03, 2017-01-04]],
                   closed='right', dtype='interval[datetime64[ns]]')
 
     The ``freq`` parameter specifies the frequency between the left and right.
@@ -1217,23 +1218,23 @@ def interval_range(start=None, end=None, periods=None, freq=None,
     numeric ``start`` and ``end``, the frequency must also be numeric.
 
     >>> pd.interval_range(start=0, periods=4, freq=1.5)
-    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
+    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
                   closed='right', dtype='interval[float64]')
 
     Similarly, for datetime-like ``start`` and ``end``, the frequency must be
     convertible to a DateOffset.
 
     >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
-                          periods=3, freq='MS')
+    ...                   periods=3, freq='MS')
     IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
-                   (2017-03-01, 2017-04-01]]
+                   (2017-03-01, 2017-04-01]],
                   closed='right', dtype='interval[datetime64[ns]]')
 
     Specify ``start``, ``end``, and ``periods``; the frequency is generated
     automatically (linearly spaced).
 
     >>> pd.interval_range(start=0, end=6, periods=4)
-    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
+    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
               closed='right',
               dtype='interval[float64]')
 
@@ -1241,7 +1242,7 @@ def interval_range(start=None, end=None, periods=None, freq=None,
     intervals within the ``IntervalIndex`` are closed.
 
     >>> pd.interval_range(end=5, periods=4, closed='both')
-    IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]]
+    IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
                   closed='both', dtype='interval[int64]')
     """
     start = com.maybe_box_datetimelike(start)
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 07cf358c765b3..599ab9a3c5f7c 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -46,7 +46,6 @@ class TestPDApi(Base):
                'Series', 'SparseArray', 'SparseDataFrame', 'SparseDtype',
                'SparseSeries', 'Timedelta',
                'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex',
-               'IntervalArray',
                'CategoricalDtype', 'PeriodDtype', 'IntervalDtype',
                'DatetimeTZDtype',
                'Int8Dtype', 'Int16Dtype', 'Int32Dtype', 'Int64Dtype',
diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py
index 4a51fd63d963b..9fea1989e46df 100644
--- a/pandas/tests/arrays/test_array.py
+++ b/pandas/tests/arrays/test_array.py
@@ -74,7 +74,7 @@
 
     # Interval
     ([pd.Interval(1, 2), pd.Interval(3, 4)], 'interval',
-     pd.IntervalArray.from_tuples([(1, 2), (3, 4)])),
+     pd.arrays.IntervalArray.from_tuples([(1, 2), (3, 4)])),
 
     # Sparse
     ([0, 1], 'Sparse[int64]', pd.SparseArray([0, 1], dtype='int64')),
@@ -129,7 +129,7 @@ def test_array_copy():
 
     # interval
     ([pd.Interval(0, 1), pd.Interval(1, 2)],
-     pd.IntervalArray.from_breaks([0, 1, 2])),
+     pd.arrays.IntervalArray.from_breaks([0, 1, 2])),
 
     # datetime
     ([pd.Timestamp('2000',), pd.Timestamp('2001')],

From 83eb2428ceb6257042173582f3f436c2c887aa69 Mon Sep 17 00:00:00 2001
From: Tom Augspurger <tom.w.augspurger@gmail.com>
Date: Fri, 25 Jan 2019 09:32:26 -0600
Subject: [PATCH 29/48] RLS: 0.24.0


From 0c4113fa0906273007cc12a4bcadff85d943dc84 Mon Sep 17 00:00:00 2001
From: Tom Augspurger <tom.w.augspurger@gmail.com>
Date: Fri, 25 Jan 2019 15:30:08 -0600
Subject: [PATCH 30/48] DEV: Start 0.25 cycle


From d2fa4b42812093d147cd864ca366d6cccac2db30 Mon Sep 17 00:00:00 2001
From: gfyoung <gfyoung17+GitHub@gmail.com>
Date: Sat, 26 Jan 2019 00:01:00 -0800
Subject: [PATCH 31/48] DOC: State that we support scalars in to_numeric
 (#24944)

We support it and test it already.

xref gh-24910.
---
 pandas/core/tools/numeric.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index 803723dab46ff..79d8ee38637f9 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -21,7 +21,7 @@ def to_numeric(arg, errors='raise', downcast=None):
 
     Parameters
     ----------
-    arg : list, tuple, 1-d array, or Series
+    arg : scalar, list, tuple, 1-d array, or Series
     errors : {'ignore', 'raise', 'coerce'}, default 'raise'
         - If 'raise', then invalid parsing will raise an exception
         - If 'coerce', then invalid parsing will be set as NaN

From 2626215ca5dd168ea311f117b988693cef437885 Mon Sep 17 00:00:00 2001
From: Roman Yurchak <rth.yurchak@pm.me>
Date: Sat, 26 Jan 2019 09:01:55 +0100
Subject: [PATCH 32/48] DOC: Minor what's new fix (#24933)

---
 doc/source/whatsnew/v0.24.0.rst | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index fc963fce37a5b..16319a3b83ca4 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -6,7 +6,8 @@ What's New in 0.24.0 (January 25, 2019)
 .. warning::
 
    The 0.24.x series of releases will be the last to support Python 2. Future feature
-   releases will support Python 3 only. See :ref:`install.dropping-27` for more.
+   releases will support Python 3 only. See :ref:`install.dropping-27` for more
+   details.
 
 {{ header }}
 
@@ -244,7 +245,7 @@ the new extension arrays that back interval and period data.
 Joining with two multi-indexes
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-:func:`DataFrame.merge` and :func:`DataFrame.join` can now be used to join multi-indexed ``Dataframe`` instances on the overlaping index levels (:issue:`6360`)
+:func:`DataFrame.merge` and :func:`DataFrame.join` can now be used to join multi-indexed ``Dataframe`` instances on the overlapping index levels (:issue:`6360`)
 
 See the :ref:`Merge, join, and concatenate
 <merging.Join_with_two_multi_indexes>` documentation section.

From c1cad5dedde5872b0b677e42c43e69aa336e034b Mon Sep 17 00:00:00 2001
From: Mike Cramblett <35242616+mike-cramblett@users.noreply.github.com>
Date: Sat, 26 Jan 2019 06:49:30 -0800
Subject: [PATCH 33/48] TST: GH#23922 Add missing match params to pytest.raises
 (#24937)

---
 pandas/tests/arithmetic/test_datetime64.py | 230 +++++++++++----------
 1 file changed, 124 insertions(+), 106 deletions(-)

diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index f97a1651163e8..405dc0805a285 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -124,14 +124,14 @@ def test_comparison_invalid(self, box_with_array):
             result = x != y
             expected = tm.box_expected([True] * 5, xbox)
             tm.assert_equal(result, expected)
-
-            with pytest.raises(TypeError):
+            msg = 'Invalid comparison between'
+            with pytest.raises(TypeError, match=msg):
                 x >= y
-            with pytest.raises(TypeError):
+            with pytest.raises(TypeError, match=msg):
                 x > y
-            with pytest.raises(TypeError):
+            with pytest.raises(TypeError, match=msg):
                 x < y
-            with pytest.raises(TypeError):
+            with pytest.raises(TypeError, match=msg):
                 x <= y
 
     @pytest.mark.parametrize('data', [
@@ -327,9 +327,10 @@ def test_comparison_tzawareness_compat(self, op):
         # raise
         naive_series = Series(dr)
         aware_series = Series(dz)
-        with pytest.raises(TypeError):
+        msg = 'Cannot compare tz-naive and tz-aware'
+        with pytest.raises(TypeError, match=msg):
             op(dz, naive_series)
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             op(dr, aware_series)
 
         # TODO: implement _assert_tzawareness_compat for the reverse
@@ -428,14 +429,14 @@ def test_dti_cmp_null_scalar_inequality(self, tz_naive_fixture, other,
         dti = pd.date_range('2016-01-01', periods=2, tz=tz)
         # FIXME: ValueError with transpose
         dtarr = tm.box_expected(dti, box_with_array, transpose=False)
-
-        with pytest.raises(TypeError):
+        msg = 'Invalid comparison between'
+        with pytest.raises(TypeError, match=msg):
             dtarr < other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dtarr <= other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dtarr > other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dtarr >= other
 
     @pytest.mark.parametrize('dtype', [None, object])
@@ -584,22 +585,23 @@ def test_comparison_tzawareness_compat(self, op, box_with_array):
         dr = tm.box_expected(dr, box_with_array, transpose=False)
         dz = tm.box_expected(dz, box_with_array, transpose=False)
 
-        with pytest.raises(TypeError):
+        msg = 'Cannot compare tz-naive and tz-aware'
+        with pytest.raises(TypeError, match=msg):
             op(dr, dz)
         if box_with_array is not pd.DataFrame:
             # DataFrame op is invalid until transpose bug is fixed
-            with pytest.raises(TypeError):
+            with pytest.raises(TypeError, match=msg):
                 op(dr, list(dz))
-            with pytest.raises(TypeError):
+            with pytest.raises(TypeError, match=msg):
                 op(dr, np.array(list(dz), dtype=object))
 
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             op(dz, dr)
         if box_with_array is not pd.DataFrame:
             # DataFrame op is invalid until transpose bug is fixed
-            with pytest.raises(TypeError):
+            with pytest.raises(TypeError, match=msg):
                 op(dz, list(dr))
-            with pytest.raises(TypeError):
+            with pytest.raises(TypeError, match=msg):
                 op(dz, np.array(list(dr), dtype=object))
 
         # Check that there isn't a problem aware-aware and naive-naive do not
@@ -617,15 +619,15 @@ def test_comparison_tzawareness_compat(self, op, box_with_array):
         ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam')
 
         assert_all(dr > ts)
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             op(dr, ts_tz)
 
         assert_all(dz > ts_tz)
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             op(dz, ts)
 
         # GH#12601: Check comparison against Timestamps and DatetimeIndex
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             op(ts, dz)
 
     @pytest.mark.parametrize('op', [operator.eq, operator.ne,
@@ -641,10 +643,10 @@ def test_scalar_comparison_tzawareness(self, op, other, tz_aware_fixture,
 
         # FIXME: ValueError with transpose
         dtarr = tm.box_expected(dti, box_with_array, transpose=False)
-
-        with pytest.raises(TypeError):
+        msg = 'Cannot compare tz-naive and tz-aware'
+        with pytest.raises(TypeError, match=msg):
             op(dtarr, other)
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             op(other, dtarr)
 
     @pytest.mark.parametrize('op', [operator.eq, operator.ne,
@@ -714,14 +716,14 @@ def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture,
         expected = np.array([True] * 10)
         expected = tm.box_expected(expected, xbox, transpose=False)
         tm.assert_equal(result, expected)
-
-        with pytest.raises(TypeError):
+        msg = 'Invalid comparison between'
+        with pytest.raises(TypeError, match=msg):
             rng < other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             rng <= other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             rng > other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             rng >= other
 
     def test_dti_cmp_list(self):
@@ -749,14 +751,14 @@ def test_dti_cmp_tdi_tzawareness(self, other):
         result = dti != other
         expected = np.array([True] * 10)
         tm.assert_numpy_array_equal(result, expected)
-
-        with pytest.raises(TypeError):
+        msg = 'Invalid comparison between'
+        with pytest.raises(TypeError, match=msg):
             dti < other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dti <= other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dti > other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dti >= other
 
     def test_dti_cmp_object_dtype(self):
@@ -770,7 +772,8 @@ def test_dti_cmp_object_dtype(self):
         tm.assert_numpy_array_equal(result, expected)
 
         other = dti.tz_localize(None)
-        with pytest.raises(TypeError):
+        msg = 'Cannot compare tz-naive and tz-aware'
+        with pytest.raises(TypeError, match=msg):
             # tzawareness failure
             dti != other
 
@@ -778,8 +781,8 @@ def test_dti_cmp_object_dtype(self):
         result = dti == other
         expected = np.array([True] * 5 + [False] * 5)
         tm.assert_numpy_array_equal(result, expected)
-
-        with pytest.raises(TypeError):
+        msg = "Cannot compare type"
+        with pytest.raises(TypeError, match=msg):
             dti >= other
 
 
@@ -898,7 +901,8 @@ def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
         tm.assert_equal(result, expected)
         result = obj - other
         tm.assert_equal(result, expected)
-        with pytest.raises(TypeError):
+        msg = 'cannot subtract'
+        with pytest.raises(TypeError, match=msg):
             other - obj
 
     def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture,
@@ -927,8 +931,8 @@ def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture,
 
         result = dtarr - tdarr
         tm.assert_equal(result, expected)
-
-        with pytest.raises(TypeError):
+        msg = 'cannot subtract'
+        with pytest.raises(TypeError, match=msg):
             tdarr - dtarr
 
     # -----------------------------------------------------------------
@@ -1028,10 +1032,10 @@ def test_dt64arr_aware_sub_dt64ndarray_raises(self, tz_aware_fixture,
         dt64vals = dti.values
 
         dtarr = tm.box_expected(dti, box_with_array)
-
-        with pytest.raises(TypeError):
+        msg = 'DatetimeArray subtraction must have the same timezones or'
+        with pytest.raises(TypeError, match=msg):
             dtarr - dt64vals
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dt64vals - dtarr
 
     # -------------------------------------------------------------
@@ -1048,17 +1052,17 @@ def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture,
         dt64vals = dti.values
 
         dtarr = tm.box_expected(dti, box_with_array)
-
-        with pytest.raises(TypeError):
+        msg = 'cannot add'
+        with pytest.raises(TypeError, match=msg):
             dtarr + dt64vals
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dt64vals + dtarr
 
     def test_dt64arr_add_timestamp_raises(self, box_with_array):
         # GH#22163 ensure DataFrame doesn't cast Timestamp to i8
         idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
         idx = tm.box_expected(idx, box_with_array)
-        msg = "cannot add"
+        msg = 'cannot add'
         with pytest.raises(TypeError, match=msg):
             idx + Timestamp('2011-01-01')
         with pytest.raises(TypeError, match=msg):
@@ -1071,13 +1075,14 @@ def test_dt64arr_add_timestamp_raises(self, box_with_array):
     def test_dt64arr_add_sub_float(self, other, box_with_array):
         dti = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
         dtarr = tm.box_expected(dti, box_with_array)
-        with pytest.raises(TypeError):
+        msg = '|'.join(['unsupported operand type', 'cannot (add|subtract)'])
+        with pytest.raises(TypeError, match=msg):
             dtarr + other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             other + dtarr
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dtarr - other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             other - dtarr
 
     @pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@@ -1090,14 +1095,15 @@ def test_dt64arr_add_sub_parr(self, dti_freq, pi_freq,
 
         dtarr = tm.box_expected(dti, box_with_array)
         parr = tm.box_expected(pi, box_with_array2)
-
-        with pytest.raises(TypeError):
+        msg = '|'.join(['cannot (add|subtract)', 'unsupported operand',
+                        'descriptor.*requires', 'ufunc.*cannot use operands'])
+        with pytest.raises(TypeError, match=msg):
             dtarr + parr
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             parr + dtarr
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dtarr - parr
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             parr - dtarr
 
     @pytest.mark.parametrize('dti_freq', [None, 'D'])
@@ -1108,14 +1114,14 @@ def test_dt64arr_add_sub_period_scalar(self, dti_freq, box_with_array):
 
         idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=dti_freq)
         dtarr = tm.box_expected(idx, box_with_array)
-
-        with pytest.raises(TypeError):
+        msg = '|'.join(['unsupported operand type', 'cannot (add|subtract)'])
+        with pytest.raises(TypeError, match=msg):
             dtarr + per
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             per + dtarr
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dtarr - per
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             per - dtarr
 
 
@@ -1156,8 +1162,8 @@ def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
 
         result2 = -pd.offsets.Second(5) + ser
         tm.assert_equal(result2, expected)
-
-        with pytest.raises(TypeError):
+        msg = "bad operand type for unary"
+        with pytest.raises(TypeError, match=msg):
             pd.offsets.Second(5) - ser
 
     @pytest.mark.parametrize('cls_name', ['Day', 'Hour', 'Minute', 'Second',
@@ -1239,8 +1245,8 @@ def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
             expected = DatetimeIndex([x - off for x in vec_items])
             expected = tm.box_expected(expected, box_with_array)
             tm.assert_equal(expected, vec - off)
-
-            with pytest.raises(TypeError):
+            msg = "bad operand type for unary"
+            with pytest.raises(TypeError, match=msg):
                 off - vec
 
     # -------------------------------------------------------------
@@ -1320,8 +1326,8 @@ def test_dt64arr_add_sub_DateOffsets(self, box_with_array,
             expected = DatetimeIndex([offset + x for x in vec_items])
             expected = tm.box_expected(expected, box_with_array)
             tm.assert_equal(expected, offset + vec)
-
-            with pytest.raises(TypeError):
+            msg = "bad operand type for unary"
+            with pytest.raises(TypeError, match=msg):
                 offset - vec
 
     def test_dt64arr_add_sub_DateOffset(self, box_with_array):
@@ -1440,13 +1446,14 @@ def test_dt64_series_arith_overflow(self):
         td = pd.Timedelta('20000 Days')
         dti = pd.date_range('1949-09-30', freq='100Y', periods=4)
         ser = pd.Series(dti)
-        with pytest.raises(OverflowError):
+        msg = 'Overflow in int64 addition'
+        with pytest.raises(OverflowError, match=msg):
             ser - dt
-        with pytest.raises(OverflowError):
+        with pytest.raises(OverflowError, match=msg):
             dt - ser
-        with pytest.raises(OverflowError):
+        with pytest.raises(OverflowError, match=msg):
             ser + td
-        with pytest.raises(OverflowError):
+        with pytest.raises(OverflowError, match=msg):
             td + ser
 
         ser.iloc[-1] = pd.NaT
@@ -1480,9 +1487,9 @@ def test_datetimeindex_sub_timestamp_overflow(self):
                            tspos.to_pydatetime(),
                            tspos.to_datetime64().astype('datetime64[ns]'),
                            tspos.to_datetime64().astype('datetime64[D]')]
-
+        msg = 'Overflow in int64 addition'
         for variant in ts_neg_variants:
-            with pytest.raises(OverflowError):
+            with pytest.raises(OverflowError, match=msg):
                 dtimax - variant
 
         expected = pd.Timestamp.max.value - tspos.value
@@ -1496,7 +1503,7 @@ def test_datetimeindex_sub_timestamp_overflow(self):
             assert res[1].value == expected
 
         for variant in ts_pos_variants:
-            with pytest.raises(OverflowError):
+            with pytest.raises(OverflowError, match=msg):
                 dtimin - variant
 
     def test_datetimeindex_sub_datetimeindex_overflow(self):
@@ -1515,22 +1522,22 @@ def test_datetimeindex_sub_datetimeindex_overflow(self):
         expected = pd.Timestamp.min.value - ts_neg[1].value
         result = dtimin - ts_neg
         assert result[1].value == expected
-
-        with pytest.raises(OverflowError):
+        msg = 'Overflow in int64 addition'
+        with pytest.raises(OverflowError, match=msg):
             dtimax - ts_neg
 
-        with pytest.raises(OverflowError):
+        with pytest.raises(OverflowError, match=msg):
             dtimin - ts_pos
 
         # Edge cases
         tmin = pd.to_datetime([pd.Timestamp.min])
         t1 = tmin + pd.Timedelta.max + pd.Timedelta('1us')
-        with pytest.raises(OverflowError):
+        with pytest.raises(OverflowError, match=msg):
             t1 - tmin
 
         tmax = pd.to_datetime([pd.Timestamp.max])
         t2 = tmax + pd.Timedelta.min - pd.Timedelta('1us')
-        with pytest.raises(OverflowError):
+        with pytest.raises(OverflowError, match=msg):
             tmax - t2
 
 
@@ -1543,7 +1550,8 @@ def test_empty_series_add_sub(self):
         tm.assert_series_equal(a, a + b)
         tm.assert_series_equal(a, a - b)
         tm.assert_series_equal(a, b + a)
-        with pytest.raises(TypeError):
+        msg = 'cannot subtract'
+        with pytest.raises(TypeError, match=msg):
             b - a
 
     def test_operators_datetimelike(self):
@@ -1688,12 +1696,13 @@ def test_datetime64_ops_nat(self):
         # subtraction
         tm.assert_series_equal(-NaT + datetime_series,
                                nat_series_dtype_timestamp)
-        with pytest.raises(TypeError):
+        msg = 'Unary negative expects'
+        with pytest.raises(TypeError, match=msg):
             -single_nat_dtype_datetime + datetime_series
 
         tm.assert_series_equal(-NaT + nat_series_dtype_timestamp,
                                nat_series_dtype_timestamp)
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             -single_nat_dtype_datetime + nat_series_dtype_timestamp
 
         # addition
@@ -1718,15 +1727,16 @@ def test_datetime64_ops_nat(self):
     @pytest.mark.parametrize('one', [1, 1.0, np.array(1)])
     def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
         # multiplication
-        with pytest.raises(TypeError):
+        msg = 'cannot perform .* with this index type'
+        with pytest.raises(TypeError, match=msg):
             dt64_series * one
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             one * dt64_series
 
         # division
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dt64_series / one
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             one / dt64_series
 
     @pytest.mark.parametrize('op', ['__add__', '__radd__',
@@ -1740,13 +1750,17 @@ def test_dt64_series_add_intlike(self, tz, op):
         other = Series([20, 30, 40], dtype='uint8')
 
         method = getattr(ser, op)
-        with pytest.raises(TypeError):
+        msg = '|'.join(['incompatible type for a .* operation',
+                        'cannot evaluate a numeric op',
+                        'ufunc .* cannot use operands',
+                        'cannot (add|subtract)'])
+        with pytest.raises(TypeError, match=msg):
             method(1)
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             method(other)
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             method(other.values)
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             method(pd.Index(other))
 
     # -------------------------------------------------------------
@@ -1783,13 +1797,14 @@ def test_operators_datetimelike_with_timezones(self):
         result = dt1 - td1[0]
         exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
         tm.assert_series_equal(result, exp)
-        with pytest.raises(TypeError):
+        msg = "bad operand type for unary"
+        with pytest.raises(TypeError, match=msg):
             td1[0] - dt1
 
         result = dt2 - td2[0]
         exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
         tm.assert_series_equal(result, exp)
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             td2[0] - dt2
 
         result = dt1 + td1
@@ -1807,10 +1822,10 @@ def test_operators_datetimelike_with_timezones(self):
         result = dt2 - td2
         exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
         tm.assert_series_equal(result, exp)
-
-        with pytest.raises(TypeError):
+        msg = 'cannot (add|subtract)'
+        with pytest.raises(TypeError, match=msg):
             td1 - dt1
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             td2 - dt2
 
 
@@ -1909,13 +1924,15 @@ def test_dti_add_intarray_no_freq(self, int_holder):
         # GH#19959
         dti = pd.DatetimeIndex(['2016-01-01', 'NaT', '2017-04-05 06:07:08'])
         other = int_holder([9, 4, -1])
-        with pytest.raises(NullFrequencyError):
+        nfmsg = 'Cannot shift with no freq'
+        tmsg = 'cannot subtract DatetimeArray from'
+        with pytest.raises(NullFrequencyError, match=nfmsg):
             dti + other
-        with pytest.raises(NullFrequencyError):
+        with pytest.raises(NullFrequencyError, match=nfmsg):
             other + dti
-        with pytest.raises(NullFrequencyError):
+        with pytest.raises(NullFrequencyError, match=nfmsg):
             dti - other
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=tmsg):
             other - dti
 
     # -------------------------------------------------------------
@@ -2057,14 +2074,14 @@ def test_sub_dti_dti(self):
 
         result = dti_tz - dti_tz
         tm.assert_index_equal(result, expected)
-
-        with pytest.raises(TypeError):
+        msg = 'DatetimeArray subtraction must have the same timezones or'
+        with pytest.raises(TypeError, match=msg):
             dti_tz - dti
 
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dti - dti_tz
 
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match=msg):
             dti_tz - dti_tz2
 
         # isub
@@ -2074,7 +2091,8 @@ def test_sub_dti_dti(self):
         # different length raises ValueError
         dti1 = date_range('20130101', periods=3)
         dti2 = date_range('20130101', periods=4)
-        with pytest.raises(ValueError):
+        msg = 'cannot add indices of unequal length'
+        with pytest.raises(ValueError, match=msg):
             dti1 - dti2
 
         # NaN propagation
@@ -2148,8 +2166,8 @@ def test_ops_nat_mixed_datetime64_timedelta64(self):
         tm.assert_series_equal(-single_nat_dtype_timedelta +
                                nat_series_dtype_timestamp,
                                nat_series_dtype_timestamp)
-
-        with pytest.raises(TypeError):
+        msg = 'cannot subtract a datelike'
+        with pytest.raises(TypeError, match=msg):
             timedelta_series - single_nat_dtype_datetime
 
         # addition

From dd7afa0561001db0b458ba314505a1e95706a9bf Mon Sep 17 00:00:00 2001
From: Alexander Ponomaroff
 <33966871+alexander-ponomaroff@users.noreply.github.com>
Date: Sat, 26 Jan 2019 09:50:49 -0500
Subject: [PATCH 34/48] Add tests for NaT when performing dt.to_period (#24921)

---
 pandas/tests/series/test_period.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py
index 0a86bb0b67797..7e0feb418e8df 100644
--- a/pandas/tests/series/test_period.py
+++ b/pandas/tests/series/test_period.py
@@ -164,3 +164,12 @@ def test_end_time_timevalues(self, input_vals):
         result = s.dt.end_time
         expected = s.apply(lambda x: x.end_time)
         tm.assert_series_equal(result, expected)
+
+    @pytest.mark.parametrize('input_vals', [
+        ('2001'), ('NaT')
+    ])
+    def test_to_period(self, input_vals):
+        # GH 21205
+        expected = Series([input_vals], dtype='Period[D]')
+        result = Series([input_vals], dtype='datetime64[ns]').dt.to_period('D')
+        tm.assert_series_equal(result, expected)

From b641963e0dcf42d6051f0467ac6d52b974801898 Mon Sep 17 00:00:00 2001
From: h-vetinari <33685575+h-vetinari@users.noreply.github.com>
Date: Sat, 26 Jan 2019 15:53:58 +0100
Subject: [PATCH 35/48] DOC: switch headline whatsnew to 0.25 (#24941)

---
 doc/source/index.rst.template   | 2 +-
 doc/source/whatsnew/v0.25.0.rst | 7 +++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index 51487c0d325b5..d04e9194e71dc 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -39,7 +39,7 @@ See the :ref:`overview` for more detail about what's in the library.
 {% endif %}
 
     {% if not single_doc -%}
-    What's New in 0.24.0 <whatsnew/v0.24.0>
+    What's New in 0.25.0 <whatsnew/v0.25.0>
     install
     getting_started/index
     user_guide/index
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index fac42dbd9c7c8..5129449e4fdf3 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -1,10 +1,13 @@
-:orphan:
-
 .. _whatsnew_0250:
 
 What's New in 0.25.0 (April XX, 2019)
 -------------------------------------
 
+.. warning::
+
+   Starting with the 0.25.x series of releases, pandas only supports Python 3.5 and higher.
+   See :ref:`install.dropping-27` for more details.
+
 {{ header }}
 
 These are the changes in pandas 0.25.0. See :ref:`release` for a full changelog

From 41166c650f733079e8012161aeca189ecfa62919 Mon Sep 17 00:00:00 2001
From: Justin Zheng <justinzhengbc@gmail.com>
Date: Sat, 26 Jan 2019 06:58:07 -0800
Subject: [PATCH 36/48] BUG-24212 fix regression in #24897 (#24916)

---
 doc/source/whatsnew/v0.24.1.rst          |  3 ++
 pandas/core/reshape/merge.py             | 45 ++++++++++++++++++++++--
 pandas/tests/reshape/merge/test_merge.py | 31 ++++++++--------
 3 files changed, 60 insertions(+), 19 deletions(-)

diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index ee4b7ab62b31a..3ac2ed73ea53f 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -63,6 +63,9 @@ Bug Fixes
 -
 -
 
+**Reshaping**
+
+- Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`)
 
 **Other**
 
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index e11847d2b8ce2..1dd19a7c1514e 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -757,13 +757,21 @@ def _get_join_info(self):
 
             if self.right_index:
                 if len(self.left) > 0:
-                    join_index = self.left.index.take(left_indexer)
+                    join_index = self._create_join_index(self.left.index,
+                                                         self.right.index,
+                                                         left_indexer,
+                                                         right_indexer,
+                                                         how='right')
                 else:
                     join_index = self.right.index.take(right_indexer)
                     left_indexer = np.array([-1] * len(join_index))
             elif self.left_index:
                 if len(self.right) > 0:
-                    join_index = self.right.index.take(right_indexer)
+                    join_index = self._create_join_index(self.right.index,
+                                                         self.left.index,
+                                                         right_indexer,
+                                                         left_indexer,
+                                                         how='left')
                 else:
                     join_index = self.left.index.take(left_indexer)
                     right_indexer = np.array([-1] * len(join_index))
@@ -774,6 +782,39 @@ def _get_join_info(self):
             join_index = join_index.astype(object)
         return join_index, left_indexer, right_indexer
 
+    def _create_join_index(self, index, other_index, indexer,
+                           other_indexer, how='left'):
+        """
+        Create a join index by rearranging one index to match another
+
+        Parameters
+        ----------
+        index: Index being rearranged
+        other_index: Index used to supply values not found in index
+        indexer: how to rearrange index
+        how: replacement is only necessary if indexer based on other_index
+
+        Returns
+        -------
+        join_index
+        """
+        join_index = index.take(indexer)
+        if (self.how in (how, 'outer') and
+                not isinstance(other_index, MultiIndex)):
+            # if final index requires values in other_index but not target
+            # index, indexer may hold missing (-1) values, causing Index.take
+            # to take the final value in target index
+            mask = indexer == -1
+            if np.any(mask):
+                # if values missing (-1) from target index,
+                # take from other_index instead
+                join_list = join_index.to_numpy()
+                other_list = other_index.take(other_indexer).to_numpy()
+                join_list[mask] = other_list[mask]
+                join_index = Index(join_list, dtype=join_index.dtype,
+                                   name=join_index.name)
+        return join_index
+
     def _get_merge_keys(self):
         """
         Note: has side effects (copy/delete key columns)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index f0a3ddc8ce8a4..c17c301968269 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -939,25 +939,22 @@ def test_merge_two_empty_df_no_division_error(self):
         with np.errstate(divide='raise'):
             merge(a, a, on=('a', 'b'))
 
-    @pytest.mark.parametrize('how', ['left', 'outer'])
-    @pytest.mark.xfail(reason="GH-24897")
+    @pytest.mark.parametrize('how', ['right', 'outer'])
     def test_merge_on_index_with_more_values(self, how):
         # GH 24212
-        # pd.merge gets [-1, -1, 0, 1] as right_indexer, ensure that -1 is
-        # interpreted as a missing value instead of the last element
-        df1 = pd.DataFrame([[1, 2], [2, 4], [3, 6], [4, 8]],
-                           columns=['a', 'b'])
-        df2 = pd.DataFrame([[3, 30], [4, 40]],
-                           columns=['a', 'c'])
-        df1.set_index('a', drop=False, inplace=True)
-        df2.set_index('a', inplace=True)
-        result = pd.merge(df1, df2, left_index=True, right_on='a', how=how)
-        expected = pd.DataFrame([[1, 2, np.nan],
-                                 [2, 4, np.nan],
-                                 [3, 6, 30.0],
-                                 [4, 8, 40.0]],
-                                columns=['a', 'b', 'c'])
-        expected.set_index('a', drop=False, inplace=True)
+        # pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that
+        # -1 is interpreted as a missing value instead of the last element
+        df1 = pd.DataFrame({'a': [1, 2, 3], 'key': [0, 2, 2]})
+        df2 = pd.DataFrame({'b': [1, 2, 3, 4, 5]})
+        result = df1.merge(df2, left_on='key', right_index=True, how=how)
+        expected = pd.DataFrame([[1.0, 0, 1],
+                                 [2.0, 2, 3],
+                                 [3.0, 2, 3],
+                                 [np.nan, 1, 2],
+                                 [np.nan, 3, 4],
+                                 [np.nan, 4, 5]],
+                                columns=['a', 'key', 'b'])
+        expected.set_index(Int64Index([0, 1, 2, 1, 3, 4]), inplace=True)
         assert_frame_equal(result, expected)
 
     def test_merge_right_index_right(self):

From 69a2c54ef4c3df06104a0528b2d59a5b7521b23f Mon Sep 17 00:00:00 2001
From: Christopher Whelan <topherwhelan@gmail.com>
Date: Sat, 26 Jan 2019 07:08:50 -0800
Subject: [PATCH 37/48] CLN: reduce overhead in setup for categoricals
 benchmarks in asv (#24913)

---
 asv_bench/benchmarks/categoricals.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index e5dab0cb066aa..4b5b2848f7e0f 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -223,12 +223,19 @@ class CategoricalSlicing(object):
 
     def setup(self, index):
         N = 10**6
-        values = list('a' * N + 'b' * N + 'c' * N)
-        indices = {
-            'monotonic_incr': pd.Categorical(values),
-            'monotonic_decr': pd.Categorical(reversed(values)),
-            'non_monotonic': pd.Categorical(list('abc' * N))}
-        self.data = indices[index]
+        categories = ['a', 'b', 'c']
+        values = [0] * N + [1] * N + [2] * N
+        if index == 'monotonic_incr':
+            self.data = pd.Categorical.from_codes(values,
+                                                  categories=categories)
+        elif index == 'monotonic_decr':
+            self.data = pd.Categorical.from_codes(list(reversed(values)),
+                                                  categories=categories)
+        elif index == 'non_monotonic':
+            self.data = pd.Categorical.from_codes([0, 1, 2] * N,
+                                                  categories=categories)
+        else:
+            raise ValueError('Invalid index param: {}'.format(index))
 
         self.scalar = 10000
         self.list = list(range(10000))

From 95415406d191eb5b0886ae107a7d01f0ef39b019 Mon Sep 17 00:00:00 2001
From: William Ayd <william.ayd@icloud.com>
Date: Sat, 26 Jan 2019 07:34:46 -0800
Subject: [PATCH 38/48] Excel Reader Refactor - Base Class Introduction
 (#24829)

---
 pandas/io/excel.py | 235 +++++++++++++++++++++++++--------------------
 1 file changed, 133 insertions(+), 102 deletions(-)

diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 3a7c39ec65309..3d85ae7fd1f46 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -375,60 +375,25 @@ def read_excel(io,
         **kwds)
 
 
-class _XlrdReader(object):
-
-    def __init__(self, filepath_or_buffer):
-        """Reader using xlrd engine.
-
-        Parameters
-        ----------
-        filepath_or_buffer : string, path object or Workbook
-            Object to be parsed.
-        """
-        err_msg = "Install xlrd >= 1.0.0 for Excel support"
-
-        try:
-            import xlrd
-        except ImportError:
-            raise ImportError(err_msg)
-        else:
-            if xlrd.__VERSION__ < LooseVersion("1.0.0"):
-                raise ImportError(err_msg +
-                                  ". Current version " + xlrd.__VERSION__)
+@add_metaclass(abc.ABCMeta)
+class _BaseExcelReader(object):
 
-        # If filepath_or_buffer is a url, want to keep the data as bytes so
-        # can't pass to get_filepath_or_buffer()
-        if _is_url(filepath_or_buffer):
-            filepath_or_buffer = _urlopen(filepath_or_buffer)
-        elif not isinstance(filepath_or_buffer, (ExcelFile, xlrd.Book)):
-            filepath_or_buffer, _, _, _ = get_filepath_or_buffer(
-                filepath_or_buffer)
+    @property
+    @abc.abstractmethod
+    def sheet_names(self):
+        pass
 
-        if isinstance(filepath_or_buffer, xlrd.Book):
-            self.book = filepath_or_buffer
-        elif not isinstance(filepath_or_buffer, xlrd.Book) and hasattr(
-                filepath_or_buffer, "read"):
-            # N.B. xlrd.Book has a read attribute too
-            if hasattr(filepath_or_buffer, 'seek'):
-                try:
-                    # GH 19779
-                    filepath_or_buffer.seek(0)
-                except UnsupportedOperation:
-                    # HTTPResponse does not support seek()
-                    # GH 20434
-                    pass
+    @abc.abstractmethod
+    def get_sheet_by_name(self, name):
+        pass
 
-            data = filepath_or_buffer.read()
-            self.book = xlrd.open_workbook(file_contents=data)
-        elif isinstance(filepath_or_buffer, compat.string_types):
-            self.book = xlrd.open_workbook(filepath_or_buffer)
-        else:
-            raise ValueError('Must explicitly set engine if not passing in'
-                             ' buffer or path for io.')
+    @abc.abstractmethod
+    def get_sheet_by_index(self, index):
+        pass
 
-    @property
-    def sheet_names(self):
-        return self.book.sheet_names()
+    @abc.abstractmethod
+    def get_sheet_data(self, sheet, convert_float):
+        pass
 
     def parse(self,
               sheet_name=0,
@@ -455,48 +420,6 @@ def parse(self,
 
         _validate_header_arg(header)
 
-        from xlrd import (xldate, XL_CELL_DATE,
-                          XL_CELL_ERROR, XL_CELL_BOOLEAN,
-                          XL_CELL_NUMBER)
-
-        epoch1904 = self.book.datemode
-
-        def _parse_cell(cell_contents, cell_typ):
-            """converts the contents of the cell into a pandas
-               appropriate object"""
-
-            if cell_typ == XL_CELL_DATE:
-
-                # Use the newer xlrd datetime handling.
-                try:
-                    cell_contents = xldate.xldate_as_datetime(
-                        cell_contents, epoch1904)
-                except OverflowError:
-                    return cell_contents
-
-                # Excel doesn't distinguish between dates and time,
-                # so we treat dates on the epoch as times only.
-                # Also, Excel supports 1900 and 1904 epochs.
-                year = (cell_contents.timetuple())[0:3]
-                if ((not epoch1904 and year == (1899, 12, 31)) or
-                        (epoch1904 and year == (1904, 1, 1))):
-                    cell_contents = time(cell_contents.hour,
-                                         cell_contents.minute,
-                                         cell_contents.second,
-                                         cell_contents.microsecond)
-
-            elif cell_typ == XL_CELL_ERROR:
-                cell_contents = np.nan
-            elif cell_typ == XL_CELL_BOOLEAN:
-                cell_contents = bool(cell_contents)
-            elif convert_float and cell_typ == XL_CELL_NUMBER:
-                # GH5394 - Excel 'numbers' are always floats
-                # it's a minimal perf hit and less surprising
-                val = int(cell_contents)
-                if val == cell_contents:
-                    cell_contents = val
-            return cell_contents
-
         ret_dict = False
 
         # Keep sheetname to maintain backwards compatibility.
@@ -504,7 +427,7 @@ def _parse_cell(cell_contents, cell_typ):
             sheets = sheet_name
             ret_dict = True
         elif sheet_name is None:
-            sheets = self.book.sheet_names()
+            sheets = self.sheet_names
             ret_dict = True
         else:
             sheets = [sheet_name]
@@ -519,19 +442,13 @@ def _parse_cell(cell_contents, cell_typ):
                 print("Reading sheet {sheet}".format(sheet=asheetname))
 
             if isinstance(asheetname, compat.string_types):
-                sheet = self.book.sheet_by_name(asheetname)
+                sheet = self.get_sheet_by_name(asheetname)
             else:  # assume an integer if not a string
-                sheet = self.book.sheet_by_index(asheetname)
+                sheet = self.get_sheet_by_index(asheetname)
 
-            data = []
+            data = self.get_sheet_data(sheet, convert_float)
             usecols = _maybe_convert_usecols(usecols)
 
-            for i in range(sheet.nrows):
-                row = [_parse_cell(value, typ)
-                       for value, typ in zip(sheet.row_values(i),
-                                             sheet.row_types(i))]
-                data.append(row)
-
             if sheet.nrows == 0:
                 output[asheetname] = DataFrame()
                 continue
@@ -620,6 +537,120 @@ def _parse_cell(cell_contents, cell_typ):
             return output[asheetname]
 
 
+class _XlrdReader(_BaseExcelReader):
+
+    def __init__(self, filepath_or_buffer):
+        """Reader using xlrd engine.
+
+        Parameters
+        ----------
+        filepath_or_buffer : string, path object or Workbook
+            Object to be parsed.
+        """
+        err_msg = "Install xlrd >= 1.0.0 for Excel support"
+
+        try:
+            import xlrd
+        except ImportError:
+            raise ImportError(err_msg)
+        else:
+            if xlrd.__VERSION__ < LooseVersion("1.0.0"):
+                raise ImportError(err_msg +
+                                  ". Current version " + xlrd.__VERSION__)
+
+        # If filepath_or_buffer is a url, want to keep the data as bytes so
+        # can't pass to get_filepath_or_buffer()
+        if _is_url(filepath_or_buffer):
+            filepath_or_buffer = _urlopen(filepath_or_buffer)
+        elif not isinstance(filepath_or_buffer, (ExcelFile, xlrd.Book)):
+            filepath_or_buffer, _, _, _ = get_filepath_or_buffer(
+                filepath_or_buffer)
+
+        if isinstance(filepath_or_buffer, xlrd.Book):
+            self.book = filepath_or_buffer
+        elif hasattr(filepath_or_buffer, "read"):
+            # N.B. xlrd.Book has a read attribute too
+            if hasattr(filepath_or_buffer, 'seek'):
+                try:
+                    # GH 19779
+                    filepath_or_buffer.seek(0)
+                except UnsupportedOperation:
+                    # HTTPResponse does not support seek()
+                    # GH 20434
+                    pass
+
+            data = filepath_or_buffer.read()
+            self.book = xlrd.open_workbook(file_contents=data)
+        elif isinstance(filepath_or_buffer, compat.string_types):
+            self.book = xlrd.open_workbook(filepath_or_buffer)
+        else:
+            raise ValueError('Must explicitly set engine if not passing in'
+                             ' buffer or path for io.')
+
+    @property
+    def sheet_names(self):
+        return self.book.sheet_names()
+
+    def get_sheet_by_name(self, name):
+        return self.book.sheet_by_name(name)
+
+    def get_sheet_by_index(self, index):
+        return self.book.sheet_by_index(index)
+
+    def get_sheet_data(self, sheet, convert_float):
+        from xlrd import (xldate, XL_CELL_DATE,
+                          XL_CELL_ERROR, XL_CELL_BOOLEAN,
+                          XL_CELL_NUMBER)
+
+        epoch1904 = self.book.datemode
+
+        def _parse_cell(cell_contents, cell_typ):
+            """converts the contents of the cell into a pandas
+               appropriate object"""
+
+            if cell_typ == XL_CELL_DATE:
+
+                # Use the newer xlrd datetime handling.
+                try:
+                    cell_contents = xldate.xldate_as_datetime(
+                        cell_contents, epoch1904)
+                except OverflowError:
+                    return cell_contents
+
+                # Excel doesn't distinguish between dates and time,
+                # so we treat dates on the epoch as times only.
+                # Also, Excel supports 1900 and 1904 epochs.
+                year = (cell_contents.timetuple())[0:3]
+                if ((not epoch1904 and year == (1899, 12, 31)) or
+                        (epoch1904 and year == (1904, 1, 1))):
+                    cell_contents = time(cell_contents.hour,
+                                         cell_contents.minute,
+                                         cell_contents.second,
+                                         cell_contents.microsecond)
+
+            elif cell_typ == XL_CELL_ERROR:
+                cell_contents = np.nan
+            elif cell_typ == XL_CELL_BOOLEAN:
+                cell_contents = bool(cell_contents)
+            elif convert_float and cell_typ == XL_CELL_NUMBER:
+                # GH5394 - Excel 'numbers' are always floats
+                # it's a minimal perf hit and less surprising
+                val = int(cell_contents)
+                if val == cell_contents:
+                    cell_contents = val
+            return cell_contents
+
+        data = []
+
+        for i in range(sheet.nrows):
+            row = [_parse_cell(value, typ)
+                   for value, typ in zip(sheet.row_values(i),
+                                         sheet.row_types(i))]
+            data.append(row)
+
+        return data
+
+
 class ExcelFile(object):
     """
     Class for parsing tabular excel sheets into DataFrame objects.

From 37a224d216e8d49a05681e2c87482bd8e67385fc Mon Sep 17 00:00:00 2001
From: gfyoung <gfyoung17+GitHub@gmail.com>
Date: Sat, 26 Jan 2019 07:42:22 -0800
Subject: [PATCH 39/48] TST/REF: Add pytest idiom to test_numeric.py (#24946)

---
 pandas/tests/tools/test_numeric.py | 875 +++++++++++++++--------------
 1 file changed, 450 insertions(+), 425 deletions(-)

diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py
index 537881f3a5e85..3822170d884aa 100644
--- a/pandas/tests/tools/test_numeric.py
+++ b/pandas/tests/tools/test_numeric.py
@@ -5,436 +5,461 @@
 import pytest
 
 import pandas as pd
-from pandas import to_numeric
+from pandas import DataFrame, Index, Series, to_numeric
 from pandas.util import testing as tm
 
 
-class TestToNumeric(object):
-
-    def test_empty(self):
-        # see gh-16302
-        s = pd.Series([], dtype=object)
-
-        res = to_numeric(s)
-        expected = pd.Series([], dtype=np.int64)
-
-        tm.assert_series_equal(res, expected)
-
-        # Original issue example
-        res = to_numeric(s, errors='coerce', downcast='integer')
-        expected = pd.Series([], dtype=np.int8)
-
-        tm.assert_series_equal(res, expected)
-
-    def test_series(self):
-        s = pd.Series(['1', '-3.14', '7'])
-        res = to_numeric(s)
-        expected = pd.Series([1, -3.14, 7])
-        tm.assert_series_equal(res, expected)
-
-        s = pd.Series(['1', '-3.14', 7])
-        res = to_numeric(s)
-        tm.assert_series_equal(res, expected)
-
-    def test_series_numeric(self):
-        s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
-        res = to_numeric(s)
-        tm.assert_series_equal(res, s)
-
-        s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
-        res = to_numeric(s)
-        tm.assert_series_equal(res, s)
-
-        # bool is regarded as numeric
-        s = pd.Series([True, False, True, True],
-                      index=list('ABCD'), name='XXX')
-        res = to_numeric(s)
-        tm.assert_series_equal(res, s)
-
-    def test_error(self):
-        s = pd.Series([1, -3.14, 'apple'])
-        msg = 'Unable to parse string "apple" at position 2'
-        with pytest.raises(ValueError, match=msg):
-            to_numeric(s, errors='raise')
-
-        res = to_numeric(s, errors='ignore')
-        expected = pd.Series([1, -3.14, 'apple'])
-        tm.assert_series_equal(res, expected)
-
-        res = to_numeric(s, errors='coerce')
-        expected = pd.Series([1, -3.14, np.nan])
-        tm.assert_series_equal(res, expected)
-
-        s = pd.Series(['orange', 1, -3.14, 'apple'])
-        msg = 'Unable to parse string "orange" at position 0'
-        with pytest.raises(ValueError, match=msg):
-            to_numeric(s, errors='raise')
-
-    def test_error_seen_bool(self):
-        s = pd.Series([True, False, 'apple'])
-        msg = 'Unable to parse string "apple" at position 2'
-        with pytest.raises(ValueError, match=msg):
-            to_numeric(s, errors='raise')
-
-        res = to_numeric(s, errors='ignore')
-        expected = pd.Series([True, False, 'apple'])
-        tm.assert_series_equal(res, expected)
-
-        # coerces to float
-        res = to_numeric(s, errors='coerce')
-        expected = pd.Series([1., 0., np.nan])
-        tm.assert_series_equal(res, expected)
-
-    def test_list(self):
-        s = ['1', '-3.14', '7']
-        res = to_numeric(s)
-        expected = np.array([1, -3.14, 7])
-        tm.assert_numpy_array_equal(res, expected)
-
-    def test_list_numeric(self):
-        s = [1, 3, 4, 5]
-        res = to_numeric(s)
-        tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
-
-        s = [1., 3., 4., 5.]
-        res = to_numeric(s)
-        tm.assert_numpy_array_equal(res, np.array(s))
-
-        # bool is regarded as numeric
-        s = [True, False, True, True]
-        res = to_numeric(s)
-        tm.assert_numpy_array_equal(res, np.array(s))
-
-    def test_numeric(self):
-        s = pd.Series([1, -3.14, 7], dtype='O')
-        res = to_numeric(s)
-        expected = pd.Series([1, -3.14, 7])
-        tm.assert_series_equal(res, expected)
-
-        s = pd.Series([1, -3.14, 7])
-        res = to_numeric(s)
-        tm.assert_series_equal(res, expected)
-
-        # GH 14827
-        df = pd.DataFrame(dict(
-            a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
-            b=[1.0, 2.0, 3.0, 4.0],
-        ))
-        expected = pd.DataFrame(dict(
-            a=[1.2, 3.14, np.inf, 0.1],
-            b=[1.0, 2.0, 3.0, 4.0],
-        ))
-
-        # Test to_numeric over one column
-        df_copy = df.copy()
-        df_copy['a'] = df_copy['a'].apply(to_numeric)
-        tm.assert_frame_equal(df_copy, expected)
-
-        # Test to_numeric over multiple columns
-        df_copy = df.copy()
-        df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
-        tm.assert_frame_equal(df_copy, expected)
-
-    def test_numeric_lists_and_arrays(self):
-        # Test to_numeric with embedded lists and arrays
-        df = pd.DataFrame(dict(
-            a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
-        ))
-        df['a'] = df['a'].apply(to_numeric)
-        expected = pd.DataFrame(dict(
-            a=[[3.14, 1.0], 1.6, 0.1],
-        ))
-        tm.assert_frame_equal(df, expected)
-
-        df = pd.DataFrame(dict(
-            a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
-        ))
-        df['a'] = df['a'].apply(to_numeric)
-        expected = pd.DataFrame(dict(
-            a=[[3.14, 1.0], 0.1],
-        ))
-        tm.assert_frame_equal(df, expected)
-
-    def test_all_nan(self):
-        s = pd.Series(['a', 'b', 'c'])
-        res = to_numeric(s, errors='coerce')
-        expected = pd.Series([np.nan, np.nan, np.nan])
-        tm.assert_series_equal(res, expected)
-
-    @pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
-    def test_type_check(self, errors):
-        # see gh-11776
-        df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
-        kwargs = dict(errors=errors) if errors is not None else dict()
-        error_ctx = pytest.raises(TypeError, match="1-d array")
-
-        with error_ctx:
-            to_numeric(df, **kwargs)
-
-    def test_scalar(self):
-        assert pd.to_numeric(1) == 1
-        assert pd.to_numeric(1.1) == 1.1
-
-        assert pd.to_numeric('1') == 1
-        assert pd.to_numeric('1.1') == 1.1
-
-        with pytest.raises(ValueError):
-            to_numeric('XX', errors='raise')
-
-        assert to_numeric('XX', errors='ignore') == 'XX'
-        assert np.isnan(to_numeric('XX', errors='coerce'))
-
-    def test_numeric_dtypes(self):
-        idx = pd.Index([1, 2, 3], name='xxx')
-        res = pd.to_numeric(idx)
-        tm.assert_index_equal(res, idx)
-
-        res = pd.to_numeric(pd.Series(idx, name='xxx'))
-        tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
-
-        res = pd.to_numeric(idx.values)
-        tm.assert_numpy_array_equal(res, idx.values)
-
-        idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
-        res = pd.to_numeric(idx)
-        tm.assert_index_equal(res, idx)
-
-        res = pd.to_numeric(pd.Series(idx, name='xxx'))
-        tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
-
-        res = pd.to_numeric(idx.values)
-        tm.assert_numpy_array_equal(res, idx.values)
-
-    def test_str(self):
-        idx = pd.Index(['1', '2', '3'], name='xxx')
-        exp = np.array([1, 2, 3], dtype='int64')
-        res = pd.to_numeric(idx)
-        tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
-
-        res = pd.to_numeric(pd.Series(idx, name='xxx'))
-        tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
-
-        res = pd.to_numeric(idx.values)
-        tm.assert_numpy_array_equal(res, exp)
-
-        idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
-        exp = np.array([1.5, 2.7, 3.4])
-        res = pd.to_numeric(idx)
-        tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
-
-        res = pd.to_numeric(pd.Series(idx, name='xxx'))
-        tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
-
-        res = pd.to_numeric(idx.values)
-        tm.assert_numpy_array_equal(res, exp)
-
-    def test_datetime_like(self, tz_naive_fixture):
-        idx = pd.date_range("20130101", periods=3,
-                            tz=tz_naive_fixture, name="xxx")
-        res = pd.to_numeric(idx)
-        tm.assert_index_equal(res, pd.Index(idx.asi8, name="xxx"))
-
-        res = pd.to_numeric(pd.Series(idx, name="xxx"))
-        tm.assert_series_equal(res, pd.Series(idx.asi8, name="xxx"))
-
-        res = pd.to_numeric(idx.values)
-        tm.assert_numpy_array_equal(res, idx.asi8)
-
-    def test_timedelta(self):
-        idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
-        res = pd.to_numeric(idx)
-        tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
-
-        res = pd.to_numeric(pd.Series(idx, name='xxx'))
-        tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
-
-        res = pd.to_numeric(idx.values)
-        tm.assert_numpy_array_equal(res, idx.asi8)
-
-    def test_period(self):
-        idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
-        res = pd.to_numeric(idx)
-        tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
-
-        # TODO: enable when we can support native PeriodDtype
-        # res = pd.to_numeric(pd.Series(idx, name='xxx'))
-        # tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
-
-    def test_non_hashable(self):
-        # Test for Bug #13324
-        s = pd.Series([[10.0, 2], 1.0, 'apple'])
-        res = pd.to_numeric(s, errors='coerce')
-        tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
-
-        res = pd.to_numeric(s, errors='ignore')
-        tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
-
-        with pytest.raises(TypeError, match="Invalid object type"):
-            pd.to_numeric(s)
-
-    @pytest.mark.parametrize("data", [
-        ["1", 2, 3],
-        [1, 2, 3],
-        np.array(["1970-01-02", "1970-01-03",
-                  "1970-01-04"], dtype="datetime64[D]")
-    ])
-    def test_downcast_basic(self, data):
-        # see gh-13352
-        invalid_downcast = "unsigned-integer"
-        msg = "invalid downcasting method provided"
-
-        with pytest.raises(ValueError, match=msg):
-            pd.to_numeric(data, downcast=invalid_downcast)
-
-        expected = np.array([1, 2, 3], dtype=np.int64)
-
-        # Basic function tests.
-        res = pd.to_numeric(data)
-        tm.assert_numpy_array_equal(res, expected)
-
-        res = pd.to_numeric(data, downcast=None)
-        tm.assert_numpy_array_equal(res, expected)
-
-        # Basic dtype support.
-        smallest_uint_dtype = np.dtype(np.typecodes["UnsignedInteger"][0])
-
-        # Support below np.float32 is rare and far between.
-        float_32_char = np.dtype(np.float32).char
-        smallest_float_dtype = float_32_char
-
-        expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
-        res = pd.to_numeric(data, downcast="unsigned")
-        tm.assert_numpy_array_equal(res, expected)
-
-        expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
-        res = pd.to_numeric(data, downcast="float")
-        tm.assert_numpy_array_equal(res, expected)
-
-    @pytest.mark.parametrize("signed_downcast", ["integer", "signed"])
-    @pytest.mark.parametrize("data", [
-        ["1", 2, 3],
-        [1, 2, 3],
-        np.array(["1970-01-02", "1970-01-03",
-                  "1970-01-04"], dtype="datetime64[D]")
-    ])
-    def test_signed_downcast(self, data, signed_downcast):
-        # see gh-13352
-        smallest_int_dtype = np.dtype(np.typecodes["Integer"][0])
-        expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
-
-        res = pd.to_numeric(data, downcast=signed_downcast)
-        tm.assert_numpy_array_equal(res, expected)
-
-    def test_ignore_downcast_invalid_data(self):
-        # If we can't successfully cast the given
-        # data to a numeric dtype, do not bother
-        # with the downcast parameter.
-        data = ["foo", 2, 3]
-        expected = np.array(data, dtype=object)
-
-        res = pd.to_numeric(data, errors="ignore",
-                            downcast="unsigned")
-        tm.assert_numpy_array_equal(res, expected)
-
-    def test_ignore_downcast_neg_to_unsigned(self):
-        # Cannot cast to an unsigned integer
-        # because we have a negative number.
-        data = ["-1", 2, 3]
-        expected = np.array([-1, 2, 3], dtype=np.int64)
-
-        res = pd.to_numeric(data, downcast="unsigned")
-        tm.assert_numpy_array_equal(res, expected)
-
-    @pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])
-    @pytest.mark.parametrize("data,expected", [
-        (["1.1", 2, 3],
-         np.array([1.1, 2, 3], dtype=np.float64)),
-        ([10000.0, 20000, 3000, 40000.36, 50000, 50000.00],
-         np.array([10000.0, 20000, 3000,
-                   40000.36, 50000, 50000.00], dtype=np.float64))
-    ])
-    def test_ignore_downcast_cannot_convert_float(
-            self, data, expected, downcast):
-        # Cannot cast to an integer (signed or unsigned)
-        # because we have a float number.
-        res = pd.to_numeric(data, downcast=downcast)
-        tm.assert_numpy_array_equal(res, expected)
-
-    @pytest.mark.parametrize("downcast,expected_dtype", [
-        ("integer", np.int16),
-        ("signed", np.int16),
-        ("unsigned", np.uint16)
-    ])
-    def test_downcast_not8bit(self, downcast, expected_dtype):
-        # the smallest integer dtype need not be np.(u)int8
-        data = ["256", 257, 258]
-
-        expected = np.array([256, 257, 258], dtype=expected_dtype)
-        res = pd.to_numeric(data, downcast=downcast)
-        tm.assert_numpy_array_equal(res, expected)
-
-    @pytest.mark.parametrize("dtype,downcast,min_max", [
-        ("int8", "integer", [iinfo(np.int8).min,
-                             iinfo(np.int8).max]),
-        ("int16", "integer", [iinfo(np.int16).min,
-                              iinfo(np.int16).max]),
-        ('int32', "integer", [iinfo(np.int32).min,
-                              iinfo(np.int32).max]),
-        ('int64', "integer", [iinfo(np.int64).min,
-                              iinfo(np.int64).max]),
-        ('uint8', "unsigned", [iinfo(np.uint8).min,
-                               iinfo(np.uint8).max]),
-        ('uint16', "unsigned", [iinfo(np.uint16).min,
-                                iinfo(np.uint16).max]),
-        ('uint32', "unsigned", [iinfo(np.uint32).min,
-                                iinfo(np.uint32).max]),
-        ('uint64', "unsigned", [iinfo(np.uint64).min,
-                                iinfo(np.uint64).max]),
-        ('int16', "integer", [iinfo(np.int8).min,
-                              iinfo(np.int8).max + 1]),
-        ('int32', "integer", [iinfo(np.int16).min,
-                              iinfo(np.int16).max + 1]),
-        ('int64', "integer", [iinfo(np.int32).min,
-                              iinfo(np.int32).max + 1]),
-        ('int16', "integer", [iinfo(np.int8).min - 1,
-                              iinfo(np.int16).max]),
-        ('int32', "integer", [iinfo(np.int16).min - 1,
-                              iinfo(np.int32).max]),
-        ('int64', "integer", [iinfo(np.int32).min - 1,
-                              iinfo(np.int64).max]),
-        ('uint16', "unsigned", [iinfo(np.uint8).min,
-                                iinfo(np.uint8).max + 1]),
-        ('uint32', "unsigned", [iinfo(np.uint16).min,
-                                iinfo(np.uint16).max + 1]),
-        ('uint64', "unsigned", [iinfo(np.uint32).min,
-                                iinfo(np.uint32).max + 1])
-    ])
-    def test_downcast_limits(self, dtype, downcast, min_max):
-        # see gh-14404: test the limits of each downcast.
-        series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
-        assert series.dtype == dtype
-
-    def test_coerce_uint64_conflict(self):
-        # see gh-17007 and gh-17125
-        #
-        # Still returns float despite the uint64-nan conflict,
-        # which would normally force the casting to object.
-        df = pd.DataFrame({"a": [200, 300, "", "NaN", 30000000000000000000]})
-        expected = pd.Series([200, 300, np.nan, np.nan,
-                              30000000000000000000], dtype=float, name="a")
-        result = to_numeric(df["a"], errors="coerce")
+@pytest.mark.parametrize("input_kwargs,result_kwargs", [
+    (dict(), dict(dtype=np.int64)),
+    (dict(errors="coerce", downcast="integer"), dict(dtype=np.int8))
+])
+def test_empty(input_kwargs, result_kwargs):
+    # see gh-16302
+    ser = Series([], dtype=object)
+    result = to_numeric(ser, **input_kwargs)
+
+    expected = Series([], **result_kwargs)
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("last_val", ["7", 7])
+def test_series(last_val):
+    ser = Series(["1", "-3.14", last_val])
+    result = to_numeric(ser)
+
+    expected = Series([1, -3.14, 7])
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("data", [
+    [1, 3, 4, 5],
+    [1., 3., 4., 5.],
+
+    # Bool is regarded as numeric.
+    [True, False, True, True]
+])
+def test_series_numeric(data):
+    ser = Series(data, index=list("ABCD"), name="EFG")
+
+    result = to_numeric(ser)
+    tm.assert_series_equal(result, ser)
+
+
+@pytest.mark.parametrize("data,msg", [
+    ([1, -3.14, "apple"],
+     'Unable to parse string "apple" at position 2'),
+    (["orange", 1, -3.14, "apple"],
+     'Unable to parse string "orange" at position 0')
+])
+def test_error(data, msg):
+    ser = Series(data)
+
+    with pytest.raises(ValueError, match=msg):
+        to_numeric(ser, errors="raise")
+
+
+@pytest.mark.parametrize("errors,exp_data", [
+    ("ignore", [1, -3.14, "apple"]),
+    ("coerce", [1, -3.14, np.nan])
+])
+def test_ignore_error(errors, exp_data):
+    ser = Series([1, -3.14, "apple"])
+    result = to_numeric(ser, errors=errors)
+
+    expected = Series(exp_data)
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("errors,exp", [
+    ("raise", 'Unable to parse string "apple" at position 2'),
+    ("ignore", [True, False, "apple"]),
+
+    # Coerces to float.
+    ("coerce", [1., 0., np.nan])
+])
+def test_bool_handling(errors, exp):
+    ser = Series([True, False, "apple"])
+
+    if isinstance(exp, str):
+        with pytest.raises(ValueError, match=exp):
+            to_numeric(ser, errors=errors)
+    else:
+        result = to_numeric(ser, errors=errors)
+        expected = Series(exp)
+
         tm.assert_series_equal(result, expected)
 
-        s = pd.Series(["12345678901234567890", "1234567890", "ITEM"])
-        expected = pd.Series([12345678901234567890,
-                              1234567890, np.nan], dtype=float)
-        result = to_numeric(s, errors="coerce")
+
+def test_list():
+    ser = ["1", "-3.14", "7"]
+    res = to_numeric(ser)
+
+    expected = np.array([1, -3.14, 7])
+    tm.assert_numpy_array_equal(res, expected)
+
+
+@pytest.mark.parametrize("data,arr_kwargs", [
+    ([1, 3, 4, 5], dict(dtype=np.int64)),
+    ([1., 3., 4., 5.], dict()),
+
+    # Boolean is regarded as numeric.
+    ([True, False, True, True], dict())
+])
+def test_list_numeric(data, arr_kwargs):
+    result = to_numeric(data)
+    expected = np.array(data, **arr_kwargs)
+    tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("kwargs", [
+    dict(dtype="O"), dict()
+])
+def test_numeric(kwargs):
+    data = [1, -3.14, 7]
+
+    ser = Series(data, **kwargs)
+    result = to_numeric(ser)
+
+    expected = Series(data)
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("columns", [
+    # One column.
+    "a",
+
+    # Multiple columns.
+    ["a", "b"]
+])
+def test_numeric_df_columns(columns):
+    # see gh-14827
+    df = DataFrame(dict(
+        a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), "0.1"],
+        b=[1.0, 2.0, 3.0, 4.0],
+    ))
+
+    expected = DataFrame(dict(
+        a=[1.2, 3.14, np.inf, 0.1],
+        b=[1.0, 2.0, 3.0, 4.0],
+    ))
+
+    df_copy = df.copy()
+    df_copy[columns] = df_copy[columns].apply(to_numeric)
+
+    tm.assert_frame_equal(df_copy, expected)
+
+
+@pytest.mark.parametrize("data,exp_data", [
+    ([[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1],
+     [[3.14, 1.0], 1.6, 0.1]),
+    ([np.array([decimal.Decimal(3.14), 1.0]), 0.1],
+     [[3.14, 1.0], 0.1])
+])
+def test_numeric_embedded_arr_likes(data, exp_data):
+    # Test to_numeric with embedded lists and arrays
+    df = DataFrame(dict(a=data))
+    df["a"] = df["a"].apply(to_numeric)
+
+    expected = DataFrame(dict(a=exp_data))
+    tm.assert_frame_equal(df, expected)
+
+
+def test_all_nan():
+    ser = Series(["a", "b", "c"])
+    result = to_numeric(ser, errors="coerce")
+
+    expected = Series([np.nan, np.nan, np.nan])
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
+def test_type_check(errors):
+    # see gh-11776
+    df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
+    kwargs = dict(errors=errors) if errors is not None else dict()
+    error_ctx = pytest.raises(TypeError, match="1-d array")
+
+    with error_ctx:
+        to_numeric(df, **kwargs)
+
+
+@pytest.mark.parametrize("val", [
+    1, 1.1, "1", "1.1", -1.5, "-1.5"
+])
+def test_scalar(val):
+    assert to_numeric(val) == float(val)
+
+
+@pytest.mark.parametrize("errors,checker", [
+    ("raise", 'Unable to parse string "fail" at position 0'),
+    ("ignore", lambda x: x == "fail"),
+    ("coerce", lambda x: np.isnan(x))
+])
+def test_scalar_fail(errors, checker):
+    scalar = "fail"
+
+    if isinstance(checker, str):
+        with pytest.raises(ValueError, match=checker):
+            to_numeric(scalar, errors=errors)
+    else:
+        assert checker(to_numeric(scalar, errors=errors))
+
+
+@pytest.fixture(params=[
+    (lambda x: Index(x, name="idx"), tm.assert_index_equal),
+    (lambda x: Series(x, name="ser"), tm.assert_series_equal),
+    (lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal)
+])
+def transform_assert_equal(request):
+    return request.param
+
+
+@pytest.mark.parametrize("data", [
+    [1, 2, 3],
+    [1., np.nan, 3, np.nan]
+])
+def test_numeric_dtypes(data, transform_assert_equal):
+    transform, assert_equal = transform_assert_equal
+    data = transform(data)
+
+    result = to_numeric(data)
+    assert_equal(result, data)
+
+
+@pytest.mark.parametrize("data,exp", [
+    (["1", "2", "3"], np.array([1, 2, 3], dtype="int64")),
+    (["1.5", "2.7", "3.4"], np.array([1.5, 2.7, 3.4]))
+])
+def test_str(data, exp, transform_assert_equal):
+    transform, assert_equal = transform_assert_equal
+    result = to_numeric(transform(data))
+
+    expected = transform(exp)
+    assert_equal(result, expected)
+
+
+def test_datetime_like(tz_naive_fixture, transform_assert_equal):
+    transform, assert_equal = transform_assert_equal
+    idx = pd.date_range("20130101", periods=3, tz=tz_naive_fixture)
+
+    result = to_numeric(transform(idx))
+    expected = transform(idx.asi8)
+    assert_equal(result, expected)
+
+
+def test_timedelta(transform_assert_equal):
+    transform, assert_equal = transform_assert_equal
+    idx = pd.timedelta_range("1 days", periods=3, freq="D")
+
+    result = to_numeric(transform(idx))
+    expected = transform(idx.asi8)
+    assert_equal(result, expected)
+
+
+def test_period(transform_assert_equal):
+    transform, assert_equal = transform_assert_equal
+
+    idx = pd.period_range("2011-01", periods=3, freq="M", name="")
+    inp = transform(idx)
+
+    if isinstance(inp, Index):
+        result = to_numeric(inp)
+        expected = transform(idx.asi8)
+        assert_equal(result, expected)
+    else:
+        # TODO: PeriodDtype, so support it in to_numeric.
+        pytest.skip("Missing PeriodDtype support in to_numeric")
+
+
+@pytest.mark.parametrize("errors,expected", [
+    ("raise", "Invalid object type at position 0"),
+    ("ignore", Series([[10.0, 2], 1.0, "apple"])),
+    ("coerce", Series([np.nan, 1.0, np.nan]))
+])
+def test_non_hashable(errors, expected):
+    # see gh-13324
+    ser = Series([[10.0, 2], 1.0, "apple"])
+
+    if isinstance(expected, str):
+        with pytest.raises(TypeError, match=expected):
+            to_numeric(ser, errors=errors)
+    else:
+        result = to_numeric(ser, errors=errors)
         tm.assert_series_equal(result, expected)
 
-        # For completeness, check against "ignore" and "raise"
-        result = to_numeric(s, errors="ignore")
-        tm.assert_series_equal(result, s)
 
-        msg = "Unable to parse string"
-        with pytest.raises(ValueError, match=msg):
-            to_numeric(s, errors="raise")
+def test_downcast_invalid_cast():
+    # see gh-13352
+    data = ["1", 2, 3]
+    invalid_downcast = "unsigned-integer"
+    msg = "invalid downcasting method provided"
+
+    with pytest.raises(ValueError, match=msg):
+        to_numeric(data, downcast=invalid_downcast)
+
+
+@pytest.mark.parametrize("data", [
+    ["1", 2, 3],
+    [1, 2, 3],
+    np.array(["1970-01-02", "1970-01-03",
+              "1970-01-04"], dtype="datetime64[D]")
+])
+@pytest.mark.parametrize("kwargs,exp_dtype", [
+    # Basic function tests.
+    (dict(), np.int64),
+    (dict(downcast=None), np.int64),
+
+    # Support below np.float32 is rare and far between.
+    (dict(downcast="float"), np.dtype(np.float32).char),
+
+    # Basic dtype support.
+    (dict(downcast="unsigned"), np.dtype(np.typecodes["UnsignedInteger"][0]))
+])
+def test_downcast_basic(data, kwargs, exp_dtype):
+    # see gh-13352
+    result = to_numeric(data, **kwargs)
+    expected = np.array([1, 2, 3], dtype=exp_dtype)
+    tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("signed_downcast", ["integer", "signed"])
+@pytest.mark.parametrize("data", [
+    ["1", 2, 3],
+    [1, 2, 3],
+    np.array(["1970-01-02", "1970-01-03",
+              "1970-01-04"], dtype="datetime64[D]")
+])
+def test_signed_downcast(data, signed_downcast):
+    # see gh-13352
+    smallest_int_dtype = np.dtype(np.typecodes["Integer"][0])
+    expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
+
+    res = to_numeric(data, downcast=signed_downcast)
+    tm.assert_numpy_array_equal(res, expected)
+
+
+def test_ignore_downcast_invalid_data():
+    # If we can't successfully cast the given
+    # data to a numeric dtype, do not bother
+    # with the downcast parameter.
+    data = ["foo", 2, 3]
+    expected = np.array(data, dtype=object)
+
+    res = to_numeric(data, errors="ignore",
+                     downcast="unsigned")
+    tm.assert_numpy_array_equal(res, expected)
+
+
+def test_ignore_downcast_neg_to_unsigned():
+    # Cannot cast to an unsigned integer
+    # because we have a negative number.
+    data = ["-1", 2, 3]
+    expected = np.array([-1, 2, 3], dtype=np.int64)
+
+    res = to_numeric(data, downcast="unsigned")
+    tm.assert_numpy_array_equal(res, expected)
+
+
+@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])
+@pytest.mark.parametrize("data,expected", [
+    (["1.1", 2, 3],
+     np.array([1.1, 2, 3], dtype=np.float64)),
+    ([10000.0, 20000, 3000, 40000.36, 50000, 50000.00],
+     np.array([10000.0, 20000, 3000,
+               40000.36, 50000, 50000.00], dtype=np.float64))
+])
+def test_ignore_downcast_cannot_convert_float(data, expected, downcast):
+    # Cannot cast to an integer (signed or unsigned)
+    # because we have a float number.
+    res = to_numeric(data, downcast=downcast)
+    tm.assert_numpy_array_equal(res, expected)
+
+
+@pytest.mark.parametrize("downcast,expected_dtype", [
+    ("integer", np.int16),
+    ("signed", np.int16),
+    ("unsigned", np.uint16)
+])
+def test_downcast_not8bit(downcast, expected_dtype):
+    # the smallest integer dtype need not be np.(u)int8
+    data = ["256", 257, 258]
+
+    expected = np.array([256, 257, 258], dtype=expected_dtype)
+    res = to_numeric(data, downcast=downcast)
+    tm.assert_numpy_array_equal(res, expected)
+
+
+@pytest.mark.parametrize("dtype,downcast,min_max", [
+    ("int8", "integer", [iinfo(np.int8).min,
+                         iinfo(np.int8).max]),
+    ("int16", "integer", [iinfo(np.int16).min,
+                          iinfo(np.int16).max]),
+    ("int32", "integer", [iinfo(np.int32).min,
+                          iinfo(np.int32).max]),
+    ("int64", "integer", [iinfo(np.int64).min,
+                          iinfo(np.int64).max]),
+    ("uint8", "unsigned", [iinfo(np.uint8).min,
+                           iinfo(np.uint8).max]),
+    ("uint16", "unsigned", [iinfo(np.uint16).min,
+                            iinfo(np.uint16).max]),
+    ("uint32", "unsigned", [iinfo(np.uint32).min,
+                            iinfo(np.uint32).max]),
+    ("uint64", "unsigned", [iinfo(np.uint64).min,
+                            iinfo(np.uint64).max]),
+    ("int16", "integer", [iinfo(np.int8).min,
+                          iinfo(np.int8).max + 1]),
+    ("int32", "integer", [iinfo(np.int16).min,
+                          iinfo(np.int16).max + 1]),
+    ("int64", "integer", [iinfo(np.int32).min,
+                          iinfo(np.int32).max + 1]),
+    ("int16", "integer", [iinfo(np.int8).min - 1,
+                          iinfo(np.int16).max]),
+    ("int32", "integer", [iinfo(np.int16).min - 1,
+                          iinfo(np.int32).max]),
+    ("int64", "integer", [iinfo(np.int32).min - 1,
+                          iinfo(np.int64).max]),
+    ("uint16", "unsigned", [iinfo(np.uint8).min,
+                            iinfo(np.uint8).max + 1]),
+    ("uint32", "unsigned", [iinfo(np.uint16).min,
+                            iinfo(np.uint16).max + 1]),
+    ("uint64", "unsigned", [iinfo(np.uint32).min,
+                            iinfo(np.uint32).max + 1])
+])
+def test_downcast_limits(dtype, downcast, min_max):
+    # see gh-14404: test the limits of each downcast.
+    series = to_numeric(Series(min_max), downcast=downcast)
+    assert series.dtype == dtype
+
+
+@pytest.mark.parametrize("data,exp_data", [
+    ([200, 300, "", "NaN", 30000000000000000000],
+     [200, 300, np.nan, np.nan, 30000000000000000000]),
+    (["12345678901234567890", "1234567890", "ITEM"],
+     [12345678901234567890, 1234567890, np.nan])
+])
+def test_coerce_uint64_conflict(data, exp_data):
+    # see gh-17007 and gh-17125
+    #
+    # Still returns float despite the uint64-nan conflict,
+    # which would normally force the casting to object.
+    result = to_numeric(Series(data), errors="coerce")
+    expected = Series(exp_data, dtype=float)
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("errors,exp", [
+    ("ignore", Series(["12345678901234567890", "1234567890", "ITEM"])),
+    ("raise", "Unable to parse string")
+])
+def test_non_coerce_uint64_conflict(errors, exp):
+    # see gh-17007 and gh-17125
+    #
+    # For completeness.
+    ser = Series(["12345678901234567890", "1234567890", "ITEM"])
+
+    if isinstance(exp, str):
+        with pytest.raises(ValueError, match=exp):
+            to_numeric(ser, errors=errors)
+    else:
+        result = to_numeric(ser, errors=errors)
+        tm.assert_series_equal(result, ser)

From 602eda47da1f560252ba4bf386875a68e561452c Mon Sep 17 00:00:00 2001
From: jbrockmendel <jbrockmendel@gmail.com>
Date: Sat, 26 Jan 2019 09:47:32 -0800
Subject: [PATCH 40/48] BLD: silence npy_no_deprecated warnings with
 numpy>=1.16.0 (#24864)

---
 setup.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/setup.py b/setup.py
index ed2d905f4358b..4bf040b8c8e20 100755
--- a/setup.py
+++ b/setup.py
@@ -457,6 +457,11 @@ def run(self):
     directives['linetrace'] = True
     macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')]
 
+# in numpy>=1.16.0, silence build warnings about deprecated API usage
+#  we can't do anything about these warnings because they stem from
+#  cython+numpy version mismatches.
+macros.append(('NPY_NO_DEPRECATED_API', '0'))
+
 
 # ----------------------------------------------------------------------
 # Specification of Dependencies

From 95f8dca47723192ddeed5a2f198d0521c687c9aa Mon Sep 17 00:00:00 2001
From: Noam Hershtig <noamher@users.noreply.github.com>
Date: Sat, 26 Jan 2019 19:48:41 +0200
Subject: [PATCH 41/48] CLN: Refactor cython to use memory views (#24932)

---
 pandas/_libs/algos.pyx                     | 26 +++---
 pandas/_libs/groupby_helper.pxi.in         | 92 +++++++++++-----------
 pandas/_libs/hashtable.pyx                 | 18 +++--
 pandas/_libs/hashtable_class_helper.pxi.in |  2 +-
 pandas/_libs/internals.pyx                 |  7 +-
 pandas/_libs/join.pyx                      | 46 +++++------
 pandas/_libs/lib.pyx                       | 47 +++++------
 pandas/_libs/missing.pyx                   |  7 +-
 pandas/_libs/parsers.pyx                   | 23 +++---
 pandas/_libs/reduction.pyx                 |  2 +-
 pandas/_libs/skiplist.pyx                  | 19 +++--
 pandas/_libs/sparse_op_helper.pxi.in       | 20 ++---
 pandas/_libs/tslibs/conversion.pyx         |  2 +-
 pandas/_libs/tslibs/fields.pyx             |  2 +-
 pandas/_libs/tslibs/parsing.pyx            |  5 +-
 pandas/_libs/tslibs/period.pyx             |  7 +-
 pandas/_libs/tslibs/resolution.pyx         | 19 ++---
 pandas/_libs/window.pyx                    | 13 +--
 pandas/io/msgpack/_packer.pyx              | 38 ++++-----
 pandas/io/msgpack/_unpacker.pyx            | 48 +++++------
 pandas/io/sas/sas.pyx                      | 11 +--
 21 files changed, 240 insertions(+), 214 deletions(-)

diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index b3c519ab99b6e..663411ad984c2 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -76,7 +76,7 @@ class NegInfinity(object):
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-cpdef ndarray[int64_t, ndim=1] unique_deltas(ndarray[int64_t] arr):
+cpdef ndarray[int64_t, ndim=1] unique_deltas(const int64_t[:] arr):
     """
     Efficiently find the unique first-differences of the given array.
 
@@ -150,7 +150,7 @@ def is_lexsorted(list_of_arrays: list) -> bint:
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def groupsort_indexer(ndarray[int64_t] index, Py_ssize_t ngroups):
+def groupsort_indexer(const int64_t[:] index, Py_ssize_t ngroups):
     """
     compute a 1-d indexer that is an ordering of the passed index,
     ordered by the groups. This is a reverse of the label
@@ -230,7 +230,7 @@ def kth_smallest(numeric[:] a, Py_ssize_t k) -> numeric:
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def nancorr(ndarray[float64_t, ndim=2] mat, bint cov=0, minp=None):
+def nancorr(const float64_t[:, :] mat, bint cov=0, minp=None):
     cdef:
         Py_ssize_t i, j, xi, yi, N, K
         bint minpv
@@ -294,7 +294,7 @@ def nancorr(ndarray[float64_t, ndim=2] mat, bint cov=0, minp=None):
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1):
+def nancorr_spearman(const float64_t[:, :] mat, Py_ssize_t minp=1):
     cdef:
         Py_ssize_t i, j, xi, yi, N, K
         ndarray[float64_t, ndim=2] result
@@ -435,8 +435,8 @@ def pad(ndarray[algos_t] old, ndarray[algos_t] new, limit=None):
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def pad_inplace(ndarray[algos_t] values,
-                ndarray[uint8_t, cast=True] mask,
+def pad_inplace(algos_t[:] values,
+                const uint8_t[:] mask,
                 limit=None):
     cdef:
         Py_ssize_t i, N
@@ -472,8 +472,8 @@ def pad_inplace(ndarray[algos_t] values,
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def pad_2d_inplace(ndarray[algos_t, ndim=2] values,
-                   ndarray[uint8_t, ndim=2] mask,
+def pad_2d_inplace(algos_t[:, :] values,
+                   const uint8_t[:, :] mask,
                    limit=None):
     cdef:
         Py_ssize_t i, j, N, K
@@ -602,8 +602,8 @@ def backfill(ndarray[algos_t] old, ndarray[algos_t] new, limit=None):
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def backfill_inplace(ndarray[algos_t] values,
-                     ndarray[uint8_t, cast=True] mask,
+def backfill_inplace(algos_t[:] values,
+                     const uint8_t[:] mask,
                      limit=None):
     cdef:
         Py_ssize_t i, N
@@ -639,8 +639,8 @@ def backfill_inplace(ndarray[algos_t] values,
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def backfill_2d_inplace(ndarray[algos_t, ndim=2] values,
-                        ndarray[uint8_t, ndim=2] mask,
+def backfill_2d_inplace(algos_t[:, :] values,
+                        const uint8_t[:, :] mask,
                         limit=None):
     cdef:
         Py_ssize_t i, j, N, K
@@ -678,7 +678,7 @@ def backfill_2d_inplace(ndarray[algos_t, ndim=2] values,
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def arrmap(ndarray[algos_t] index, object func):
+def arrmap(algos_t[:] index, object func):
     cdef:
         Py_ssize_t length = index.shape[0]
         Py_ssize_t i = 0
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index abac9f147848e..858039f038d02 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -29,10 +29,10 @@ def get_dispatch(dtypes):
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def group_add_{{name}}(ndarray[{{c_type}}, ndim=2] out,
-                       ndarray[int64_t] counts,
-                       ndarray[{{c_type}}, ndim=2] values,
-                       ndarray[int64_t] labels,
+def group_add_{{name}}({{c_type}}[:, :] out,
+                       int64_t[:] counts,
+                       {{c_type}}[:, :] values,
+                       const int64_t[:] labels,
                        Py_ssize_t min_count=0):
     """
     Only aggregates on axis=0
@@ -76,10 +76,10 @@ def group_add_{{name}}(ndarray[{{c_type}}, ndim=2] out,
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def group_prod_{{name}}(ndarray[{{c_type}}, ndim=2] out,
-                        ndarray[int64_t] counts,
-                        ndarray[{{c_type}}, ndim=2] values,
-                        ndarray[int64_t] labels,
+def group_prod_{{name}}({{c_type}}[:, :] out,
+                        int64_t[:] counts,
+                        {{c_type}}[:, :] values,
+                        const int64_t[:] labels,
                         Py_ssize_t min_count=0):
     """
     Only aggregates on axis=0
@@ -123,10 +123,10 @@ def group_prod_{{name}}(ndarray[{{c_type}}, ndim=2] out,
 @cython.wraparound(False)
 @cython.boundscheck(False)
 @cython.cdivision(True)
-def group_var_{{name}}(ndarray[{{c_type}}, ndim=2] out,
-                       ndarray[int64_t] counts,
-                       ndarray[{{c_type}}, ndim=2] values,
-                       ndarray[int64_t] labels,
+def group_var_{{name}}({{c_type}}[:, :] out,
+                       int64_t[:] counts,
+                       {{c_type}}[:, :] values,
+                       const int64_t[:] labels,
                        Py_ssize_t min_count=-1):
     cdef:
         Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
@@ -175,10 +175,10 @@ def group_var_{{name}}(ndarray[{{c_type}}, ndim=2] out,
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def group_mean_{{name}}(ndarray[{{c_type}}, ndim=2] out,
-                        ndarray[int64_t] counts,
-                        ndarray[{{c_type}}, ndim=2] values,
-                        ndarray[int64_t] labels,
+def group_mean_{{name}}({{c_type}}[:, :] out,
+                        int64_t[:] counts,
+                        {{c_type}}[:, :] values,
+                        const int64_t[:] labels,
                         Py_ssize_t min_count=-1):
     cdef:
         Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
@@ -220,11 +220,11 @@ def group_mean_{{name}}(ndarray[{{c_type}}, ndim=2] out,
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def group_ohlc_{{name}}(ndarray[{{c_type}}, ndim=2] out,
-                  ndarray[int64_t] counts,
-                  ndarray[{{c_type}}, ndim=2] values,
-                  ndarray[int64_t] labels,
-                  Py_ssize_t min_count=-1):
+def group_ohlc_{{name}}({{c_type}}[:, :] out,
+                        int64_t[:] counts,
+                        {{c_type}}[:, :] values,
+                        const int64_t[:] labels,
+                        Py_ssize_t min_count=-1):
     """
     Only aggregates on axis=0
     """
@@ -293,10 +293,10 @@ def get_dispatch(dtypes):
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def group_last_{{name}}(ndarray[{{c_type}}, ndim=2] out,
-                        ndarray[int64_t] counts,
-                        ndarray[{{c_type}}, ndim=2] values,
-                        ndarray[int64_t] labels,
+def group_last_{{name}}({{c_type}}[:, :] out,
+                        int64_t[:] counts,
+                        {{c_type}}[:, :] values,
+                        const int64_t[:] labels,
                         Py_ssize_t min_count=-1):
     """
     Only aggregates on axis=0
@@ -350,10 +350,10 @@ def group_last_{{name}}(ndarray[{{c_type}}, ndim=2] out,
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def group_nth_{{name}}(ndarray[{{c_type}}, ndim=2] out,
-                       ndarray[int64_t] counts,
-                       ndarray[{{c_type}}, ndim=2] values,
-                       ndarray[int64_t] labels, int64_t rank,
+def group_nth_{{name}}({{c_type}}[:, :] out,
+                       int64_t[:] counts,
+                       {{c_type}}[:, :] values,
+                       const int64_t[:] labels, int64_t rank,
                        Py_ssize_t min_count=-1):
     """
     Only aggregates on axis=0
@@ -411,9 +411,9 @@ def group_nth_{{name}}(ndarray[{{c_type}}, ndim=2] out,
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
-                        ndarray[{{c_type}}, ndim=2] values,
-                        ndarray[int64_t] labels,
+def group_rank_{{name}}(float64_t[:, :] out,
+                        {{c_type}}[:, :] values,
+                        const int64_t[:] labels,
                         bint is_datetimelike, object ties_method,
                         bint ascending, bint pct, object na_option):
     """
@@ -606,10 +606,10 @@ ctypedef fused groupby_t:
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def group_max(ndarray[groupby_t, ndim=2] out,
-              ndarray[int64_t] counts,
-              ndarray[groupby_t, ndim=2] values,
-              ndarray[int64_t] labels,
+def group_max(groupby_t[:, :] out,
+              int64_t[:] counts,
+              groupby_t[:, :] values,
+              const int64_t[:] labels,
               Py_ssize_t min_count=-1):
     """
     Only aggregates on axis=0
@@ -669,10 +669,10 @@ def group_max(ndarray[groupby_t, ndim=2] out,
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def group_min(ndarray[groupby_t, ndim=2] out,
-              ndarray[int64_t] counts,
-              ndarray[groupby_t, ndim=2] values,
-              ndarray[int64_t] labels,
+def group_min(groupby_t[:, :] out,
+              int64_t[:] counts,
+              groupby_t[:, :] values,
+              const int64_t[:] labels,
               Py_ssize_t min_count=-1):
     """
     Only aggregates on axis=0
@@ -731,9 +731,9 @@ def group_min(ndarray[groupby_t, ndim=2] out,
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def group_cummin(ndarray[groupby_t, ndim=2] out,
-                 ndarray[groupby_t, ndim=2] values,
-                 ndarray[int64_t] labels,
+def group_cummin(groupby_t[:, :] out,
+                 groupby_t[:, :] values,
+                 const int64_t[:] labels,
                  bint is_datetimelike):
     """
     Only transforms on axis=0
@@ -779,9 +779,9 @@ def group_cummin(ndarray[groupby_t, ndim=2] out,
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def group_cummax(ndarray[groupby_t, ndim=2] out,
-                 ndarray[groupby_t, ndim=2] values,
-                 ndarray[int64_t] labels,
+def group_cummax(groupby_t[:, :] out,
+                 groupby_t[:, :] values,
+                 const int64_t[:] labels,
                  bint is_datetimelike):
     """
     Only transforms on axis=0
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index 47fa5932290af..8d0c451ad0ab8 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -52,9 +52,10 @@ include "hashtable_class_helper.pxi"
 include "hashtable_func_helper.pxi"
 
 cdef class Factorizer:
-    cdef public PyObjectHashTable table
-    cdef public ObjectVector uniques
-    cdef public Py_ssize_t count
+    cdef public:
+        PyObjectHashTable table
+        ObjectVector uniques
+        Py_ssize_t count
 
     def __init__(self, size_hint):
         self.table = PyObjectHashTable(size_hint)
@@ -96,9 +97,10 @@ cdef class Factorizer:
 
 
 cdef class Int64Factorizer:
-    cdef public Int64HashTable table
-    cdef public Int64Vector uniques
-    cdef public Py_ssize_t count
+    cdef public:
+        Int64HashTable table
+        Int64Vector uniques
+        Py_ssize_t count
 
     def __init__(self, size_hint):
         self.table = Int64HashTable(size_hint)
@@ -140,7 +142,7 @@ cdef class Int64Factorizer:
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def unique_label_indices(ndarray[int64_t, ndim=1] labels):
+def unique_label_indices(const int64_t[:] labels):
     """
     indices of the first occurrences of the unique labels
     *excluding* -1. equivalent to:
@@ -168,6 +170,6 @@ def unique_label_indices(ndarray[int64_t, ndim=1] labels):
     kh_destroy_int64(table)
 
     arr = idx.to_array()
-    arr = arr[labels[arr].argsort()]
+    arr = arr[np.asarray(labels)[arr].argsort()]
 
     return arr[1:] if arr.size != 0 and labels[arr[0]] == -1 else arr
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index eac35588b6fc3..3644928d8dedc 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -322,7 +322,7 @@ cdef class {{name}}HashTable(HashTable):
                 self.table.vals[k] = <Py_ssize_t>values[i]
 
     @cython.boundscheck(False)
-    def map_locations(self, ndarray[{{dtype}}_t, ndim=1] values):
+    def map_locations(self, const {{dtype}}_t[:] values):
         cdef:
             Py_ssize_t i, n = len(values)
             int ret = 0
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 72a1cf16f96b6..f23d2666b4bf4 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -23,10 +23,11 @@ from pandas._libs.algos import ensure_int64
 
 cdef class BlockPlacement:
     # __slots__ = '_as_slice', '_as_array', '_len'
-    cdef slice _as_slice
-    cdef object _as_array
+    cdef:
+        slice _as_slice
+        object _as_array
 
-    cdef bint _has_slice, _has_array, _is_known_slice_like
+        bint _has_slice, _has_array, _is_known_slice_like
 
     def __init__(self, val):
         cdef:
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index e4440ac3d9fd8..503867058b3c8 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -14,7 +14,7 @@ from pandas._libs.algos import groupsort_indexer, ensure_platform_int
 from pandas.core.algorithms import take_nd
 
 
-def inner_join(ndarray[int64_t] left, ndarray[int64_t] right,
+def inner_join(const int64_t[:] left, const int64_t[:] right,
                Py_ssize_t max_groups):
     cdef:
         Py_ssize_t i, j, k, count = 0
@@ -65,7 +65,7 @@ def inner_join(ndarray[int64_t] left, ndarray[int64_t] right,
             _get_result_indexer(right_sorter, right_indexer))
 
 
-def left_outer_join(ndarray[int64_t] left, ndarray[int64_t] right,
+def left_outer_join(const int64_t[:] left, const int64_t[:] right,
                     Py_ssize_t max_groups, sort=True):
     cdef:
         Py_ssize_t i, j, k, count = 0
@@ -139,7 +139,7 @@ def left_outer_join(ndarray[int64_t] left, ndarray[int64_t] right,
     return left_indexer, right_indexer
 
 
-def full_outer_join(ndarray[int64_t] left, ndarray[int64_t] right,
+def full_outer_join(const int64_t[:] left, const int64_t[:] right,
                     Py_ssize_t max_groups):
     cdef:
         Py_ssize_t i, j, k, count = 0
@@ -213,7 +213,7 @@ def _get_result_indexer(sorter, indexer):
     return res
 
 
-def ffill_indexer(ndarray[int64_t] indexer):
+def ffill_indexer(const int64_t[:] indexer):
     cdef:
         Py_ssize_t i, n = len(indexer)
         ndarray[int64_t] result
@@ -252,7 +252,7 @@ ctypedef fused join_t:
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def left_join_indexer_unique(ndarray[join_t] left, ndarray[join_t] right):
+def left_join_indexer_unique(join_t[:] left, join_t[:] right):
     cdef:
         Py_ssize_t i, j, nleft, nright
         ndarray[int64_t] indexer
@@ -677,10 +677,10 @@ ctypedef fused by_t:
     uint64_t
 
 
-def asof_join_backward_on_X_by_Y(ndarray[asof_t] left_values,
-                                 ndarray[asof_t] right_values,
-                                 ndarray[by_t] left_by_values,
-                                 ndarray[by_t] right_by_values,
+def asof_join_backward_on_X_by_Y(asof_t[:] left_values,
+                                 asof_t[:] right_values,
+                                 by_t[:] left_by_values,
+                                 by_t[:] right_by_values,
                                  bint allow_exact_matches=1,
                                  tolerance=None):
 
@@ -746,10 +746,10 @@ def asof_join_backward_on_X_by_Y(ndarray[asof_t] left_values,
     return left_indexer, right_indexer
 
 
-def asof_join_forward_on_X_by_Y(ndarray[asof_t] left_values,
-                                ndarray[asof_t] right_values,
-                                ndarray[by_t] left_by_values,
-                                ndarray[by_t] right_by_values,
+def asof_join_forward_on_X_by_Y(asof_t[:] left_values,
+                                asof_t[:] right_values,
+                                by_t[:] left_by_values,
+                                by_t[:] right_by_values,
                                 bint allow_exact_matches=1,
                                 tolerance=None):
 
@@ -815,10 +815,10 @@ def asof_join_forward_on_X_by_Y(ndarray[asof_t] left_values,
     return left_indexer, right_indexer
 
 
-def asof_join_nearest_on_X_by_Y(ndarray[asof_t] left_values,
-                                ndarray[asof_t] right_values,
-                                ndarray[by_t] left_by_values,
-                                ndarray[by_t] right_by_values,
+def asof_join_nearest_on_X_by_Y(asof_t[:] left_values,
+                                asof_t[:] right_values,
+                                by_t[:] left_by_values,
+                                by_t[:] right_by_values,
                                 bint allow_exact_matches=1,
                                 tolerance=None):
 
@@ -864,8 +864,8 @@ def asof_join_nearest_on_X_by_Y(ndarray[asof_t] left_values,
 # asof_join
 # ----------------------------------------------------------------------
 
-def asof_join_backward(ndarray[asof_t] left_values,
-                       ndarray[asof_t] right_values,
+def asof_join_backward(asof_t[:] left_values,
+                       asof_t[:] right_values,
                        bint allow_exact_matches=1,
                        tolerance=None):
 
@@ -917,8 +917,8 @@ def asof_join_backward(ndarray[asof_t] left_values,
     return left_indexer, right_indexer
 
 
-def asof_join_forward(ndarray[asof_t] left_values,
-                      ndarray[asof_t] right_values,
+def asof_join_forward(asof_t[:] left_values,
+                      asof_t[:] right_values,
                       bint allow_exact_matches=1,
                       tolerance=None):
 
@@ -971,8 +971,8 @@ def asof_join_forward(ndarray[asof_t] left_values,
     return left_indexer, right_indexer
 
 
-def asof_join_nearest(ndarray[asof_t] left_values,
-                      ndarray[asof_t] right_values,
+def asof_join_nearest(asof_t[:] left_values,
+                      asof_t[:] right_values,
                       bint allow_exact_matches=1,
                       tolerance=None):
 
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index f845a5437ded4..4745916eb0ce2 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -40,11 +40,12 @@ cdef extern from "numpy/arrayobject.h":
         # Use PyDataType_* macros when possible, however there are no macros
         # for accessing some of the fields, so some are defined. Please
         # ask on cython-dev if you need more.
-        cdef int type_num
-        cdef int itemsize "elsize"
-        cdef char byteorder
-        cdef object fields
-        cdef tuple names
+        cdef:
+            int type_num
+            int itemsize "elsize"
+            char byteorder
+            object fields
+            tuple names
 
 
 cdef extern from "src/parse_helper.h":
@@ -67,12 +68,13 @@ from pandas._libs.missing cimport (
 
 # constants that will be compared to potentially arbitrarily large
 # python int
-cdef object oINT64_MAX = <int64_t>INT64_MAX
-cdef object oINT64_MIN = <int64_t>INT64_MIN
-cdef object oUINT64_MAX = <uint64_t>UINT64_MAX
+cdef:
+    object oINT64_MAX = <int64_t>INT64_MAX
+    object oINT64_MIN = <int64_t>INT64_MIN
+    object oUINT64_MAX = <uint64_t>UINT64_MAX
 
-cdef bint PY2 = sys.version_info[0] == 2
-cdef float64_t NaN = <float64_t>np.NaN
+    bint PY2 = sys.version_info[0] == 2
+    float64_t NaN = <float64_t>np.NaN
 
 
 def values_from_object(obj: object):
@@ -376,7 +378,7 @@ def fast_zip(list ndarrays):
     return result
 
 
-def get_reverse_indexer(ndarray[int64_t] indexer, Py_ssize_t length):
+def get_reverse_indexer(const int64_t[:] indexer, Py_ssize_t length):
     """
     Reverse indexing operation.
 
@@ -405,7 +407,7 @@ def get_reverse_indexer(ndarray[int64_t] indexer, Py_ssize_t length):
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def has_infs_f4(ndarray[float32_t] arr) -> bool:
+def has_infs_f4(const float32_t[:] arr) -> bool:
     cdef:
         Py_ssize_t i, n = len(arr)
         float32_t inf, neginf, val
@@ -422,7 +424,7 @@ def has_infs_f4(ndarray[float32_t] arr) -> bool:
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def has_infs_f8(ndarray[float64_t] arr) -> bool:
+def has_infs_f8(const float64_t[:] arr) -> bool:
     cdef:
         Py_ssize_t i, n = len(arr)
         float64_t inf, neginf, val
@@ -660,7 +662,7 @@ def clean_index_list(obj: list):
 # is a general, O(max(len(values), len(binner))) method.
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner,
+def generate_bins_dt64(ndarray[int64_t] values, const int64_t[:] binner,
                        object closed='left', bint hasnans=0):
     """
     Int64 (datetime64) version of generic python version in groupby.py
@@ -723,7 +725,7 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner,
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def row_bool_subset(ndarray[float64_t, ndim=2] values,
+def row_bool_subset(const float64_t[:, :] values,
                     ndarray[uint8_t, cast=True] mask):
     cdef:
         Py_ssize_t i, j, n, k, pos = 0
@@ -767,8 +769,8 @@ def row_bool_subset_object(ndarray[object, ndim=2] values,
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def get_level_sorter(ndarray[int64_t, ndim=1] label,
-                     ndarray[int64_t, ndim=1] starts):
+def get_level_sorter(const int64_t[:] label,
+                     const int64_t[:] starts):
     """
     argsort for a single level of a multi-index, keeping the order of higher
     levels unchanged. `starts` points to starts of same-key indices w.r.t
@@ -780,10 +782,11 @@ def get_level_sorter(ndarray[int64_t, ndim=1] label,
         int64_t l, r
         Py_ssize_t i
         ndarray[int64_t, ndim=1] out = np.empty(len(label), dtype=np.int64)
+        ndarray[int64_t, ndim=1] label_arr = np.asarray(label)
 
     for i in range(len(starts) - 1):
         l, r = starts[i], starts[i + 1]
-        out[l:r] = l + label[l:r].argsort(kind='mergesort')
+        out[l:r] = l + label_arr[l:r].argsort(kind='mergesort')
 
     return out
 
@@ -791,7 +794,7 @@ def get_level_sorter(ndarray[int64_t, ndim=1] label,
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask,
-                   ndarray[int64_t, ndim=1] labels,
+                   const int64_t[:] labels,
                    Py_ssize_t max_bin,
                    int axis):
     cdef:
@@ -818,7 +821,7 @@ def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask,
     return counts
 
 
-def generate_slices(ndarray[int64_t] labels, Py_ssize_t ngroups):
+def generate_slices(const int64_t[:] labels, Py_ssize_t ngroups):
     cdef:
         Py_ssize_t i, group_size, n, start
         int64_t lab
@@ -847,7 +850,7 @@ def generate_slices(ndarray[int64_t] labels, Py_ssize_t ngroups):
     return starts, ends
 
 
-def indices_fast(object index, ndarray[int64_t] labels, list keys,
+def indices_fast(object index, const int64_t[:] labels, list keys,
                  list sorted_labels):
     cdef:
         Py_ssize_t i, j, k, lab, cur, start, n = len(labels)
@@ -2146,7 +2149,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def map_infer_mask(ndarray arr, object f, ndarray[uint8_t] mask,
+def map_infer_mask(ndarray arr, object f, const uint8_t[:] mask,
                    bint convert=1):
     """
     Substitute for np.vectorize with pandas-friendly dtype inference
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 229edbac4992d..ab0e4cd6cc765 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -16,10 +16,11 @@ from pandas._libs.tslibs.nattype cimport (
     checknull_with_nat, c_NaT as NaT, is_null_datetimelike)
 
 
-cdef float64_t INF = <float64_t>np.inf
-cdef float64_t NEGINF = -INF
+cdef:
+    float64_t INF = <float64_t>np.inf
+    float64_t NEGINF = -INF
 
-cdef int64_t NPY_NAT = util.get_nat()
+    int64_t NPY_NAT = util.get_nat()
 
 
 cpdef bint checknull(object val):
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 6cb6ed749f87b..f679746643643 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -64,10 +64,11 @@ from pandas.errors import (ParserError, DtypeWarning,
 CParserError = ParserError
 
 
-cdef bint PY3 = (sys.version_info[0] >= 3)
+cdef:
+    bint PY3 = (sys.version_info[0] >= 3)
 
-cdef float64_t INF = <float64_t>np.inf
-cdef float64_t NEGINF = -INF
+    float64_t INF = <float64_t>np.inf
+    float64_t NEGINF = -INF
 
 
 cdef extern from "errno.h":
@@ -735,7 +736,7 @@ cdef class TextReader:
             int status
             int64_t hr, data_line
             char *errors = "strict"
-            cdef StringPath path = _string_path(self.c_encoding)
+            StringPath path = _string_path(self.c_encoding)
 
         header = []
         unnamed_cols = set()
@@ -1389,8 +1390,9 @@ cdef class TextReader:
                 return None
 
 
-cdef object _true_values = [b'True', b'TRUE', b'true']
-cdef object _false_values = [b'False', b'FALSE', b'false']
+cdef:
+    object _true_values = [b'True', b'TRUE', b'true']
+    object _false_values = [b'False', b'FALSE', b'false']
 
 
 def _ensure_encoded(list lst):
@@ -1637,7 +1639,7 @@ cdef _categorical_convert(parser_t *parser, int64_t col,
         int64_t current_category = 0
 
         char *errors = "strict"
-        cdef StringPath path = _string_path(encoding)
+        StringPath path = _string_path(encoding)
 
         int ret = 0
         kh_str_t *table
@@ -1727,9 +1729,10 @@ cdef inline void _to_fw_string_nogil(parser_t *parser, int64_t col,
         data += width
 
 
-cdef char* cinf = b'inf'
-cdef char* cposinf = b'+inf'
-cdef char* cneginf = b'-inf'
+cdef:
+    char* cinf = b'inf'
+    char* cposinf = b'+inf'
+    char* cneginf = b'-inf'
 
 
 cdef _try_double(parser_t *parser, int64_t col,
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index ca39c4de4d309..507567cf480d7 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -494,7 +494,7 @@ class InvalidApply(Exception):
 
 
 def apply_frame_axis0(object frame, object f, object names,
-                      ndarray[int64_t] starts, ndarray[int64_t] ends):
+                      const int64_t[:] starts, const int64_t[:] ends):
     cdef:
         BlockSlider slider
         Py_ssize_t i, n = len(starts)
diff --git a/pandas/_libs/skiplist.pyx b/pandas/_libs/skiplist.pyx
index 6698fcb767d7c..2fdee72f9d588 100644
--- a/pandas/_libs/skiplist.pyx
+++ b/pandas/_libs/skiplist.pyx
@@ -57,8 +57,9 @@ cdef class IndexableSkiplist:
         return self.get(i)
 
     cpdef get(self, Py_ssize_t i):
-        cdef Py_ssize_t level
-        cdef Node node
+        cdef:
+            Py_ssize_t level
+            Node node
 
         node = self.head
         i += 1
@@ -71,9 +72,10 @@ cdef class IndexableSkiplist:
         return node.value
 
     cpdef insert(self, double value):
-        cdef Py_ssize_t level, steps, d
-        cdef Node node, prevnode, newnode, next_at_level, tmp
-        cdef list chain, steps_at_level
+        cdef:
+            Py_ssize_t level, steps, d
+            Node node, prevnode, newnode, next_at_level, tmp
+            list chain, steps_at_level
 
         # find first node on each level where node.next[levels].value > value
         chain = [None] * self.maxlevels
@@ -110,9 +112,10 @@ cdef class IndexableSkiplist:
         self.size += 1
 
     cpdef remove(self, double value):
-        cdef Py_ssize_t level, d
-        cdef Node node, prevnode, tmpnode, next_at_level
-        cdef list chain
+        cdef:
+            Py_ssize_t level, d
+            Node node, prevnode, tmpnode, next_at_level
+            list chain
 
         # find first node on each level where node.next[levels].value >= value
         chain = [None] * self.maxlevels
diff --git a/pandas/_libs/sparse_op_helper.pxi.in b/pandas/_libs/sparse_op_helper.pxi.in
index c6621ab5977ca..5949a3fd0ed81 100644
--- a/pandas/_libs/sparse_op_helper.pxi.in
+++ b/pandas/_libs/sparse_op_helper.pxi.in
@@ -125,10 +125,10 @@ def get_dispatch(dtypes):
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-cdef inline tuple block_op_{{opname}}_{{dtype}}(ndarray x_,
+cdef inline tuple block_op_{{opname}}_{{dtype}}({{dtype}}_t[:] x_,
                                                 BlockIndex xindex,
                                                 {{dtype}}_t xfill,
-                                                ndarray y_,
+                                                {{dtype}}_t[:] y_,
                                                 BlockIndex yindex,
                                                 {{dtype}}_t yfill):
     '''
@@ -142,7 +142,7 @@ cdef inline tuple block_op_{{opname}}_{{dtype}}(ndarray x_,
         int32_t xloc, yloc
         Py_ssize_t xblock = 0, yblock = 0 # block numbers
 
-        ndarray[{{dtype}}_t, ndim=1] x, y
+        {{dtype}}_t[:] x, y
         ndarray[{{rdtype}}_t, ndim=1] out
 
     # to suppress Cython warning
@@ -226,16 +226,18 @@ cdef inline tuple block_op_{{opname}}_{{dtype}}(ndarray x_,
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-cdef inline tuple int_op_{{opname}}_{{dtype}}(ndarray x_, IntIndex xindex,
+cdef inline tuple int_op_{{opname}}_{{dtype}}({{dtype}}_t[:] x_,
+                                              IntIndex xindex,
                                               {{dtype}}_t xfill,
-                                              ndarray y_, IntIndex yindex,
+                                              {{dtype}}_t[:] y_,
+                                              IntIndex yindex,
                                               {{dtype}}_t yfill):
     cdef:
         IntIndex out_index
         Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
         int32_t xloc, yloc
-        ndarray[int32_t, ndim=1] xindices, yindices, out_indices
-        ndarray[{{dtype}}_t, ndim=1] x, y
+        int32_t[:] xindices, yindices, out_indices
+        {{dtype}}_t[:] x, y
         ndarray[{{rdtype}}_t, ndim=1] out
 
     # suppress Cython compiler warnings due to inlining
@@ -284,9 +286,9 @@ cdef inline tuple int_op_{{opname}}_{{dtype}}(ndarray x_, IntIndex xindex,
     return out, out_index, {{(opname, 'xfill', 'yfill', dtype) | get_op}}
 
 
-cpdef sparse_{{opname}}_{{dtype}}(ndarray[{{dtype}}_t, ndim=1] x,
+cpdef sparse_{{opname}}_{{dtype}}({{dtype}}_t[:] x,
                                   SparseIndex xindex, {{dtype}}_t xfill,
-                                  ndarray[{{dtype}}_t, ndim=1] y,
+                                  {{dtype}}_t[:] y,
                                   SparseIndex yindex, {{dtype}}_t yfill):
 
     if isinstance(xindex, BlockIndex):
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 6c8b732928bc3..1c0adaaa288a9 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -147,7 +147,7 @@ def ensure_timedelta64ns(arr: ndarray, copy: bool=True):
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def datetime_to_datetime64(values: object[:]):
+def datetime_to_datetime64(object[:] values):
     """
     Convert ndarray of datetime-like objects to int64 array representing
     nanosecond timestamps.
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 5cda7992369fc..240f008394099 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -381,7 +381,7 @@ def get_start_end_field(int64_t[:] dtindex, object field,
 
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def get_date_field(ndarray[int64_t] dtindex, object field):
+def get_date_field(int64_t[:] dtindex, object field):
     """
     Given a int64-based datetime index, extract the year, month, etc.,
     field and return an array of these values.
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 82719de2dbdbd..7759e165b7193 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -44,9 +44,10 @@ class DateParseError(ValueError):
 _DEFAULT_DATETIME = datetime(1, 1, 1).replace(hour=0, minute=0,
                                               second=0, microsecond=0)
 
-cdef object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])')
+cdef:
+    object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])')
 
-cdef set _not_datelike_strings = {'a', 'A', 'm', 'M', 'p', 'P', 't', 'T'}
+    set _not_datelike_strings = {'a', 'A', 'm', 'M', 'p', 'P', 't', 'T'}
 
 # ----------------------------------------------------------------------
 
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 2f4edb7de8f95..e38e9a1ca5df6 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -52,9 +52,10 @@ from pandas._libs.tslibs.nattype cimport (
 from pandas._libs.tslibs.offsets cimport to_offset
 from pandas._libs.tslibs.offsets import _Tick
 
-cdef bint PY2 = str == bytes
-cdef enum:
-    INT32_MIN = -2147483648
+cdef:
+    bint PY2 = str == bytes
+    enum:
+        INT32_MIN = -2147483648
 
 
 ctypedef struct asfreq_info:
diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx
index f80c1e9841abe..13a4f5ba48557 100644
--- a/pandas/_libs/tslibs/resolution.pyx
+++ b/pandas/_libs/tslibs/resolution.pyx
@@ -16,15 +16,16 @@ from pandas._libs.tslibs.ccalendar cimport get_days_in_month
 # ----------------------------------------------------------------------
 # Constants
 
-cdef int64_t NPY_NAT = get_nat()
-
-cdef int RESO_NS = 0
-cdef int RESO_US = 1
-cdef int RESO_MS = 2
-cdef int RESO_SEC = 3
-cdef int RESO_MIN = 4
-cdef int RESO_HR = 5
-cdef int RESO_DAY = 6
+cdef:
+    int64_t NPY_NAT = get_nat()
+
+    int RESO_NS = 0
+    int RESO_US = 1
+    int RESO_MS = 2
+    int RESO_SEC = 3
+    int RESO_MIN = 4
+    int RESO_HR = 5
+    int RESO_DAY = 6
 
 # ----------------------------------------------------------------------
 
diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx
index e8f3de64c3823..cc5b3b63f5b04 100644
--- a/pandas/_libs/window.pyx
+++ b/pandas/_libs/window.pyx
@@ -26,13 +26,14 @@ from pandas._libs.skiplist cimport (
     skiplist_t, skiplist_init, skiplist_destroy, skiplist_get, skiplist_insert,
     skiplist_remove)
 
-cdef float32_t MINfloat32 = np.NINF
-cdef float64_t MINfloat64 = np.NINF
+cdef:
+    float32_t MINfloat32 = np.NINF
+    float64_t MINfloat64 = np.NINF
 
-cdef float32_t MAXfloat32 = np.inf
-cdef float64_t MAXfloat64 = np.inf
+    float32_t MAXfloat32 = np.inf
+    float64_t MAXfloat64 = np.inf
 
-cdef float64_t NaN = <float64_t>np.NaN
+    float64_t NaN = <float64_t>np.NaN
 
 cdef inline int int_max(int a, int b): return a if a >= b else b
 cdef inline int int_min(int a, int b): return a if a <= b else b
@@ -242,7 +243,7 @@ cdef class VariableWindowIndexer(WindowIndexer):
         # max window size
         self.win = (self.end - self.start).max()
 
-    def build(self, ndarray[int64_t] index, int64_t win, bint left_closed,
+    def build(self, const int64_t[:] index, int64_t win, bint left_closed,
               bint right_closed):
 
         cdef:
diff --git a/pandas/io/msgpack/_packer.pyx b/pandas/io/msgpack/_packer.pyx
index d67c632188e62..8e2d943d8ddb1 100644
--- a/pandas/io/msgpack/_packer.pyx
+++ b/pandas/io/msgpack/_packer.pyx
@@ -74,14 +74,15 @@ cdef class Packer(object):
         Use bin type introduced in msgpack spec 2.0 for bytes.
         It also enable str8 type for unicode.
     """
-    cdef msgpack_packer pk
-    cdef object _default
-    cdef object _bencoding
-    cdef object _berrors
-    cdef char *encoding
-    cdef char *unicode_errors
-    cdef bint use_float
-    cdef bint autoreset
+    cdef:
+        msgpack_packer pk
+        object _default
+        object _bencoding
+        object _berrors
+        char *encoding
+        char *unicode_errors
+        bint use_float
+        bint autoreset
 
     def __cinit__(self):
         cdef int buf_size = 1024 * 1024
@@ -123,16 +124,17 @@ cdef class Packer(object):
 
     cdef int _pack(self, object o,
                    int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
-        cdef long long llval
-        cdef unsigned long long ullval
-        cdef long longval
-        cdef float fval
-        cdef double dval
-        cdef char* rawval
-        cdef int ret
-        cdef dict d
-        cdef size_t L
-        cdef int default_used = 0
+        cdef:
+            long long llval
+            unsigned long long ullval
+            long longval
+            float fval
+            double dval
+            char* rawval
+            int ret
+            dict d
+            size_t L
+            int default_used = 0
 
         if nest_limit < 0:
             raise PackValueError("recursion limit exceeded.")
diff --git a/pandas/io/msgpack/_unpacker.pyx b/pandas/io/msgpack/_unpacker.pyx
index 0c50aa5e68103..9bbfe749ef9ba 100644
--- a/pandas/io/msgpack/_unpacker.pyx
+++ b/pandas/io/msgpack/_unpacker.pyx
@@ -120,14 +120,15 @@ def unpackb(object packed, object object_hook=None, object list_hook=None,
 
     See :class:`Unpacker` for options.
     """
-    cdef unpack_context ctx
-    cdef size_t off = 0
-    cdef int ret
+    cdef:
+        unpack_context ctx
+        size_t off = 0
+        int ret
 
-    cdef char* buf
-    cdef Py_ssize_t buf_len
-    cdef char* cenc = NULL
-    cdef char* cerr = NULL
+        char* buf
+        Py_ssize_t buf_len
+        char* cenc = NULL
+        char* cerr = NULL
 
     PyObject_AsReadBuffer(packed, <const void**>&buf, &buf_len)
 
@@ -243,16 +244,17 @@ cdef class Unpacker(object):
             for o in unpacker:
                 process(o)
     """
-    cdef unpack_context ctx
-    cdef char* buf
-    cdef size_t buf_size, buf_head, buf_tail
-    cdef object file_like
-    cdef object file_like_read
-    cdef Py_ssize_t read_size
-    # To maintain refcnt.
-    cdef object object_hook, object_pairs_hook, list_hook, ext_hook
-    cdef object encoding, unicode_errors
-    cdef size_t max_buffer_size
+    cdef:
+        unpack_context ctx
+        char* buf
+        size_t buf_size, buf_head, buf_tail
+        object file_like
+        object file_like_read
+        Py_ssize_t read_size
+        # To maintain refcnt.
+        object object_hook, object_pairs_hook, list_hook, ext_hook
+        object encoding, unicode_errors
+        size_t max_buffer_size
 
     def __cinit__(self):
         self.buf = NULL
@@ -270,8 +272,9 @@ cdef class Unpacker(object):
                  Py_ssize_t max_array_len=2147483647,
                  Py_ssize_t max_map_len=2147483647,
                  Py_ssize_t max_ext_len=2147483647):
-        cdef char *cenc=NULL,
-        cdef char *cerr=NULL
+        cdef:
+            char *cenc=NULL,
+            char *cerr=NULL
 
         self.object_hook = object_hook
         self.object_pairs_hook = object_pairs_hook
@@ -388,9 +391,10 @@ cdef class Unpacker(object):
 
     cdef object _unpack(self, execute_fn execute,
                         object write_bytes, bint iter=0):
-        cdef int ret
-        cdef object obj
-        cdef size_t prev_head
+        cdef:
+            int ret
+            object obj
+            size_t prev_head
 
         if self.buf_head >= self.buf_tail and self.file_like is not None:
             self.read_from_file()
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index a5bfd5866a261..9b8fba16741f6 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -203,11 +203,12 @@ cdef enum ColumnTypes:
 
 
 # type the page_data types
-cdef int page_meta_type = const.page_meta_type
-cdef int page_mix_types_0 = const.page_mix_types[0]
-cdef int page_mix_types_1 = const.page_mix_types[1]
-cdef int page_data_type = const.page_data_type
-cdef int subheader_pointers_offset = const.subheader_pointers_offset
+cdef:
+    int page_meta_type = const.page_meta_type
+    int page_mix_types_0 = const.page_mix_types[0]
+    int page_mix_types_1 = const.page_mix_types[1]
+    int page_data_type = const.page_data_type
+    int subheader_pointers_offset = const.subheader_pointers_offset
 
 
 cdef class Parser(object):

From 2b16e2e6c5a298396727fc2e66a60edf1eb13bf9 Mon Sep 17 00:00:00 2001
From: Daniel Saxton <daniel.saxton@gmail.com>
Date: Sat, 26 Jan 2019 21:05:46 -0600
Subject: [PATCH 42/48] DOC: Clean sort_values and sort_index docstrings
 (#24843)

---
 pandas/core/generic.py | 51 +++++++++++++++++++++++-------------------
 pandas/core/series.py  |  4 ++--
 2 files changed, 30 insertions(+), 25 deletions(-)

diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2b97661fe9ec3..a351233a77465 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -530,7 +530,7 @@ def set_axis(self, labels, axis=0, inplace=None):
             The axis to update. The value 0 identifies the rows, and 1
             identifies the columns.
 
-        inplace : boolean, default None
+        inplace : bool, default None
             Whether to return a new %(klass)s instance.
 
             .. warning::
@@ -3966,35 +3966,37 @@ def add_suffix(self, suffix):
     def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
                     kind='quicksort', na_position='last'):
         """
-        Sort by the values along either axis
+        Sort by the values along either axis.
 
         Parameters
         ----------%(optional_by)s
         axis : %(axes_single_arg)s, default 0
-             Axis to be sorted
+             Axis to be sorted.
         ascending : bool or list of bool, default True
              Sort ascending vs. descending. Specify list for multiple sort
              orders.  If this is a list of bools, must match the length of
              the by.
         inplace : bool, default False
-             if True, perform operation in-place
+             If True, perform operation in-place.
         kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
              Choice of sorting algorithm. See also ndarray.np.sort for more
              information.  `mergesort` is the only stable algorithm. For
              DataFrames, this option is only applied when sorting on a single
              column or label.
         na_position : {'first', 'last'}, default 'last'
-             `first` puts NaNs at the beginning, `last` puts NaNs at the end
+             Puts NaNs at the beginning if `first`; `last` puts NaNs at the
+             end.
 
         Returns
         -------
-        sorted_obj : %(klass)s
+        sorted_obj : DataFrame or None
+            DataFrame with sorted values if inplace=False, None otherwise.
 
         Examples
         --------
         >>> df = pd.DataFrame({
-        ...     'col1' : ['A', 'A', 'B', np.nan, 'D', 'C'],
-        ...     'col2' : [2, 1, 9, 8, 7, 4],
+        ...     'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
+        ...     'col2': [2, 1, 9, 8, 7, 4],
         ...     'col3': [0, 1, 9, 4, 2, 3],
         ... })
         >>> df
@@ -4056,32 +4058,35 @@ def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
     def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
                    kind='quicksort', na_position='last', sort_remaining=True):
         """
-        Sort object by labels (along an axis)
+        Sort object by labels (along an axis).
 
         Parameters
         ----------
-        axis : %(axes)s to direct sorting
+        axis : {0 or 'index', 1 or 'columns'}, default 0
+            The axis along which to sort.  The value 0 identifies the rows,
+            and 1 identifies the columns.
         level : int or level name or list of ints or list of level names
-            if not None, sort on values in specified index level(s)
-        ascending : boolean, default True
-            Sort ascending vs. descending
+            If not None, sort on values in specified index level(s).
+        ascending : bool, default True
+            Sort ascending vs. descending.
         inplace : bool, default False
-            if True, perform operation in-place
+            If True, perform operation in-place.
         kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
-             Choice of sorting algorithm. See also ndarray.np.sort for more
-             information.  `mergesort` is the only stable algorithm. For
-             DataFrames, this option is only applied when sorting on a single
-             column or label.
+            Choice of sorting algorithm. See also ndarray.np.sort for more
+            information.  `mergesort` is the only stable algorithm. For
+            DataFrames, this option is only applied when sorting on a single
+            column or label.
         na_position : {'first', 'last'}, default 'last'
-             `first` puts NaNs at the beginning, `last` puts NaNs at the end.
-             Not implemented for MultiIndex.
+            Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
+            Not implemented for MultiIndex.
         sort_remaining : bool, default True
-            if true and sorting by level and index is multilevel, sort by other
-            levels too (in order) after sorting by specified level
+            If True and sorting by level and index is multilevel, sort by other
+            levels too (in order) after sorting by specified level.
 
         Returns
         -------
-        sorted_obj : %(klass)s
+        sorted_obj : DataFrame or None
+            DataFrame with sorted index if inplace=False, None otherwise.
         """
         inplace = validate_bool_kwarg(inplace, 'inplace')
         axis = self._get_axis_number(axis)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0c8e697c572e8..a25aa86a47927 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2857,13 +2857,13 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
             If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.
             Not implemented for MultiIndex.
         sort_remaining : bool, default True
-            If true and sorting by level and index is multilevel, sort by other
+            If True and sorting by level and index is multilevel, sort by other
             levels too (in order) after sorting by specified level.
 
         Returns
         -------
         pandas.Series
-            The original Series sorted by the labels
+            The original Series sorted by the labels.
 
         See Also
         --------

From 22b279569e4abef2c727a5dd7f990558268e3409 Mon Sep 17 00:00:00 2001
From: Kaiqi Dong <kaiqi@kth.se>
Date: Sun, 27 Jan 2019 22:17:43 +0100
Subject: [PATCH 43/48] changes based on reviews

---
 pandas/plotting/_core.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 8002811acc88a..cb7df013c4771 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -309,10 +309,10 @@ def _setup_subplots(self):
 
         axes = _flatten(axes)
 
-        valid_log = [False, True, 'sym']
-        for i in [self.logx, self.logy, self.loglog]:
+        valid_log = [False, True, 'sym', None]
+        for i in (self.logx, self.logy, self.loglog):
             if i not in valid_log:
-                raise ValueError("Wrong input for log option.")
+                raise ValueError("Valid inputs are boolean, None and 'sym'.")
 
         if self.logx is True or self.loglog is True:
             [a.set_xscale('log') for a in axes]

From f7241137fe692c37146745cfaacba0ae188baf87 Mon Sep 17 00:00:00 2001
From: Kaiqi Dong <kaiqi@kth.se>
Date: Tue, 22 Jan 2019 00:23:18 +0100
Subject: [PATCH 44/48] add sym option for logx logy loglog and tests

---
 pandas/plotting/_core.py            | 14 ++++++++++----
 pandas/tests/plotting/test_frame.py |  6 ++++++
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index e543ab88f53b2..ce5966ebc9c34 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -82,9 +82,9 @@ def _kind(self):
     _default_rot = 0
     orientation = None
     _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
-                       'mark_right', 'stacked']
+                       'mark_right', 'stacked', 'sym']
     _attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
-                      'mark_right': True, 'stacked': False}
+                      'mark_right': True, 'stacked': False, 'sym': False}
 
     def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
                  sharey=False, use_index=True,
@@ -309,9 +309,15 @@ def _setup_subplots(self):
         axes = _flatten(axes)
 
         if self.logx or self.loglog:
-            [a.set_xscale('log') for a in axes]
+            if self.sym:
+                [a.set_xscale('symlog') for a in axes]
+            else:
+                [a.set_xscale('log') for a in axes]
         if self.logy or self.loglog:
-            [a.set_yscale('log') for a in axes]
+            if self.sym:
+                [a.set_yscale('symlog') for a in axes]
+            else:
+                [a.set_yscale('log') for a in axes]
 
         self.fig = fig
         self.axes = axes
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 98b241f5c8206..93508d1f99d91 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -233,12 +233,18 @@ def test_logscales(self):
         df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
         ax = df.plot(logy=True)
         self._check_ax_scales(ax, yaxis='log')
+        ax = df.plot(logy=True, sym=True)
+        self._check_ax_scales(ax, yaxis='symlog')
 
         ax = df.plot(logx=True)
         self._check_ax_scales(ax, xaxis='log')
+        ax = df.plot(logx=True, sym=True)
+        self._check_ax_scales(ax, xaxis='symlog')
 
         ax = df.plot(loglog=True)
         self._check_ax_scales(ax, xaxis='log', yaxis='log')
+        ax = df.plot(loglog=True, sym=True)
+        self._check_ax_scales(ax, xaxis='symlog', yaxis='symlog')
 
     @pytest.mark.slow
     def test_xcompat(self):

From aab92aef385589fc32cfbf03b88bccd181c71567 Mon Sep 17 00:00:00 2001
From: Kaiqi Dong <kaiqi@kth.se>
Date: Tue, 22 Jan 2019 23:04:44 +0100
Subject: [PATCH 45/48] changes based on reviews

---
 pandas/plotting/_core.py            | 28 ++++++++++++++++------------
 pandas/tests/plotting/test_frame.py | 26 +++++++++++++++++++++++---
 2 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index ce5966ebc9c34..08627a9249827 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -82,9 +82,9 @@ def _kind(self):
     _default_rot = 0
     orientation = None
     _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
-                       'mark_right', 'stacked', 'sym']
+                       'mark_right', 'stacked']
     _attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
-                      'mark_right': True, 'stacked': False, 'sym': False}
+                      'mark_right': True, 'stacked': False}
 
     def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
                  sharey=False, use_index=True,
@@ -308,16 +308,20 @@ def _setup_subplots(self):
 
         axes = _flatten(axes)
 
-        if self.logx or self.loglog:
-            if self.sym:
-                [a.set_xscale('symlog') for a in axes]
-            else:
-                [a.set_xscale('log') for a in axes]
-        if self.logy or self.loglog:
-            if self.sym:
-                [a.set_yscale('symlog') for a in axes]
-            else:
-                [a.set_yscale('log') for a in axes]
+        valid_log = [False, True, 'sym']
+        for i in [self.logx, self.logy, self.loglog]:
+            if i not in valid_log:
+                raise ValueError("Wrong input for log option.")
+
+        if self.logx is True or self.loglog is True:
+            [a.set_xscale('log') for a in axes]
+        elif self.logx == 'sym' or self.loglog == 'sym':
+            [a.set_xscale('symlog') for a in axes]
+
+        if self.logy is True or self.loglog is True:
+            [a.set_yscale('log') for a in axes]
+        elif self.logy == 'sym' or self.loglog == 'sym':
+            [a.set_yscale('symlog') for a in axes]
 
         self.fig = fig
         self.axes = axes
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 93508d1f99d91..645abc11ab732 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -231,20 +231,40 @@ def test_plot_xy(self):
     @pytest.mark.slow
     def test_logscales(self):
         df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
+
         ax = df.plot(logy=True)
         self._check_ax_scales(ax, yaxis='log')
-        ax = df.plot(logy=True, sym=True)
+        assert ax.get_yscale() == 'log'
+
+        ax = df.plot(logy='sym')
         self._check_ax_scales(ax, yaxis='symlog')
+        assert ax.get_yscale() == 'symlog'
 
         ax = df.plot(logx=True)
         self._check_ax_scales(ax, xaxis='log')
-        ax = df.plot(logx=True, sym=True)
+        assert ax.get_xscale() == 'log'
+
+        ax = df.plot(logx='sym')
         self._check_ax_scales(ax, xaxis='symlog')
+        assert ax.get_xscale() == 'symlog'
 
         ax = df.plot(loglog=True)
         self._check_ax_scales(ax, xaxis='log', yaxis='log')
-        ax = df.plot(loglog=True, sym=True)
+        assert ax.get_xscale() == 'log'
+        assert ax.get_yscale() == 'log'
+
+        ax = df.plot(loglog='sym')
         self._check_ax_scales(ax, xaxis='symlog', yaxis='symlog')
+        assert ax.get_xscale() == 'symlog'
+        assert ax.get_yscale() == 'symlog'
+
+    @pytest.mark.parametrize("wrong_input", ["sm", "symlog"])
+    def test_invalid_logscale(self, wrong_input):
+        df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
+
+        msg = "Wrong input for log option."
+        with pytest.raises(ValueError, match=msg):
+            df.plot(logy=wrong_input)
 
     @pytest.mark.slow
     def test_xcompat(self):

From c04601a71dec5caeb73a96fadb52826deea31320 Mon Sep 17 00:00:00 2001
From: Kaiqi Dong <kaiqi@kth.se>
Date: Sun, 27 Jan 2019 22:17:43 +0100
Subject: [PATCH 46/48] changes based on reviews

---
 pandas/plotting/_core.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 08627a9249827..12810c73f11f0 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -308,10 +308,10 @@ def _setup_subplots(self):
 
         axes = _flatten(axes)
 
-        valid_log = [False, True, 'sym']
-        for i in [self.logx, self.logy, self.loglog]:
+        valid_log = [False, True, 'sym', None]
+        for i in (self.logx, self.logy, self.loglog):
             if i not in valid_log:
-                raise ValueError("Wrong input for log option.")
+                raise ValueError("Valid inputs are boolean, None and 'sym'.")
 
         if self.logx is True or self.loglog is True:
             [a.set_xscale('log') for a in axes]

From 41b08c83c6c753f00a0d0cd2e17d5181f4702f65 Mon Sep 17 00:00:00 2001
From: Kaiqi Dong <kaiqi@kth.se>
Date: Sun, 27 Jan 2019 22:48:26 +0100
Subject: [PATCH 47/48] add whatsnew

---
 doc/source/whatsnew/v0.25.0.rst | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 5129449e4fdf3..1bc91fc51c0ba 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -18,10 +18,7 @@ including other versions of pandas.
 
 Other Enhancements
 ^^^^^^^^^^^^^^^^^^
-
--
--
--
+- :func:`DataFrame.plot` keywords ``logx``, ``logy``, and ``loglog`` can now accept the value ``'sym'`` to enable symlog scaling.
 
 
 .. _whatsnew_0250.api_breaking:

From 0bd227f645ea99c740bf15742c9765d5a948bdc2 Mon Sep 17 00:00:00 2001
From: Kaiqi Dong <kaiqi@kth.se>
Date: Sun, 27 Jan 2019 22:58:59 +0100
Subject: [PATCH 48/48] all my changes in a single change

---
 doc/source/whatsnew/v0.25.0.rst     |  5 +----
 pandas/plotting/_core.py            | 14 ++++++++++++--
 pandas/tests/plotting/test_frame.py | 26 ++++++++++++++++++++++++++
 3 files changed, 39 insertions(+), 6 deletions(-)

diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 5129449e4fdf3..1bc91fc51c0ba 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -18,10 +18,7 @@ including other versions of pandas.
 
 Other Enhancements
 ^^^^^^^^^^^^^^^^^^
-
--
--
--
+- :func:`DataFrame.plot` keywords ``logx``, ``logy``, and ``loglog`` can now accept the value ``'sym'`` to enable symlog scaling.
 
 
 .. _whatsnew_0250.api_breaking:
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index e543ab88f53b2..12810c73f11f0 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -308,10 +308,20 @@ def _setup_subplots(self):
 
         axes = _flatten(axes)
 
-        if self.logx or self.loglog:
+        valid_log = [False, True, 'sym', None]
+        for i in (self.logx, self.logy, self.loglog):
+            if i not in valid_log:
+                raise ValueError("Valid inputs are boolean, None and 'sym'.")
+
+        if self.logx is True or self.loglog is True:
             [a.set_xscale('log') for a in axes]
-        if self.logy or self.loglog:
+        elif self.logx == 'sym' or self.loglog == 'sym':
+            [a.set_xscale('symlog') for a in axes]
+
+        if self.logy is True or self.loglog is True:
             [a.set_yscale('log') for a in axes]
+        elif self.logy == 'sym' or self.loglog == 'sym':
+            [a.set_yscale('symlog') for a in axes]
 
         self.fig = fig
         self.axes = axes
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 98b241f5c8206..645abc11ab732 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -231,14 +231,40 @@ def test_plot_xy(self):
     @pytest.mark.slow
     def test_logscales(self):
         df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
+
         ax = df.plot(logy=True)
         self._check_ax_scales(ax, yaxis='log')
+        assert ax.get_yscale() == 'log'
+
+        ax = df.plot(logy='sym')
+        self._check_ax_scales(ax, yaxis='symlog')
+        assert ax.get_yscale() == 'symlog'
 
         ax = df.plot(logx=True)
         self._check_ax_scales(ax, xaxis='log')
+        assert ax.get_xscale() == 'log'
+
+        ax = df.plot(logx='sym')
+        self._check_ax_scales(ax, xaxis='symlog')
+        assert ax.get_xscale() == 'symlog'
 
         ax = df.plot(loglog=True)
         self._check_ax_scales(ax, xaxis='log', yaxis='log')
+        assert ax.get_xscale() == 'log'
+        assert ax.get_yscale() == 'log'
+
+        ax = df.plot(loglog='sym')
+        self._check_ax_scales(ax, xaxis='symlog', yaxis='symlog')
+        assert ax.get_xscale() == 'symlog'
+        assert ax.get_yscale() == 'symlog'
+
+    @pytest.mark.parametrize("wrong_input", ["sm", "symlog"])
+    def test_invalid_logscale(self, wrong_input):
+        df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
+
+        msg = "Valid inputs are boolean, None and 'sym'."
+        with pytest.raises(ValueError, match=msg):
+            df.plot(logy=wrong_input)
 
     @pytest.mark.slow
     def test_xcompat(self):