"""
Base and utility classes for pandas objects.
"""
from __future__ import annotations

import textwrap
from typing import (
    TYPE_CHECKING,
    Any,
    Generic,
    Hashable,
    Iterator,
    Literal,
    TypeVar,
    cast,
    final,
    overload,
)

import numpy as np

from pandas._config import using_copy_on_write

from pandas._libs import lib
from pandas._typing import (
    Axis,
    AxisInt,
    DtypeObj,
    IndexLabel,
    NDFrameT,
    Shape,
    npt,
)
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
    cache_readonly,
    doc,
)

from pandas.core.dtypes.cast import can_hold_element
from pandas.core.dtypes.common import (
    is_categorical_dtype,
    is_dict_like,
    is_extension_array_dtype,
    is_object_dtype,
    is_scalar,
)
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCIndex,
    ABCSeries,
)
from pandas.core.dtypes.missing import (
    isna,
    remove_na_arraylike,
)

from pandas.core import (
    algorithms,
    nanops,
    ops,
)
from pandas.core.accessor import DirNamesMixin
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.construction import (
    ensure_wrapped_if_datetimelike,
    extract_array,
)

if TYPE_CHECKING:
    from pandas._typing import (
        DropKeep,
        NumpySorter,
        NumpyValueArrayLike,
        ScalarLike_co,
    )

    from pandas import (
        CategoricalIndex,
        Index,
        Series,
    )

_shared_docs: dict[str, str] = {}
_indexops_doc_kwargs = dict(
    klass="IndexOpsMixin",
    inplace="",
    unique="IndexOpsMixin",
    duplicated="IndexOpsMixin",
)

_T = TypeVar("_T", bound="IndexOpsMixin")


class PandasObject(DirNamesMixin):
    """
    Baseclass for various pandas objects.
    """

    # results from calls to methods decorated with cache_readonly get added to _cache
    _cache: dict[str, Any]

    @property
    def _constructor(self):
        """
        Class constructor (for this class it's just `__class__`.
        """
        return type(self)

    def __repr__(self) -> str:
        """
        Return a string representation for a particular object.
        """
        # Should be overwritten by base classes
        return object.__repr__(self)

    def _reset_cache(self, key: str | None = None) -> None:
        """
        Reset cached properties. If ``key`` is passed, only clears that key.
        """
        if not hasattr(self, "_cache"):
            return
        if key is None:
            self._cache.clear()
        else:
            self._cache.pop(key, None)

    def __sizeof__(self) -> int:
        """
        Generates the total memory usage for an object that returns
        either a value or Series of values
        """
        memory_usage = getattr(self, "memory_usage", None)
        if memory_usage:
            mem = memory_usage(deep=True)
            return int(mem if is_scalar(mem) else mem.sum())

        # no memory_usage attribute, so fall back to object's 'sizeof'
        return super().__sizeof__()


class NoNewAttributesMixin:
    """
    Mixin which prevents adding new attributes.

    Prevents additional attributes via xxx.attribute = "something" after a
    call to `self.__freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).

    If you really want to add a new attribute at a later time, you need to use
    `object.__setattr__(self, key, value)`.
    """

    def _freeze(self) -> None:
        """
        Prevents setting additional attributes.
        """
        object.__setattr__(self, "__frozen", True)

    # prevent adding any attribute via s.xxx.new_attribute = ...
    def __setattr__(self, key: str, value) -> None:
        # _cache is used by a decorator
        # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
        # because
        # 1.) getattr is false for attributes that raise errors
        # 2.) cls.__dict__ doesn't traverse into base classes
        if getattr(self, "__frozen", False) and not (
            key == "_cache"
            or key in type(self).__dict__
            or getattr(self, key, None) is not None
        ):
            raise AttributeError(f"You cannot add any new attribute '{key}'")
        object.__setattr__(self, key, value)


class SelectionMixin(Generic[NDFrameT]):
    """
    mixin implementing the selection & aggregation interface on a group-like
    object sub-classes need to define: obj, exclusions
    """

    obj: NDFrameT
    _selection: IndexLabel | None = None
    exclusions: frozenset[Hashable]
    _internal_names = ["_cache", "__setstate__"]
    _internal_names_set = set(_internal_names)

    @final
    @property
    def _selection_list(self):
        if not isinstance(
            self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)
        ):
            return [self._selection]
        return self._selection

    @cache_readonly
    def _selected_obj(self):
        if self._selection is None or isinstance(self.obj, ABCSeries):
            return self.obj
        else:
            return self.obj[self._selection]

    @final
    @cache_readonly
    def ndim(self) -> int:
        return self._selected_obj.ndim

    @final
    @cache_readonly
    def _obj_with_exclusions(self):
        if isinstance(self.obj, ABCSeries):
            return self.obj

        if self._selection is not None:
            return self.obj._getitem_nocopy(self._selection_list)

        if len(self.exclusions) > 0:
            # equivalent to `self.obj.drop(self.exclusions, axis=1)`
            #  but this avoids consolidating and making a copy
            return self.obj._drop_axis(self.exclusions, axis=1, only_slice=True)
        else:
            return self.obj

    def __getitem__(self, key):
        if self._selection is not None:
            raise IndexError(f"Column(s) {self._selection} already selected")

        if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
            if len(self.obj.columns.intersection(key)) != len(set(key)):
                bad_keys = list(set(key).difference(self.obj.columns))
                raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
            return self._gotitem(list(key), ndim=2)

        else:
            if key not in self.obj:
                raise KeyError(f"Column not found: {key}")
            ndim = self.obj[key].ndim
            return self._gotitem(key, ndim=ndim)

    def _gotitem(self, key, ndim: int, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : str / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        raise AbstractMethodError(self)

    def aggregate(self, func, *args, **kwargs):
        raise AbstractMethodError(self)

    agg = aggregate


class IndexOpsMixin(OpsMixin):
    """
    Common ops mixin to support a unified interface / docs for Series / Index
    """

    # ndarray compatibility
    __array_priority__ = 1000
    _hidden_attrs: frozenset[str] = frozenset(
        ["tolist"]  # tolist is not deprecated, just suppressed in the __dir__
    )

    @property
    def dtype(self) -> DtypeObj:
        # must be defined here as a property for mypy
        raise AbstractMethodError(self)

    @property
    def _values(self) -> ExtensionArray | np.ndarray:
        # must be defined here as a property for mypy
        raise AbstractMethodError(self)

    @final
    def transpose(self: _T, *args, **kwargs) -> _T:
        """
        Return the transpose, which is by definition self.

        Returns
        -------
        %(klass)s
        """
        nv.validate_transpose(args, kwargs)
        return self

    T = property(
        transpose,
        doc="""
        Return the transpose, which is by definition self.
        """,
    )

    @property
    def shape(self) -> Shape:
        """
        Return a tuple of the shape of the underlying data.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.shape
        (3,)
        """
        return self._values.shape

    def __len__(self) -> int:
        # We need this defined here for mypy
        raise AbstractMethodError(self)

    @property
    def ndim(self) -> Literal[1]:
        """
        Number of dimensions of the underlying data, by definition 1.
        """
        return 1

    @final
    def item(self):
        """
        Return the first element of the underlying data as a Python scalar.

        Returns
        -------
        scalar
            The first element of %(klass)s.

        Raises
        ------
        ValueError
            If the data is not length-1.
        """
        if len(self) == 1:
            return next(iter(self))
        raise ValueError("can only convert an array of size 1 to a Python scalar")

    @property
    def nbytes(self) -> int:
        """
        Return the number of bytes in the underlying data.
        """
        return self._values.nbytes

    @property
    def size(self) -> int:
        """
        Return the number of elements in the underlying data.
        """
        return len(self._values)

    @property
    def array(self) -> ExtensionArray:
        """
        The ExtensionArray of the data backing this Series or Index.

        Returns
        -------
        ExtensionArray
            An ExtensionArray of the values stored within. For extension
            types, this is the actual array. For NumPy native types, this
            is a thin (no copy) wrapper around :class:`numpy.ndarray`.

            ``.array`` differs from ``.values``, which may require converting
            the data to a different form.

        See Also
        --------
        Index.to_numpy : Similar method that always returns a NumPy array.
        Series.to_numpy : Similar method that always returns a NumPy array.
        """
        raise AbstractMethodError(self)

    @final
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
        **kwargs,
    ) -> np.ndarray:
        """
        A NumPy ndarray representing the values in this Series or Index.

        Parameters
        ----------
        dtype : str or numpy.dtype, optional
            The dtype to pass to :meth:`numpy.asarray`.
        copy : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
            a copy is made, even if not strictly necessary.
        na_value : Any, optional
            The value to use for missing values. The default value depends
            on `dtype` and the type of the array.
        **kwargs
            Additional keywords passed through to the ``to_numpy`` method
            of the underlying array (for extension arrays).

        Returns
        -------
        numpy.ndarray

        See Also
        --------
        Series.array : Get the actual data stored within.
        Index.array : Get the actual data stored within.
        DataFrame.to_numpy : Similar method for DataFrame.
        """
        if is_extension_array_dtype(self.dtype):
            return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs)
        elif kwargs:
            bad_keys = list(kwargs.keys())[0]
            raise TypeError(
                f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
            )

        if na_value is not lib.no_default:
            values = self._values
            if not can_hold_element(values, na_value):
                # can't hold the na_value as-is, so go through np.asarray
                values = np.asarray(values, dtype=dtype)
            else:
                values = values.copy()

            values[np.asanyarray(isna(self))] = na_value
        else:
            values = self._values

        result = np.asarray(values, dtype=dtype)

        if (copy and na_value is lib.no_default) or (
            not copy and using_copy_on_write()
        ):
            if np.shares_memory(self._values[:2], result[:2]):
                # Take slices to improve performance of check
                if using_copy_on_write() and not copy:
                    result = result.view()
                    result.flags.writeable = False
                else:
                    result = result.copy()

        return result

    @property
    def empty(self) -> bool:
        return not self.size

    def max(self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs):
        """
        Return the maximum value of the Index.

        Parameters
        ----------
        axis : int, optional
            For compatibility with NumPy. Only 0 or None are allowed.
        skipna : bool, default True
            Exclude NA/null values when showing the result.
        *args, **kwargs
            Additional arguments and keywords for compatibility with NumPy.

        Returns
        -------
        scalar
            Maximum value.

        See Also
        --------
        Index.min : Return the minimum value in an Index.
        Series.max : Return the maximum value in a Series.
        DataFrame.max : Return the maximum values in a DataFrame.
        """
        nv.validate_minmax_axis(axis)
        nv.validate_max(args, kwargs)
        return nanops.nanmax(self._values, skipna=skipna)

    @doc(op="max", oppose="min", value="largest")
    def argmax(
        self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
    ) -> int:
        """
        Return int position of the {value} value in the Series.

        If the {op}imum is achieved in multiple locations,
        the first row position is returned.

        Parameters
        ----------
        axis : {{None}}
            Unused. Parameter needed for compatibility with DataFrame.
        skipna : bool, default True
            Exclude NA/null values when showing the result.
        *args, **kwargs
            Additional arguments and keywords for compatibility with NumPy.

        Returns
        -------
        int
            Row position of the {op}imum value.

        See Also
        --------
        Series.arg{op} : Return position of the {op}imum value.
        Series.arg{oppose} : Return position of the {oppose}imum value.
        numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
        Series.idxmax : Return index label of the maximum values.
        Series.idxmin : Return index label of the minimum values.
        """
        delegate = self._values
        nv.validate_minmax_axis(axis)
        skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)

        if isinstance(delegate, ExtensionArray):
            if not skipna and delegate.isna().any():
                return -1
            else:
                return delegate.argmax()
        else:
            return nanops.nanargmax(delegate, skipna=skipna)

    def min(self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs):
        """
        Return the minimum value of the Index.

        Parameters
        ----------
        axis : {None}
            Dummy argument for consistency with Series.
        skipna : bool, default True
            Exclude NA/null values when showing the result.
        *args, **kwargs
            Additional arguments and keywords for compatibility with NumPy.

        Returns
        -------
        scalar
            Minimum value.

        See Also
        --------
        Index.max : Return the maximum value of the object.
        Series.min : Return the minimum value in a Series.
        DataFrame.min : Return the minimum values in a DataFrame.
        """
        nv.validate_minmax_axis(axis)
        nv.validate_min(args, kwargs)
        return nanops.nanmin(self._values, skipna=skipna)

    @doc(argmax, op="min", oppose="max", value="smallest")
    def argmin(
        self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
    ) -> int:
        delegate = self._values
        nv.validate_minmax_axis(axis)
        skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)

        if isinstance(delegate, ExtensionArray):
            if not skipna and delegate.isna().any():
                return -1
            else:
                return delegate.argmin()
        else:
            return nanops.nanargmin(delegate, skipna=skipna)

    def tolist(self):
        """
        Return a list of the values.

        These are each a scalar type, which is a Python scalar
        (for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period)

        Returns
        -------
        list

        See Also
        --------
        numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
            nested list of Python scalars.
        """
        return self._values.tolist()

    to_list = tolist

    def __iter__(self) -> Iterator:
        """
        Return an iterator of the values.

        These are each a scalar type, which is a Python scalar
        (for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period)

        Returns
        -------
        iterator
        """
        # We are explicitly making element iterators.
        if not isinstance(self._values, np.ndarray):
            # Check type instead of dtype to catch DTA/TDA
            return iter(self._values)
        else:
            return map(self._values.item, range(self._values.size))

    @cache_readonly
    def hasnans(self) -> bool:
        """
        Return True if there are any NaNs.

        Enables various performance speedups.

        Returns
        -------
        bool
        """
        return bool(isna(self).any())

    @final
    def isna(self) -> npt.NDArray[np.bool_]:
        return isna(self._values)

    def _reduce(
        self,
        op,
        name: str,
        *,
        axis: Axis = 0,
        skipna: bool = True,
        numeric_only=None,
        filter_type=None,
        **kwds,
    ):
        """
        Perform the reduction type operation if we can.
        """
        func = getattr(self, name, None)
        if func is None:
            raise TypeError(
                f"{type(self).__name__} cannot perform the operation {name}"
            )
        return func(skipna=skipna, **kwds)

    @final
    def _map_values(self, mapper, na_action=None):
        """
        An internal function that maps values using the input
        correspondence (which can be a dict, Series, or function).

        Parameters
        ----------
        mapper : function, dict, or Series
            The input correspondence object
        na_action : {None, 'ignore'}
            If 'ignore', propagate NA values, without passing them to the
            mapping function

        Returns
        -------
        Union[Index, MultiIndex], inferred
            The output of the mapping function applied to the index.
            If the function returns a tuple with more than one element
            a MultiIndex will be returned.
        """
        # we can fastpath dict/Series to an efficient map
        # as we know that we are not going to have to yield
        # python types
        if is_dict_like(mapper):
            if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
                # If a dictionary subclass defines a default value method,
                # convert mapper to a lookup function
                dict_with_default = mapper
                mapper = lambda x: dict_with_default[
                    np.nan if isinstance(x, float) and np.isnan(x) else x
                ]
            else:
                # Dictionary does not have a default. Thus it's safe to
                # convert to a Series for efficiency.
                # The return value of mapping with an empty mapper is
                # expected to be pd.Series(np.nan, ...). As np.nan is
                # of dtype float64 the return value of this method should
                # be float64 as well.
                from pandas import Series

                if len(mapper) == 0:
                    mapper = Series(mapper, dtype=np.float64)
                else:
                    mapper = Series(mapper)

        if isinstance(mapper, ABCSeries):
            if na_action not in (None, "ignore"):
                msg = (
                    "na_action must either be 'ignore' or None, "
                    f"{na_action} was passed"
                )
                raise ValueError(msg)

            if na_action == "ignore":
                mapper = mapper[mapper.index.notna()]

            # Since values were input this means we came from either
            # a dict or a series and mapper should be an index
            if is_categorical_dtype(self.dtype):
                # use the built in categorical series mapper which saves
                # time by mapping the categories instead of all values
                cat = self._values  # Categorical
                return cat.map(mapper)

            values = self._values

            indexer = mapper.index.get_indexer(values)
            new_values = algorithms.take_nd(mapper._values, indexer)

            return new_values

        # we must convert to python types
        if is_extension_array_dtype(self.dtype) and hasattr(self._values, "map"):
            values = self._values
            if na_action is not None:
                raise NotImplementedError
            map_f = lambda values, f: values.map(f)
        else:
            values = self._values.astype(object)
            if na_action == "ignore":
                map_f = lambda values, f: lib.map_infer_mask(
                    values, f, isna(values).view(np.uint8)
                )
            elif na_action is None:
                map_f = lib.map_infer
            else:
                msg = (
                    "na_action must either be 'ignore' or None, "
                    f"{na_action} was passed"
                )
                raise ValueError(msg)

        # mapper is a function
        new_values = map_f(values, mapper)

        return new_values

    def value_counts(
        self,
        normalize: bool = False,
        sort: bool = True,
        ascending: bool = False,
        bins=None,
        dropna: bool = True,
    ) -> Series:
        """
        Return a Series containing counts of unique values.

        The resulting object will be in descending order so that the
        first element is the most frequently-occurring element.
        Excludes NA values by default.

        Parameters
        ----------
        normalize : bool, default False
            If True then the object returned will contain the relative
            frequencies of the unique values.
        sort : bool, default True
            Sort by frequencies.
        ascending : bool, default False
            Sort in ascending order.
        bins : int, optional
            Rather than count values, group them into half-open bins,
            a convenience for ``pd.cut``, only works with numeric data.
        dropna : bool, default True
            Don't include counts of NaN.

        Returns
        -------
        Series

        See Also
        --------
        Series.count: Number of non-NA elements in a Series.
        DataFrame.count: Number of non-NA elements in a DataFrame.
        DataFrame.value_counts: Equivalent method on DataFrames.
        """
        return algorithms.value_counts(
            self,
            sort=sort,
            ascending=ascending,
            normalize=normalize,
            bins=bins,
            dropna=dropna,
        )

    def unique(self):
        values = self._values
        if not isinstance(values, np.ndarray):
            # i.e. ExtensionArray
            result = values.unique()
        else:
            result = algorithms.unique1d(values)
        return result

    @final
    def nunique(self, dropna: bool = True) -> int:
        """
        Return number of unique elements in the object.

        Excludes NA values by default.

        Parameters
        ----------
        dropna : bool, default True
            Don't include NaN in the count.

        Returns
        -------
        int

        See Also
        --------
        DataFrame.nunique: Method nunique for DataFrame.
        Series.count: Count non-NA/null observations in the Series.
        """
        uniqs = self.unique()
        if dropna:
            uniqs = remove_na_arraylike(uniqs)
        return len(uniqs)

    @property
    def is_unique(self) -> bool:
        """
        Return boolean if values in the object are unique.

        Returns
        -------
        bool
        """
        return self.nunique(dropna=False) == len(self)

    @property
    def is_monotonic_increasing(self) -> bool:
        """
        Return boolean if values in the object are monotonically increasing.

        Returns
        -------
        bool
        """
        from pandas import Index

        return Index(self).is_monotonic_increasing

    @property
    def is_monotonic_decreasing(self) -> bool:
        """
        Return boolean if values in the object are monotonically decreasing.

        Returns
        -------
        bool
        """
        from pandas import Index

        return Index(self).is_monotonic_decreasing

    @final
    def _memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of the values.

        Parameters
        ----------
        deep : bool, default False
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption.

        Returns
        -------
        bytes used

        See Also
        --------
        numpy.ndarray.nbytes : Total bytes consumed by the elements of the
            array.

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False or if used on PyPy
        """
        if hasattr(self.array, "memory_usage"):
            return self.array.memory_usage(deep=deep)

        v = self.array.nbytes
        if deep and is_object_dtype(self.dtype) and not PYPY:
            values = cast(np.ndarray, self._values)
            v += lib.memory_usage_of_objects(values)
        return v

    @doc(
        algorithms.factorize,
        values="",
        order="",
        size_hint="",
        sort=textwrap.dedent(
            """\
            sort : bool, default False
                Sort `uniques` and shuffle `codes` to maintain the
                relationship.
            """
        ),
    )
    def factorize(
        self,
        sort: bool = False,
        use_na_sentinel: bool = True,
    ) -> tuple[npt.NDArray[np.intp], Index]:
        codes, uniques = algorithms.factorize(
            self._values, sort=sort, use_na_sentinel=use_na_sentinel
        )
        if uniques.dtype == np.float16:
            uniques = uniques.astype(np.float32)

        if isinstance(self, ABCIndex):
            # preserve e.g. MultiIndex
            uniques = self._constructor(uniques)
        else:
            from pandas import Index

            uniques = Index(uniques)
        return codes, uniques

    _shared_docs[
        "searchsorted"
    ] = """
        Find indices where elements should be inserted to maintain order.

        Find the indices into a sorted {klass} `self` such that, if the
        corresponding elements in `value` were inserted before the indices,
        the order of `self` would be preserved.

        .. note::

            The {klass} *must* be monotonically sorted, otherwise
            wrong locations will likely be returned. Pandas does *not*
            check this for you.

        Parameters
        ----------
        value : array-like or scalar
            Values to insert into `self`.
        side : {{'left', 'right'}}, optional
            If 'left', the index of the first suitable location found is given.
            If 'right', return the last such index.  If there is no suitable
            index, return either 0 or N (where N is the length of `self`).
        sorter : 1-D array-like, optional
            Optional array of integer indices that sort `self` into ascending
            order. They are typically the result of ``np.argsort``.

        Returns
        -------
        int or array of int
            A scalar or array of insertion points with the
            same shape as `value`.

        See Also
        --------
        sort_values : Sort by the values along either axis.
        numpy.searchsorted : Similar method from NumPy.

        Notes
        -----
        Binary search is used to find the required insertion points.

        Examples
        --------
        >>> ser = pd.Series([1, 2, 3])
        >>> ser.searchsorted(4)
        3

        >>> ser.searchsorted([0, 4])
        array([0, 3])

        >>> ser.searchsorted([1, 3], side='left')
        array([0, 2])

        >>> ser.searchsorted([1, 3], side='right')
        array([1, 3])
        """

    @overload
    def searchsorted(
        self,
        value: ScalarLike_co,
        side: Literal["left", "right"] = ...,
        sorter: NumpySorter = ...,
    ) -> np.intp:
        ...

    @overload
    def searchsorted(
        self,
        value: npt.ArrayLike | ExtensionArray,
        side: Literal["left", "right"] = ...,
        sorter: NumpySorter = ...,
    ) -> npt.NDArray[np.intp]:
        ...

    @doc(_shared_docs["searchsorted"], klass="Index")
    def searchsorted(
        self,
        value: NumpyValueArrayLike | ExtensionArray,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        if isinstance(value, ABCDataFrame):
            msg = (
                "Value must be 1-D array-like or scalar, "
                f"{type(value).__name__} is not supported"
            )
            raise TypeError(msg)

        values = self._values
        if not isinstance(values, np.ndarray):
            # Going through EA.searchsorted directly improves performance
            return values.searchsorted(value, side=side, sorter=sorter)

        return algorithms.searchsorted(
            values,
            value,
            side=side,
            sorter=sorter,
        )

    def drop_duplicates(self, *, keep: DropKeep = "first"):
        duplicated = self._duplicated(keep=keep)
        return self[~duplicated]

    @final
    def _duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
        return algorithms.duplicated(self._values, keep=keep)

    def _arith_method(self, other, op):
        res_name = ops.get_op_result_name(self, other)

        lvalues = self._values
        rvalues = extract_array(other, extract_numpy=True, extract_range=True)
        rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape)
        rvalues = ensure_wrapped_if_datetimelike(rvalues)

        with np.errstate(all="ignore"):
            result = ops.arithmetic_op(lvalues, rvalues, op)

        return self._construct_result(result, name=res_name)

    def _construct_result(self, result, name):
        """
        Construct an appropriately-wrapped result from the ArrayLike result
        of an arithmetic-like operation.
        """
        raise AbstractMethodError(self)