
"""Partial dependence plots for regression and classification models."""

from collections.abc import Iterable

import numpy as np
from scipy import sparse
from scipy.stats.mstats import mquantiles

from ._pd_utils import _check_feature_names, _get_feature_index
from ..base import is_classifier, is_regressor
from ..utils.extmath import cartesian
from ..utils import check_array
from ..utils import check_matplotlib_support  # noqa
from ..utils import _safe_indexing
from ..utils import _safe_assign
from ..utils import _determine_key_type
from ..utils import _get_column_indices
from ..utils.validation import check_is_fitted
from ..utils import Bunch
from ..tree import DecisionTreeRegressor
from ..ensemble import RandomForestRegressor
from ..exceptions import NotFittedError
from ..ensemble._gb import BaseGradientBoosting
from ..ensemble._hist_gradient_boosting.gradient_boosting import (
    BaseHistGradientBoosting,
)


__all__ = [
    "partial_dependence",
]


def _grid_from_X(X, percentiles, is_categorical, grid_resolution):
    """Generate a grid of points based on the percentiles of X.

    The grid is a cartesian product between the columns of ``values``. The
    j-th column of ``values`` consists of ``grid_resolution`` equally-spaced
    points between the percentiles of the j-th column of X.

    If ``grid_resolution`` is bigger than the number of unique values in the
    j-th column of X or if the feature is a categorical feature (by inspecting
    `is_categorical`), then those unique values will be used instead.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_target_features)
        The data.

    percentiles : tuple of float
        The percentiles which are used to construct the extreme values of
        the grid. Must be in [0, 1].

    is_categorical : list of bool
        For each feature, tells whether it is categorical or not. If a feature
        is categorical, then the values used will be the unique ones
        (i.e. categories) instead of the percentiles.

    grid_resolution : int
        The number of equally spaced points to be placed on the grid for each
        feature.

    Returns
    -------
    grid : ndarray of shape (n_points, n_target_features)
        A value for each feature at each point in the grid. ``n_points`` is
        always ``<= grid_resolution ** X.shape[1]``.

    values : list of 1d ndarrays
        The values with which the grid has been created. The size of each
        array ``values[j]`` is either ``grid_resolution``, or the number of
        unique values in ``X[:, j]``, whichever is smaller.
    """
    if not isinstance(percentiles, Iterable) or len(percentiles) != 2:
        raise ValueError("'percentiles' must be a sequence of 2 elements.")
    if not all(0 <= x <= 1 for x in percentiles):
        raise ValueError("'percentiles' values must be in [0, 1].")
    if percentiles[0] >= percentiles[1]:
        raise ValueError("percentiles[0] must be strictly less than percentiles[1].")
    if grid_resolution <= 1:
        raise ValueError("'grid_resolution' must be strictly greater than 1.")

    values = []
    for feature, is_cat in enumerate(is_categorical):
        try:
            uniques = np.unique(_safe_indexing(X, feature, axis=1))
        except TypeError as exc:
            # `np.unique` sorts the values; mixing `np.nan` with `str`
            # categories makes the column unsortable.
            raise ValueError(
                f"The column #{feature} contains mixed data types. Finding unique "
                "categories fail due to sorting. It usually means that the column "
                "contains `np.nan` values together with `str` categories. Such use "
                "case is not yet supported in scikit-learn."
            ) from exc
        if is_cat or uniques.shape[0] < grid_resolution:
            # Use the unique values either because the feature is categorical
            # or because it has fewer unique values than `grid_resolution`.
            axis = uniques
        else:
            # Create the axis from the empirical percentiles and the grid
            # resolution.
            emp_percentiles = mquantiles(
                _safe_indexing(X, feature, axis=1), prob=percentiles, axis=0
            )
            if np.allclose(emp_percentiles[0], emp_percentiles[1]):
                raise ValueError(
                    "percentiles are too close to each other, "
                    "unable to build the grid. Please choose percentiles "
                    "that are further apart."
                )
            axis = np.linspace(
                emp_percentiles[0],
                emp_percentiles[1],
                num=grid_resolution,
                endpoint=True,
            )
        values.append(axis)

    return cartesian(values), values


def _partial_dependence_recursion(est, grid, features):
    averaged_predictions = est._compute_partial_dependence_recursion(grid, features)
    if averaged_predictions.ndim == 1:
        # Reshape to (1, n_points) for consistency with
        # _partial_dependence_brute.
        averaged_predictions = averaged_predictions.reshape(1, -1)

    return averaged_predictions


def _partial_dependence_brute(est, grid, features, X, response_method):
    predictions = []
    averaged_predictions = []

    # Define the prediction method (predict, predict_proba, decision_function).
    if is_regressor(est):
        prediction_method = est.predict
    else:
        predict_proba = getattr(est, "predict_proba", None)
        decision_function = getattr(est, "decision_function", None)
        if response_method == "auto":
            # Try predict_proba, then decision_function if it doesn't exist.
            prediction_method = predict_proba or decision_function
        else:
            prediction_method = (
                predict_proba
                if response_method == "predict_proba"
                else decision_function
            )
        if prediction_method is None:
            if response_method == "auto":
                raise ValueError(
                    "The estimator has no predict_proba and no "
                    "decision_function method."
                )
            elif response_method == "predict_proba":
                raise ValueError("The estimator has no predict_proba method.")
            else:
                raise ValueError("The estimator has no decision_function method.")

    X_eval = X.copy()
    for new_values in grid:
        for i, variable in enumerate(features):
            _safe_assign(X_eval, new_values[i], column_indexer=variable)

        try:
            pred = prediction_method(X_eval)
            predictions.append(pred)
            # Average over the samples for this grid point.
            averaged_predictions.append(np.mean(pred, axis=0))
        except NotFittedError as e:
            raise ValueError("'estimator' parameter must be a fitted estimator") from e

    n_samples = X.shape[0]

    # Reshape predictions to (n_targets, n_instances, n_points) where
    # n_targets is 1 for non-multioutput regression and binary classification
    # (positive class only), n_tasks for multi-output regression, and
    # n_classes for multiclass classification.
    predictions = np.array(predictions).T
    if is_regressor(est) and predictions.ndim == 2:
        # Non-multioutput regression, shape is (n_instances, n_points).
        predictions = predictions.reshape(n_samples, -1)
    elif is_classifier(est) and predictions.shape[0] == 2:
        # Binary classification, shape is (2, n_instances, n_points);
        # keep the effect of the positive class only.
        predictions = predictions[1]
        predictions = predictions.reshape(n_samples, -1)

    # Reshape averaged_predictions to (n_targets, n_points).
    averaged_predictions = np.array(averaged_predictions).T
    if is_regressor(est) and averaged_predictions.ndim == 1:
        # Non-multioutput regression, shape is (n_points,).
        averaged_predictions = averaged_predictions.reshape(1, -1)
    elif is_classifier(est) and averaged_predictions.shape[0] == 2:
        # Binary classification, shape is (2, n_points);
        # keep the effect of the positive class only.
        averaged_predictions = averaged_predictions[1]
        averaged_predictions = averaged_predictions.reshape(1, -1)

    return averaged_predictions, predictions


def partial_dependence(
    estimator,
    X,
    features,
    *,
    categorical_features=None,
    feature_names=None,
    response_method="auto",
    percentiles=(0.05, 0.95),
    grid_resolution=100,
    method="auto",
    kind="average",
):
    """Partial dependence of ``features``.

    Partial dependence of a feature (or a set of features) corresponds to
    the average response of an estimator for each possible value of the
    feature.

    Read more in the :ref:`User Guide <partial_dependence>`.

    .. warning::

        For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
        :class:`~sklearn.ensemble.GradientBoostingRegressor`, the
        `'recursion'` method (used by default) will not account for the `init`
        predictor of the boosting process. In practice, this will produce
        the same values as `'brute'` up to a constant offset in the target
        response, provided that `init` is a constant estimator (which is the
        default). However, if `init` is not a constant estimator, the
        partial dependence values are incorrect for `'recursion'` because the
        offset will be sample-dependent. It is preferable to use the `'brute'`
        method. Note that this only applies to
        :class:`~sklearn.ensemble.GradientBoostingClassifier` and
        :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
        :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
        :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.

    Parameters
    ----------
    estimator : BaseEstimator
        A fitted estimator object implementing :term:`predict`,
        :term:`predict_proba`, or :term:`decision_function`.
        Multioutput-multiclass classifiers are not supported.

    X : {array-like or dataframe} of shape (n_samples, n_features)
        ``X`` is used to generate a grid of values for the target
        ``features`` (where the partial dependence will be evaluated), and
        also to generate values for the complement features when the
        `method` is 'brute'.

    features : array-like of {int, str}
        The feature (e.g. `[0]`) or pair of interacting features
        (e.g. `[(0, 1)]`) for which the partial dependency should be computed.

    categorical_features : array-like of shape (n_features,) or shape \
            (n_categorical_features,), dtype={bool, int, str}, default=None
        Indicates the categorical features.

        - `None`: no feature will be considered categorical;
        - boolean array-like: boolean mask of shape `(n_features,)`
            indicating which features are categorical. Thus, this array has
            the same length as `X.shape[1]`;
        - integer or string array-like: integer indices or strings
            indicating categorical features.

        .. versionadded:: 1.2

    feature_names : array-like of shape (n_features,), dtype=str, default=None
        Name of each feature; `feature_names[i]` holds the name of the feature
        with index `i`.
        By default, the name of the feature corresponds to their numerical
        index for NumPy array and their column name for pandas dataframe.

        .. versionadded:: 1.2

    response_method : {'auto', 'predict_proba', 'decision_function'}, \
            default='auto'
        Specifies whether to use :term:`predict_proba` or
        :term:`decision_function` as the target response. For regressors
        this parameter is ignored and the response is always the output of
        :term:`predict`. By default, :term:`predict_proba` is tried first
        and we revert to :term:`decision_function` if it doesn't exist. If
        ``method`` is 'recursion', the response is always the output of
        :term:`decision_function`.

    percentiles : tuple of float, default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the grid. Must be in [0, 1].

    grid_resolution : int, default=100
        The number of equally spaced points on the grid, for each target
        feature.

    method : {'auto', 'recursion', 'brute'}, default='auto'
        The method used to calculate the averaged predictions:

        - `'recursion'` is only supported for some tree-based estimators
          (namely
          :class:`~sklearn.ensemble.GradientBoostingClassifier`,
          :class:`~sklearn.ensemble.GradientBoostingRegressor`,
          :class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
          :class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
          :class:`~sklearn.tree.DecisionTreeRegressor`,
          :class:`~sklearn.ensemble.RandomForestRegressor`,
          ) when `kind='average'`.
          This is more efficient in terms of speed.
          With this method, the target response of a
          classifier is always the decision function, not the predicted
          probabilities. Since the `'recursion'` method implicitly computes
          the average of the Individual Conditional Expectation (ICE) by
          design, it is not compatible with ICE and thus `kind` must be
          `'average'`.

        - `'brute'` is supported for any estimator, but is more
          computationally intensive.

        - `'auto'`: the `'recursion'` is used for estimators that support it,
          and `'brute'` is used otherwise.

        Please see :ref:`this note <pdp_method_differences>` for
        differences between the `'brute'` and `'recursion'` method.

    kind : {'average', 'individual', 'both'}, default='average'
        Whether to return the partial dependence averaged across all the
        samples in the dataset or one value per sample or both.
        See Returns below.

        Note that the fast `method='recursion'` option is only available for
        `kind='average'`. Computing individual dependencies requires using the
        slower `method='brute'` option.

        .. versionadded:: 0.24

    Returns
    -------
    predictions : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        individual : ndarray of shape (n_outputs, n_instances, \
                len(values[0]), len(values[1]), ...)
            The predictions for all the points in the grid for all
            samples in X. This is also known as Individual
            Conditional Expectation (ICE).
            Only available when ``kind='individual'`` or ``kind='both'``.

        average : ndarray of shape (n_outputs, len(values[0]), \
                len(values[1]), ...)
            The predictions for all the points in the grid, averaged
            over all samples in X (or over the training data if
            ``method`` is 'recursion').
            Only available when ``kind='average'`` or ``kind='both'``.

        values : seq of 1d ndarrays
            The values with which the grid has been created. The generated
            grid is a cartesian product of the arrays in ``values``.
            ``len(values) == len(features)``. The size of each array
            ``values[j]`` is either ``grid_resolution``, or the number of
            unique values in ``X[:, j]``, whichever is smaller.

        ``n_outputs`` corresponds to the number of classes in a multi-class
        setting, or to the number of tasks for multi-output regression.
        For classical regression and binary classification ``n_outputs==1``.
        ``n_values_feature_j`` corresponds to the size of ``values[j]``.

    See Also
    --------
    PartialDependenceDisplay.from_estimator : Plot Partial Dependence.
    PartialDependenceDisplay : Partial Dependence visualization.

    Examples
    --------
    >>> X = [[0, 0, 2], [1, 0, 0]]
    >>> y = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(X, y)
    >>> partial_dependence(gb, features=[0], X=X, percentiles=(0, 1),
    ...                    grid_resolution=2) # doctest: +SKIP
    (array([[-4.52...,  4.52...]]), [array([ 0.,  1.])])
    """
    check_is_fitted(estimator)

    if not (is_classifier(estimator) or is_regressor(estimator)):
        raise ValueError("'estimator' must be a fitted regressor or classifier.")

    if is_classifier(estimator) and isinstance(estimator.classes_[0], np.ndarray):
        raise ValueError("Multiclass-multioutput estimators are not supported")

    # Use check_array only on lists and other non-array-likes / sparse. Do not
    # convert DataFrame into a NumPy array.
    if not (hasattr(X, "__array__") or sparse.issparse(X)):
        X = check_array(X, force_all_finite="allow-nan", dtype=object)

    accepted_responses = ("auto", "predict_proba", "decision_function")
    if response_method not in accepted_responses:
        raise ValueError(
            "response_method {} is invalid. Accepted response_method names "
            "are {}.".format(response_method, ", ".join(accepted_responses))
        )

    if is_regressor(estimator) and response_method != "auto":
        raise ValueError(
            "The response_method parameter is ignored for regressors and "
            "must be 'auto'."
        )

    accepted_methods = ("brute", "recursion", "auto")
    if method not in accepted_methods:
        raise ValueError(
            "method {} is invalid. Accepted method names are {}.".format(
                method, ", ".join(accepted_methods)
            )
        )

    if kind != "average":
        if method == "recursion":
            raise ValueError(
                "The 'recursion' method only applies when 'kind' is set to 'average'"
            )
        method = "brute"

    if method == "auto":
        if isinstance(estimator, BaseGradientBoosting) and estimator.init is None:
            method = "recursion"
        elif isinstance(
            estimator,
            (BaseHistGradientBoosting, DecisionTreeRegressor, RandomForestRegressor),
        ):
            method = "recursion"
        else:
            method = "brute"

    if method == "recursion":
        if not isinstance(
            estimator,
            (
                BaseGradientBoosting,
                BaseHistGradientBoosting,
                DecisionTreeRegressor,
                RandomForestRegressor,
            ),
        ):
            supported_classes_recursion = (
                "GradientBoostingClassifier",
                "GradientBoostingRegressor",
                "HistGradientBoostingClassifier",
                "HistGradientBoostingRegressor",
                "DecisionTreeRegressor",
                "RandomForestRegressor",
            )
            raise ValueError(
                "Only the following estimators support the 'recursion' "
                "method: {}. Try using method='brute'.".format(
                    ", ".join(supported_classes_recursion)
                )
            )
        if response_method == "auto":
            response_method = "decision_function"

        if response_method != "decision_function":
            raise ValueError(
                "With the 'recursion' method, the response_method must be "
                "'decision_function'. Got {}.".format(response_method)
            )

    if _determine_key_type(features, accept_slice=False) == "int":
        # _get_column_indices() supports negative indexing. Here, we limit
        # the indexing to be positive. The upper bound will be checked
        # by _get_column_indices().
        if np.any(np.less(features, 0)):
            raise ValueError("all features must be in [0, {}]".format(X.shape[1] - 1))

    features_indices = np.asarray(
        _get_column_indices(X, features), dtype=np.int32, order="C"
    ).ravel()

    feature_names = _check_feature_names(X, feature_names)

    n_features = X.shape[1]
    if categorical_features is None:
        is_categorical = [False] * len(features_indices)
    else:
        categorical_features = np.asarray(categorical_features)
        if categorical_features.dtype.kind == "b":
            # categorical features provided as a boolean mask
            if categorical_features.size != n_features:
                raise ValueError(
                    "When `categorical_features` is a boolean array-like, "
                    "the array should be of shape (n_features,). Got "
                    f"{categorical_features.size} elements while `X` contains "
                    f"{n_features} features."
                )
            is_categorical = [categorical_features[idx] for idx in features_indices]
        elif categorical_features.dtype.kind in ("i", "O", "U"):
            # categorical features provided as indices or feature names
            categorical_features_idx = [
                _get_feature_index(cat, feature_names=feature_names)
                for cat in categorical_features
            ]
            is_categorical = [
                idx in categorical_features_idx for idx in features_indices
            ]
        else:
            raise ValueError(
                "Expected `categorical_features` to be an array-like of boolean,"
                f" integer, or string. Got {categorical_features.dtype} instead."
            )

    grid, values = _grid_from_X(
        _safe_indexing(X, features_indices, axis=1),
        percentiles,
        is_categorical,
        grid_resolution,
    )

    if method == "brute":
        averaged_predictions, predictions = _partial_dependence_brute(
            estimator, grid, features_indices, X, response_method
        )

        # Reshape predictions to
        # (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...)
        predictions = predictions.reshape(
            -1, X.shape[0], *[val.shape[0] for val in values]
        )
    else:
        averaged_predictions = _partial_dependence_recursion(
            estimator, grid, features_indices
        )

    # Reshape averaged_predictions to
    # (n_outputs, n_values_feature_0, n_values_feature_1, ...)
    averaged_predictions = averaged_predictions.reshape(
        -1, *[val.shape[0] for val in values]
    )

    if kind == "average":
        return Bunch(average=averaged_predictions, values=values)
    elif kind == "individual":
        return Bunch(individual=predictions, values=values)
    else:  # kind='both'
        return Bunch(
            average=averaged_predictions,
            individual=predictions,
            values=values,
        )
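

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): a minimal,
# hedged example of how the public `partial_dependence` function defined
# above is typically called. The dataset, estimator, and printed shapes are
# assumptions for demonstration only; the block runs only when the module is
# executed as a script (e.g. `python -m sklearn.inspection._partial_dependence`).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from sklearn.datasets import make_regression
    from sklearn.ensemble import GradientBoostingRegressor

    X_demo, y_demo = make_regression(n_samples=200, n_features=4, random_state=0)
    model = GradientBoostingRegressor(random_state=0).fit(X_demo, y_demo)

    # Averaged partial dependence of feature 0. For a gradient boosting
    # regressor with the default constant `init`, method='auto' resolves to
    # the fast 'recursion' path when kind='average'.
    pd_avg = partial_dependence(model, X_demo, features=[0], kind="average")
    print(pd_avg.average.shape)  # (1, 100): one output, grid_resolution points

    # Per-sample ICE curves require kind='individual' or 'both', which forces
    # the slower 'brute' method.
    pd_both = partial_dependence(model, X_demo, features=[0], kind="both")
    print(pd_both.individual.shape)  # (1, 200, 100): one output, n_samples, grid points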