
"""Metrics to assess performance on regression task.

Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.

Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
    N)xlogy   )UndefinedMetricWarning)check_arraycheck_consistent_lengthcheck_scalar_num_samplescolumn_or_1d_check_sample_weight)_weighted_percentile)	max_errormean_absolute_errormean_squared_errormean_squared_log_errormedian_absolute_errormean_absolute_percentage_errormean_pinball_lossr2_scoreexplained_variance_scoremean_tweedie_deviancemean_poisson_deviancemean_gamma_devianced2_tweedie_scored2_pinball_scored2_absolute_error_scorenumericc                    t          | |           t          | d|          } t          |d|          }| j        dk    r|                     d          } |j        dk    r|                    d          }| j        d         |j        d         k    r9t          d                    | j        d         |j        d                             | j        d         }d}t          |t                    r(||vr#t          d                    ||                    n\|Zt          |d	          }|dk    rt          d
          |t          |          k    r!t          dt          |          |fz            |dk    rdnd}|| ||fS )aF  Check that y_true and y_pred belong to the same regression task.

    Parameters
    ----------
    y_true : array-like

    y_pred : array-like

    multioutput : array-like or string in ['raw_values', 'uniform_average',
        'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().

    dtype : str or list, default="numeric"
        the dtype argument passed to check_array.

    Returns
    -------
    type_true : one of {'continuous', 'continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'.

    y_true : array-like of shape (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples, n_outputs)
        Estimated target values.

    multioutput : array-like of shape (n_outputs,) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
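
    Examples
    --------
    A minimal illustration of the normalization this helper performs
    (an editor-added sketch, not part of the scikit-learn API surface):

    >>> y_type, y_true, y_pred, multioutput = _check_reg_targets(
    ...     [1, 2], [1.5, 2.5], "uniform_average"
    ... )
    >>> y_type
    'continuous'
    >>> y_true.shape
    (2, 1)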
    """
    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
    y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)

    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))

    if y_pred.ndim == 1:
        y_pred = y_pred.reshape((-1, 1))

    if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError(
            "y_true and y_pred have different number of output ({0}!={1})".format(
                y_true.shape[1], y_pred.shape[1]
            )
        )

    n_outputs = y_true.shape[1]
    allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted")
    if isinstance(multioutput, str):
        if multioutput not in allowed_multioutput_str:
            raise ValueError(
                "Allowed 'multioutput' string values are {}. "
                "You provided multioutput={!r}".format(
                    allowed_multioutput_str, multioutput
                )
            )
    elif multioutput is not None:
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in multi-output cases.")
        elif n_outputs != len(multioutput):
            raise ValueError(
                "There must be equally many custom weights (%d) as outputs (%d)."
                % (len(multioutput), n_outputs)
            )
    y_type = "continuous" if n_outputs == 1 else "continuous-multioutput"

    return y_type, y_true, y_pred, multioutput


def mean_absolute_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean absolute error regression loss.

    Read more in the :ref:`User Guide <mean_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAE output is non-negative floating point. The best value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    0.75
    >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([0.5, 1. ])
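    >>> # The weighted aggregate below combines the per-output errors shown
    >>> # above: 0.3 * 0.5 + 0.7 * 1.0 = 0.85 (illustrative arithmetic).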
    >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.85...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)
    output_errors = np.average(np.abs(y_pred - y_true), weights=sample_weight, axis=0)
    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None

    return np.average(output_errors, weights=multioutput)


def mean_pinball_loss(
    y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
):
    """Pinball loss for quantile regression.

    Read more in the :ref:`User Guide <pinball_loss>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    alpha : float, default=0.5
        Slope of the pinball loss. This loss is equivalent to
        :ref:`mean_absolute_error` when `alpha=0.5`; `alpha=0.95` is
        minimized by estimators of the 95th percentile.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        The pinball loss output is a non-negative floating point. The best
        value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_pinball_loss
    >>> y_true = [1, 2, 3]
    >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.1)
    0.03...
    >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.1)
    0.3...
    >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.9)
    0.3...
    >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.9)
    0.03...
    >>> mean_pinball_loss(y_true, y_true, alpha=0.1)
    0.0
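    >>> # Editor's sketch: at alpha=0.5 the pinball loss is half the mean
    >>> # absolute error, e.g. 0.5 * (1/3) for the first prediction above.
    >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.5)
    0.16...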
    >>> mean_pinball_loss(y_true, y_true, alpha=0.9)
    0.0
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)
    diff = y_true - y_pred
    sign = (diff >= 0).astype(diff.dtype)
    loss = alpha * sign * diff - (1 - alpha) * (1 - sign) * diff
    output_errors = np.average(loss, weights=sample_weight, axis=0)
    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None
        else:
            raise ValueError(
                "multioutput is expected to be 'raw_values' "
                "or 'uniform_average' but we got %r instead." % multioutput
            )

    return np.average(output_errors, weights=multioutput)


def mean_absolute_percentage_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean absolute percentage error (MAPE) regression loss.

    Note here that the output is not a percentage in the range [0, 100]
    and a value of 100 does not mean 100% but 1e2. Furthermore, the output
    can be arbitrarily high when `y_true` is small (which is specific to the
    metric) or when `abs(y_true - y_pred)` is large (which is common for most
    regression metrics). Read more in the
    :ref:`User Guide <mean_absolute_percentage_error>`.

    .. versionadded:: 0.24

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        If input is list then the shape must be (n_outputs,).

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute percentage error
        is returned for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAPE output is non-negative floating point. The best value is 0.0.
        But note that bad predictions can lead to arbitrarily large
        MAPE values, especially if some `y_true` values are very close to zero.
        Note that we return a large value instead of `inf` when `y_true` is zero.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_percentage_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    0.3273...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    0.5515...
    >>> mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.6198...
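    >>> # Editor's note: denominators are clipped to np.finfo(np.float64).eps,
    >>> # so a zero in y_true contributes roughly abs(error) / eps, as below.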
    >>> # the value when some element of the y_true is zero is arbitrarily high because
    >>> # of the division by epsilon
    >>> y_true = [1., 0., 2.4, 7.]
    >>> y_pred = [1.2, 0.1, 2.4, 8.]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    112589990684262.48
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)
    epsilon = np.finfo(np.float64).eps
    mape = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), epsilon)
    output_errors = np.average(mape, weights=sample_weight, axis=0)
    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None

    return np.average(output_errors, weights=multioutput)


def mean_squared_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average", squared=True
):
    """Mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE value.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_squared_error(y_true, y_pred)
    0.375
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_squared_error(y_true, y_pred, squared=False)
    0.612...
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> mean_squared_error(y_true, y_pred)
    0.708...
    >>> mean_squared_error(y_true, y_pred, squared=False)
    0.822...
    >>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
    array([0.41666667, 1.        ])
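    >>> # The weighted aggregate below combines the per-output errors shown
    >>> # above: 0.3 * 0.41666667 + 0.7 * 1.0 = 0.825 (illustrative arithmetic).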
    >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.825...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)
    output_errors = np.average((y_true - y_pred) ** 2, axis=0, weights=sample_weight)

    if not squared:
        output_errors = np.sqrt(output_errors)

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None

    return np.average(output_errors, weights=multioutput)


def mean_squared_log_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average", squared=True
):
    """Mean squared logarithmic error regression loss.

    Read more in the :ref:`User Guide <mean_squared_log_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'

        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors when the input is of multioutput
            format.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSLE (mean squared log error) value.
        If False returns RMSLE (root mean squared log error) value.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_log_error
    >>> y_true = [3, 5, 2.5, 7]
    >>> y_pred = [2.5, 5, 4, 8]
    >>> mean_squared_log_error(y_true, y_pred)
    0.039...
    >>> mean_squared_log_error(y_true, y_pred, squared=False)
    0.199...
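    >>> # Editor's sketch: squared=False is the square root of the default
    >>> # squared=True value, i.e. sqrt(0.039...) ~= 0.199... here.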
    >>> y_true = [[0.5, 1], [1, 2], [7, 6]]
    >>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]]
    >>> mean_squared_log_error(y_true, y_pred)
    0.044...
    >>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
    array([0.00462428, 0.08377444])
    >>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.060...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)

    if (y_true < 0).any() or (y_pred < 0).any():
        raise ValueError(
            "Mean Squared Logarithmic Error cannot be used when "
            "targets contain negative values."
        )

    return mean_squared_error(
        np.log1p(y_true),
        np.log1p(y_pred),
        sample_weight=sample_weight,
        multioutput=multioutput,
        squared=squared,
    )


def median_absolute_error(
    y_true, y_pred, *, multioutput="uniform_average", sample_weight=None
):
    """Median absolute error regression loss.

    Median absolute error output is non-negative floating point. The best value
    is 0.0. Read more in the :ref:`User Guide <median_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values. Array-like value defines
        weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

        .. versionadded:: 0.24

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

    Examples
    --------
    >>> from sklearn.metrics import median_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> median_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> median_absolute_error(y_true, y_pred)
    0.75
    >>> median_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([0.5, 1. ])
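    >>> # The weighted aggregate below combines the per-output errors shown
    >>> # above: 0.3 * 0.5 + 0.7 * 1.0 = 0.85 (illustrative arithmetic).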
    >>> median_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.85
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    if sample_weight is None:
        output_errors = np.median(np.abs(y_pred - y_true), axis=0)
    else:
        sample_weight = _check_sample_weight(sample_weight, y_pred)
        output_errors = _weighted_percentile(
            np.abs(y_pred - y_true), sample_weight=sample_weight
        )
    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None

    return np.average(output_errors, weights=multioutput)


def _assemble_r2_explained_variance(
    numerator, denominator, n_outputs, multioutput, force_finite
):
    """Common part used by explained variance score and :math:`R^2` score."""
    nonzero_denominator = denominator != 0

    if not force_finite:
        # Standard formula, that may lead to NaN or -Inf
        output_scores = 1 - (numerator / denominator)
    else:
        nonzero_numerator = numerator != 0
        # Default = zero numerator = perfect predictions. Set to 1.0
        # (even if the denominator is zero as well).
        output_scores = np.ones([n_outputs])
        # Non-zero numerator and non-zero denominator: use the formula
        valid_score = nonzero_denominator & nonzero_numerator
        output_scores[valid_score] = 1 - (
            numerator[valid_score] / denominator[valid_score]
        )
        # Non-zero numerator and zero denominator:
        # arbitrarily set to 0.0 to avoid -inf scores
        output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            # return scores individually
            return output_scores
        elif multioutput == "uniform_average":
            # Passing None as weights to np.average results in uniform mean
            avg_weights = None
        elif multioutput == "variance_weighted":
            avg_weights = denominator
            if not np.any(nonzero_denominator):
                # All weights are zero, np.average would raise a ZeroDiv error.
                # This only happens when all y are constant (or 1-element long).
                # Since weights are all equal, fall back to uniform weights.
                avg_weights = None
    else:
        avg_weights = multioutput

    return np.average(output_scores, weights=avg_weights)


def explained_variance_score(
    y_true,
    y_pred,
    *,
    sample_weight=None,
    multioutput="uniform_average",
    force_finite=True,
):
    """Explained variance regression score function.

    Best possible score is 1.0, lower values are worse.

    In the particular case when ``y_true`` is constant, the explained variance
    score is not finite: it is either ``NaN`` (perfect predictions) or
    ``-Inf`` (imperfect predictions). To prevent such non-finite numbers to
    pollute higher-level experiments such as a grid search cross-validation,
    by default these cases are replaced with 1.0 (perfect predictions) or 0.0
    (imperfect predictions) respectively. If ``force_finite``
    is set to ``False``, this score falls back on the original :math:`R^2`
    definition.

    .. note::
       The Explained Variance score is similar to the
       :func:`R^2 score <r2_score>`, with the notable difference that it
       does not account for systematic offsets in the prediction. Most often
       the :func:`R^2 score <r2_score>` should be preferred.

    Read more in the :ref:`User Guide <explained_variance_score>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average', 'variance_weighted'} or \
            array-like of shape (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

    force_finite : bool, default=True
        Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant
        data should be replaced with real numbers (``1.0`` if prediction is
        perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting
        for hyperparameters' search procedures (e.g. grid search
        cross-validation).

        .. versionadded:: 1.1

    Returns
    -------
    score : float or ndarray of floats
        The explained variance or ndarray if 'multioutput' is 'raw_values'.

    See Also
    --------
    r2_score :
        Similar metric, but accounting for systematic offsets in
        prediction.

    Notes
    -----
    This is not a symmetric function.

    Examples
    --------
    >>> from sklearn.metrics import explained_variance_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> explained_variance_score(y_true, y_pred)
    0.957...
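    >>> # Worked check (editor's sketch): Var(y_true - y_pred) = 0.3125 and
    >>> # Var(y_true) = 7.296875, so the score is 1 - 0.3125 / 7.296875.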
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
    0.983...
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2]
    >>> explained_variance_score(y_true, y_pred)
    1.0
    >>> explained_variance_score(y_true, y_pred, force_finite=False)
    nan
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2 + 1e-8]
    >>> explained_variance_score(y_true, y_pred)
    0.0
    >>> explained_variance_score(y_true, y_pred, force_finite=False)
    -inf
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)

    y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
    numerator = np.average(
        (y_true - y_pred - y_diff_avg) ** 2, weights=sample_weight, axis=0
    )

    y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
    denominator = np.average((y_true - y_true_avg) ** 2, weights=sample_weight, axis=0)

    return _assemble_r2_explained_variance(
        numerator=numerator,
        denominator=denominator,
        n_outputs=y_true.shape[1],
        multioutput=multioutput,
        force_finite=force_finite,
    )


def r2_score(
    y_true,
    y_pred,
    *,
    sample_weight=None,
    multioutput="uniform_average",
    force_finite=True,
):
    """:math:`R^2` (coefficient of determination) regression score function.

    Best possible score is 1.0 and it can be negative (because the
    model can be arbitrarily worse). In the general case when the true y is
    non-constant, a constant model that always predicts the average y
    disregarding the input features would get a :math:`R^2` score of 0.0.

    In the particular case when ``y_true`` is constant, the :math:`R^2` score
    is not finite: it is either ``NaN`` (perfect predictions) or ``-Inf``
    (imperfect predictions). To prevent such non-finite numbers to pollute
    higher-level experiments such as a grid search cross-validation, by default
    these cases are replaced with 1.0 (perfect predictions) or 0.0 (imperfect
    predictions) respectively. You can set ``force_finite`` to ``False`` to
    prevent this fix from happening.

    Note: when the prediction residuals have zero mean, the :math:`R^2` score
    is identical to the
    :func:`Explained Variance score <explained_variance_score>`.

    Read more in the :ref:`User Guide <r2_score>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, \
            array-like of shape (n_outputs,) or None, default='uniform_average'

        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        Default is "uniform_average".

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

        .. versionchanged:: 0.19
            Default value of multioutput is 'uniform_average'.

    force_finite : bool, default=True
        Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant
        data should be replaced with real numbers (``1.0`` if prediction is
        perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting
        for hyperparameters' search procedures (e.g. grid search
        cross-validation).

        .. versionadded:: 1.1

    Returns
    -------
    z : float or ndarray of floats
        The :math:`R^2` score or ndarray of scores if 'multioutput' is
        'raw_values'.

    Notes
    -----
    This is not a symmetric function.

    Unlike most other scores, :math:`R^2` score may be negative (it need not
    actually be the square of a quantity R).

    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_

    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred)
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred,
    ...          multioutput='variance_weighted')
    0.938...
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 2, 3]
    >>> r2_score(y_true, y_pred)
    1.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [2, 2, 2]
    >>> r2_score(y_true, y_pred)
    0.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [3, 2, 1]
    >>> r2_score(y_true, y_pred)
    -3.0
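    >>> # Worked check (editor's sketch): the residual sum of squares is 8
    >>> # and the total sum of squares is 2, giving 1 - 8/2 = -3.0.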
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2]
    >>> r2_score(y_true, y_pred)
    1.0
    >>> r2_score(y_true, y_pred, force_finite=False)
    nan
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2 + 1e-8]
    >>> r2_score(y_true, y_pred)
    0.0
    >>> r2_score(y_true, y_pred, force_finite=False)
    -inf
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)

    if _num_samples(y_pred) < 2:
        msg = "R^2 score is not well-defined with less than two samples."
        warnings.warn(msg, UndefinedMetricWarning)
        return float("nan")

    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        weight = sample_weight[:, np.newaxis]
    else:
        weight = 1.0

    numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0, dtype=np.float64)
    denominator = (
        weight * (y_true - np.average(y_true, axis=0, weights=sample_weight)) ** 2
    ).sum(axis=0, dtype=np.float64)

    return _assemble_r2_explained_variance(
        numerator=numerator,
        denominator=denominator,
        n_outputs=y_true.shape[1],
        multioutput=multioutput,
        force_finite=force_finite,
    )


def max_error(y_true, y_pred):
    """
    The max_error metric calculates the maximum residual error.

    Read more in the :ref:`User Guide <max_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    Returns
    -------
    max_error : float
        A positive floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import max_error
    >>> y_true = [3, 2, 7, 1]
    >>> y_pred = [4, 2, 7, 1]
    >>> max_error(y_true, y_pred)
    1
    """
    y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred, None)
    if y_type == "continuous-multioutput":
        raise ValueError("Multioutput not supported in max_error")
    return np.max(np.abs(y_true - y_pred))


def _mean_tweedie_deviance(y_true, y_pred, sample_weight, power):
    """Mean Tweedie deviance regression loss."""
    p = power
    if p < 0:
        # 'Extreme stable', y any real number, y_pred > 0
        dev = 2 * (
            np.power(np.maximum(y_true, 0), 2 - p) / ((1 - p) * (2 - p))
            - y_true * np.power(y_pred, 1 - p) / (1 - p)
            + np.power(y_pred, 2 - p) / (2 - p)
        )
    elif p == 0:
        # Normal distribution, y and y_pred any real number
        dev = (y_true - y_pred) ** 2
    elif p == 1:
        # Poisson distribution
        dev = 2 * (xlogy(y_true, y_true / y_pred) - y_true + y_pred)
    elif p == 2:
        # Gamma distribution
        dev = 2 * (np.log(y_pred / y_true) + y_true / y_pred - 1)
    else:
        dev = 2 * (
            np.power(y_true, 2 - p) / ((1 - p) * (2 - p))
            - y_true * np.power(y_pred, 1 - p) / (1 - p)
            + np.power(y_pred, 2 - p) / (2 - p)
        )
    return np.average(dev, weights=sample_weight)


def mean_tweedie_deviance(y_true, y_pred, *, sample_weight=None, power=0):
    """Mean Tweedie deviance regression loss.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    power : float, default=0
        Tweedie power parameter. Either power <= 0 or power >= 1.

        The higher `p` the less weight is given to extreme
        deviations between true and predicted targets.

        - power < 0: Extreme stable distribution. Requires: y_pred > 0.
        - power = 0 : Normal distribution, output corresponds to
          mean_squared_error. y_true and y_pred can be any real numbers.
        - power = 1 : Poisson distribution. Requires: y_true >= 0 and
          y_pred > 0.
        - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0
          and y_pred > 0.
        - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
        - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
          and y_pred > 0.
        - otherwise : Positive stable distribution. Requires: y_true > 0
          and y_pred > 0.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_tweedie_deviance
    >>> y_true = [2, 0, 1, 4]
    >>> y_pred = [0.5, 0.5, 2., 2.]
    >>> mean_tweedie_deviance(y_true, y_pred, power=1)
    1.4260...
    """
    y_type, y_true, y_pred, _ = _check_reg_targets(
        y_true, y_pred, None, dtype=[np.float64, np.float32]
    )
    if y_type == "continuous-multioutput":
        raise ValueError("Multioutput not supported in mean_tweedie_deviance")
    check_consistent_length(y_true, y_pred, sample_weight)

    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        sample_weight = sample_weight[:, np.newaxis]

    check_scalar(power, name="power", target_type=numbers.Real)

    message = f"Mean Tweedie deviance error with power={power} can only be used on "
    if power < 0:
        # 'Extreme stable', y any real number, y_pred > 0
        if (y_pred <= 0).any():
            raise ValueError(message + "strictly positive y_pred.")
    elif power == 0:
        # Normal, y and y_pred can be any real number
        pass
    elif 0 < power < 1:
        raise ValueError("Tweedie deviance is only defined for power<=0 and power>=1.")
    elif 1 <= power < 2:
        # Poisson and compound Poisson distribution, y >= 0, y_pred > 0
        if (y_true < 0).any() or (y_pred <= 0).any():
            raise ValueError(message + "non-negative y and strictly positive y_pred.")
    elif power >= 2:
        # Gamma and Extreme stable distribution, y and y_pred > 0
        if (y_true <= 0).any() or (y_pred <= 0).any():
            raise ValueError(message + "strictly positive y and y_pred.")
    else:  # pragma: nocover
        # Unreachable statement
        raise ValueError

    return _mean_tweedie_deviance(
        y_true, y_pred, sample_weight=sample_weight, power=power
    )


def mean_poisson_deviance(y_true, y_pred, *, sample_weight=None):
    """Mean Poisson deviance regression loss.

    Poisson deviance is equivalent to the Tweedie deviance with
    the power parameter `power=1`.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values. Requires y_true >= 0.

    y_pred : array-like of shape (n_samples,)
        Estimated target values. Requires y_pred > 0.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_poisson_deviance
    >>> y_true = [2, 0, 1, 4]
    >>> y_pred = [0.5, 0.5, 2., 2.]
    >>> mean_poisson_deviance(y_true, y_pred)
    1.4260...
    """
    return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=1)


def mean_gamma_deviance(y_true, y_pred, *, sample_weight=None):
    """Mean Gamma deviance regression loss.

    Gamma deviance is equivalent to the Tweedie deviance with
    the power parameter `power=2`. It is invariant to scaling of
    the target variable, and measures relative errors.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values. Requires y_true > 0.

    y_pred : array-like of shape (n_samples,)
        Estimated target values. Requires y_pred > 0.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_gamma_deviance
    >>> y_true = [2, 0.5, 1, 4]
    >>> y_pred = [0.5, 0.5, 2., 2.]
    >>> mean_gamma_deviance(y_true, y_pred)
    1.0568...
    """
    return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=2)


def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0):
    """D^2 regression score function, fraction of Tweedie deviance explained.

    Best possible score is 1.0 and it can be negative (because the model can be
    arbitrarily worse). A model that always uses the empirical mean of `y_true` as
    constant prediction, disregarding the input features, gets a D^2 score of 0.0.

    Read more in the :ref:`User Guide <d2_score>`.

    .. versionadded:: 1.0

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), optional
        Sample weights.

    power : float, default=0
        Tweedie power parameter. Either power <= 0 or power >= 1.

        The higher `p` the less weight is given to extreme
        deviations between true and predicted targets.

        - power < 0: Extreme stable distribution. Requires: y_pred > 0.
        - power = 0 : Normal distribution, output corresponds to r2_score.
          y_true and y_pred can be any real numbers.
        - power = 1 : Poisson distribution. Requires: y_true >= 0 and
          y_pred > 0.
        - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0
          and y_pred > 0.
        - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
        - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
          and y_pred > 0.
        - otherwise : Positive stable distribution. Requires: y_true > 0
          and y_pred > 0.

    Returns
    -------
    z : float or ndarray of floats
        The D^2 score.

    Notes
    -----
    This is not a symmetric function.

    Like R^2, D^2 score may be negative (it need not actually be the square of
    a quantity D).

    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
           Wainwright. "Statistical Learning with Sparsity: The Lasso and
           Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/

    Examples
    --------
    >>> from sklearn.metrics import d2_tweedie_score
    >>> y_true = [0.5, 1, 2.5, 7]
    >>> y_pred = [1, 1, 5, 3.5]
    >>> d2_tweedie_score(y_true, y_pred)
    0.285...
    >>> d2_tweedie_score(y_true, y_pred, power=1)
    0.487...
    >>> d2_tweedie_score(y_true, y_pred, power=2)
    0.630...
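    >>> # Editor's sketch: each score equals 1 - dev / dev_null, where
    >>> # dev_null uses the mean of y_true (2.75 here) as the prediction.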
    >>> d2_tweedie_score(y_true, y_true, power=2)
    1.0
    """
    y_type, y_true, y_pred, _ = _check_reg_targets(
        y_true, y_pred, None, dtype=[np.float64, np.float32]
    )
    if y_type == "continuous-multioutput":
        raise ValueError("Multioutput not supported in d2_tweedie_score")

    if _num_samples(y_pred) < 2:
        msg = "D^2 score is not well-defined with less than two samples."
        warnings.warn(msg, UndefinedMetricWarning)
        return float("nan")

    y_true, y_pred = np.squeeze(y_true), np.squeeze(y_pred)
    numerator = mean_tweedie_deviance(
        y_true, y_pred, sample_weight=sample_weight, power=power
    )

    y_avg = np.average(y_true, weights=sample_weight)
    denominator = _mean_tweedie_deviance(
        y_true, y_avg, sample_weight=sample_weight, power=power
    )

    return 1 - numerator / denominator


def d2_pinball_score(
    y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
):
    """
    :math:`D^2` regression score function, fraction of pinball loss explained.

    Best possible score is 1.0 and it can be negative (because the model can be
    arbitrarily worse). A model that always uses the empirical alpha-quantile of
    `y_true` as constant prediction, disregarding the input features,
    gets a :math:`D^2` score of 0.0.

    Read more in the :ref:`User Guide <d2_score>`.

    .. versionadded:: 1.1

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), optional
        Sample weights.

    alpha : float, default=0.5
        Slope of the pinball deviance. It determines the quantile level alpha
        for which the pinball deviance and also D2 are optimal.
        The default `alpha=0.5` is equivalent to `d2_absolute_error_score`.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

    Returns
    -------
    score : float or ndarray of floats
        The :math:`D^2` score with a pinball deviance
        or ndarray of scores if `multioutput='raw_values'`.

    Notes
    -----
    Like :math:`R^2`, :math:`D^2` score may be negative
    (it need not actually be the square of a quantity D).

    This metric is not well-defined for a single point and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] Eq. (7) of `Koenker, Roger; Machado, José A. F. (1999).
           "Goodness of Fit and Related Inference Processes for Quantile Regression"
           <http://dx.doi.org/10.1080/01621459.1999.10473882>`_
    .. [2] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
           Wainwright. "Statistical Learning with Sparsity: The Lasso and
           Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/

    Examples
    --------
    >>> from sklearn.metrics import d2_pinball_score
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 3, 3]
    >>> d2_pinball_score(y_true, y_pred)
    0.5
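    >>> # Worked check (editor's sketch): the pinball loss of y_pred is 1/6
    >>> # and the median-based null loss is 1/3, so D^2 = 1 - (1/6)/(1/3).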
    >>> d2_pinball_score(y_true, y_pred, alpha=0.9)
    0.772...
    >>> d2_pinball_score(y_true, y_pred, alpha=0.1)
    -1.045...
    >>> d2_pinball_score(y_true, y_true, alpha=0.1)
    1.0
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)

    if _num_samples(y_pred) < 2:
        msg = "D^2 score is not well-defined with less than two samples."
        warnings.warn(msg, UndefinedMetricWarning)
        return float("nan")

    numerator = mean_pinball_loss(
        y_true,
        y_pred,
        sample_weight=sample_weight,
        alpha=alpha,
        multioutput="raw_values",
    )

    if sample_weight is None:
        y_quantile = np.tile(
            np.percentile(y_true, q=alpha * 100, axis=0), (len(y_true), 1)
        )
    else:
        sample_weight = _check_sample_weight(sample_weight, y_true)
        y_quantile = np.tile(
            _weighted_percentile(
                y_true, sample_weight=sample_weight, percentile=alpha * 100
            ),
            (len(y_true), 1),
        )

    denominator = mean_pinball_loss(
        y_true,
        y_quantile,
        sample_weight=sample_weight,
        alpha=alpha,
        multioutput="raw_values",
    )

    nonzero_numerator = numerator != 0
    nonzero_denominator = denominator != 0
    valid_score = nonzero_numerator & nonzero_denominator
    output_scores = np.ones(y_true.shape[1])

    output_scores[valid_score] = 1 - (
        numerator[valid_score] / denominator[valid_score]
    )
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            # return scores individually
            return output_scores
        elif multioutput == "uniform_average":
            # passing None as weights results in uniform mean
            avg_weights = None
        else:
            raise ValueError(
                "multioutput is expected to be 'raw_values' "
                "or 'uniform_average' but we got %r instead." % multioutput
            )
    else:
        avg_weights = multioutput

    return np.average(output_scores, weights=avg_weights)


def d2_absolute_error_score(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """
    :math:`D^2` regression score function, \
    fraction of absolute error explained.

    Best possible score is 1.0 and it can be negative (because the model can be
    arbitrarily worse). A model that always uses the empirical median of `y_true`
    as constant prediction, disregarding the input features,
    gets a :math:`D^2` score of 0.0.

    Read more in the :ref:`User Guide <d2_score>`.

    .. versionadded:: 1.1

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), optional
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

    Returns
    -------
    score : float or ndarray of floats
        The :math:`D^2` score with an absolute error deviance
        or ndarray of scores if 'multioutput' is 'raw_values'.

    Notes
    -----
    Like :math:`R^2`, :math:`D^2` score may be negative
    (it need not actually be the square of a quantity D).

    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
           Wainwright. "Statistical Learning with Sparsity: The Lasso and
           Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/

    Examples
    --------
    >>> from sklearn.metrics import d2_absolute_error_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> d2_absolute_error_score(y_true, y_pred)
    0.764...
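    >>> # Worked check (editor's sketch): the pinball loss of y_pred is 0.25
    >>> # versus 1.0625 for the median-based null model: 1 - 0.25/1.0625.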
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> d2_absolute_error_score(y_true, y_pred, multioutput='uniform_average')
    0.691...
    >>> d2_absolute_error_score(y_true, y_pred, multioutput='raw_values')
    array([0.8125    , 0.57142857])
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 2, 3]
    >>> d2_absolute_error_score(y_true, y_pred)
    1.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [2, 2, 2]
    >>> d2_absolute_error_score(y_true, y_pred)
    0.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [3, 2, 1]
    >>> d2_absolute_error_score(y_true, y_pred)
    -1.0
    """
    return d2_pinball_score(
        y_true, y_pred, sample_weight=sample_weight, alpha=0.5, multioutput=multioutput
    )
   r   utils.statsr   __ALL__r6   r   r   r   r   r   r   rh   r   r   r   r~   r   r   r   r   r   r    r7   r5   <module>r      sF   8             / / / / / /                / . . . . .  &H/ H/ H/ H/X &*7HC: C: C: C: C:N &*BSR: R: R: R: R:l &*7HS: S: S: S: S:n &*7HRVL: L: L: L: L:` &*7HRVK K K K K^ $5DI: I: I: I: I:X): ): ):` !z z z z zB !Z Z Z Z Zz+ + +B2 2 2: <@q Y Y Y Y Yx <@  W  W  W  W  WF :> !W !W !W !W !WH 7;! a' a' a' a' a'J &*BSN: N: N: N: N:d &*7HV V V V V V Vr7   