o
    tBh{                  
   @   s*  d Z ddlmZ ddlmZ ddlZddlmZmZ ddl	m
Z
 ddlmZ dd	lmZ dd
lmZ G dd dedZG dd deedZG dd deZG dd deZG dd deZG dd deZG dd deedZG dd deZG dd deZG dd deZeeeeeedded 	ZdS )!zZLosses and corresponding default initial estimators for gradient boosting
decision trees.
    )ABCMeta)abstractmethodN)expit	logsumexp   )	TREE_LEAF)_weighted_percentileDummyClassifierDummyRegressorc                   @   sd   e Zd ZdZdZdd Zdd Zeddd	Zed
d Z			dddZ
edd Zedd ZdS )LossFunctionaM  Abstract base class for various loss functions.

    Parameters
    ----------
    n_classes : int
        Number of classes.

    Attributes
    ----------
    K : int
        The number of regression trees to be induced;
        1 for regression and binary classification;
        ``n_classes`` for multi-class classification.
    Fc                 C   s
   || _ d S N)Kself	n_classes r   r/var/www/html/riverr-enterprise-integrations-main/venv/lib/python3.10/site-packages/sklearn/ensemble/_gb_losses.py__init__#      
zLossFunction.__init__c                 C   s   t  )z-Default ``init`` estimator for loss function.)NotImplementedErrorr   r   r   r   init_estimator&   s   zLossFunction.init_estimatorNc                 C      dS )ad  Compute the loss.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves).

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        Nr   r   yraw_predictionssample_weightr   r   r   __call__*       zLossFunction.__call__c                 K   r   )N  Compute the negative gradient.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        Nr   r   r   r   kargsr   r   r   negative_gradient:   r    zLossFunction.negative_gradient皙?r   c
                 C   s   | |}
|
 }d|| < t|jtkd D ]}| |||||||dd|	f | q|dd|	f  ||jddddf j|
dd 7  < dS )az  Update the terminal regions (=leaves) of the given tree and
        updates the current predictions of the model. Traverses tree
        and invokes template method `_update_terminal_region`.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray of shape (n_samples, n_features)
            The data array.
        y : ndarray of shape (n_samples,)
            The target labels.
        residual : ndarray of shape (n_samples,)
            The residuals (usually the negative gradient).
        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        sample_weight : ndarray of shape (n_samples,)
            The weight of each sample.
        sample_mask : ndarray of shape (n_samples,)
            The sample mask to be used.
        learning_rate : float, default=0.1
            Learning rate shrinks the contribution of each tree by
             ``learning_rate``.
        k : int, default=0
            The index of the estimator being updated.

        r   Naxis)	applycopynpwherechildren_leftr   _update_terminal_regionvaluetake)r   treeXr   residualr   r   sample_masklearning_ratekterminal_regionsmasked_terminal_regionsleafr   r   r   update_terminal_regionsH   s"   
)
&z$LossFunction.update_terminal_regionsc	           	      C   r   )z=Template method for updating terminal regions (i.e., leaves).Nr   	r   r1   r7   r9   r2   r   r3   r   r   r   r   r   r.      r    z$LossFunction._update_terminal_regionc                 C   r   )aL  Return the initial raw predictions.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            The data array.
        estimator : object
            The estimator to use to compute the predictions.

        Returns
        -------
        raw_predictions : ndarray of shape (n_samples, K)
            The initial raw predictions. K is equal to 1 for binary
            classification and regression, and equal to the number of classes
            for multiclass classification. ``raw_predictions`` is casted
            into float64.
        Nr   )r   r2   	estimatorr   r   r   get_init_raw_predictions   s   z%LossFunction.get_init_raw_predictionsr   r%   r   )__name__
__module____qualname____doc__is_multi_classr   r   r   r   r$   r:   r.   r=   r   r   r   r   r      s     

A
r   )	metaclassc                       s0   e Zd ZdZ fddZdd Zdd Z  ZS )RegressionLossFunctionz)Base class for regression loss functions.c                    s   t  jdd d S )N   r   )superr   r   	__class__r   r   r         zRegressionLossFunction.__init__c                 C       t |dr
t |dstddS )zMake sure estimator has the required fit and predict methods.

        Parameters
        ----------
        estimator : object
            The init estimator to check.
        fitpredictzNThe init parameter must be a valid estimator and support both fit and predict.Nhasattr
ValueErrorr   r<   r   r   r   check_init_estimator   
   z+RegressionLossFunction.check_init_estimatorc                 C   s   | |}|ddtjS )Nr&   rF   )rN   reshapeastyper+   float64)r   r2   r<   predictionsr   r   r   r=      s   
z/RegressionLossFunction.get_init_raw_predictions)r?   r@   rA   rB   r   rS   r=   __classcell__r   r   rI   r   rE      s
    rE   c                   @   s@   e Zd ZdZdd ZdddZdd Z			
dddZdd ZdS )LeastSquaresErrorzLoss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares.

    Parameters
    ----------
    n_classes : int
        Number of classes.
    c                 C   
   t ddS )Nmeanstrategyr   r   r   r   r   r      r   z LeastSquaresError.init_estimatorNc                 C   sD   |du rt ||  d S d|  t |||  d   S )ar  Compute the least squares loss.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves).

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        Nr   rF   )r+   r\   ravelsumr   r   r   r   r      s   zLeastSquaresError.__call__c                 K   s   ||   S )aT  Compute half of the negative gradient.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples,)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        r_   r"   r   r   r   r$      s   z#LeastSquaresError.negative_gradientr%   r   c
           
      C   s*   |dd|	f  || |  7  < dS )a  Least squares does not need to update terminal regions.

        But it has to update the predictions.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray of shape (n_samples, n_features)
            The data array.
        y : ndarray of shape (n_samples,)
            The target labels.
        residual : ndarray of shape (n_samples,)
            The residuals (usually the negative gradient).
        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        sample_weight : ndarray of shape (n,)
            The weight of each sample.
        sample_mask : ndarray of shape (n,)
            The sample mask to be used.
        learning_rate : float, default=0.1
            Learning rate shrinks the contribution of each tree by
             ``learning_rate``.
        k : int, default=0
            The index of the estimator being updated.
        N)rN   r_   )
r   r1   r2   r   r3   r   r   r4   r5   r6   r   r   r   r:      s   *(z)LeastSquaresError.update_terminal_regionsc	           	      C   s   d S r   r   r;   r   r   r   r.   "  s   z)LeastSquaresError._update_terminal_regionr   r>   )	r?   r@   rA   rB   r   r   r$   r:   r.   r   r   r   r   rZ      s    	

*rZ   c                   @   s2   e Zd ZdZdd ZdddZdd Zd	d
 ZdS )LeastAbsoluteErrorzLoss function for least absolute deviation (LAD) regression.

    Parameters
    ----------
    n_classes : int
        Number of classes
    c                 C      t dddS Nquantile      ?r^   re   r   r   r   r   r   r   9     z!LeastAbsoluteError.init_estimatorNc              	   C   sF   |du rt ||   S d|  t |t ||    S )at  Compute the least absolute error.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves).

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        NrF   )r+   absr_   r\   r`   r   r   r   r   r   <  s   zLeastAbsoluteError.__call__c                 K   s   |  }d|| dk d S )a  Compute the negative gradient.

        1.0 if y - raw_predictions > 0.0 else -1.0

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        r   r   rF   ra   r"   r   r   r   r$   S  s   z$LeastAbsoluteError.negative_gradientc	                 C   sZ   t ||kd }	|j|	dd}|j|	dd|j|	dd }
t|
|dd|j|ddf< dS )z1LAD updates terminal regions to median estimates.r   r'   2   
percentileN)r+   r,   r0   r   r/   )r   r1   r7   r9   r2   r   r3   r   r   terminal_regiondiffr   r   r   r.   d  s   z*LeastAbsoluteError._update_terminal_regionr   )r?   r@   rA   rB   r   r   r$   r.   r   r   r   r   rb   0  s    
rb   c                       sF   e Zd ZdZd fdd	Zdd Zddd	Zdd
dZdd Z  Z	S )HuberLossFunctionah  Huber loss function for robust regression.

    M-Regression proposed in Friedman 2001.

    Parameters
    ----------
    alpha : float, default=0.9
        Percentile at which to extract score.

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    ?c                    s   t    || _d | _d S r   )rH   r   alphagammar   rq   rI   r   r   r     s   

zHuberLossFunction.__init__c                 C   rc   rd   r   r   r   r   r   r     rh   z HuberLossFunction.init_estimatorNc           
      C   s  |  }|| }| j}|du r,|du r tt|| jd }ntt||| jd }t||k}|du r^td|| d  }t|t||  |d   }|| |jd  }	|	S td||  || d  }t|||   t||  |d   }|| |  }	|	S )a  Compute the Huber loss.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        Nd   rf   r   r   )	r_   rr   r+   rl   ri   rq   r   r`   shape)
r   r   r   r   rn   rr   
gamma_masksq_losslin_losslossr   r   r   r     s2   "	zHuberLossFunction.__call__c           	      K   s   |  }|| }|du rtt|| jd }ntt||| jd }t||k}tj|jd ftjd}|| ||< |t	||   || < || _
|S )a  Compute the negative gradient.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        Nrt   r   dtype)r_   r+   rl   ri   rq   r   zerosru   rW   signrr   )	r   r   r   r   r#   rn   rr   rv   r3   r   r   r   r$     s   z#HuberLossFunction.negative_gradientc	              	   C   s   t ||kd }	|j|	dd}| j}
|j|	dd|j|	dd }t||dd}|| }|t t |t t ||
  |j	|df< d S )Nr   r'   rj   rk   )
r+   r,   r0   rr   r   r\   r}   minimumri   r/   )r   r1   r7   r9   r2   r   r3   r   r   rm   rr   rn   mediandiff_minus_medianr   r   r   r.     s   z)HuberLossFunction._update_terminal_regionrp   r   
r?   r@   rA   rB   r   r   r   r$   r.   rY   r   r   rI   r   ro   z  s    

)ro   c                       sD   e Zd ZdZd fdd	Zdd Zddd	Zd
d Zdd Z  Z	S )QuantileLossFunctionzLoss function for quantile regression.

    Quantile regression allows to estimate the percentiles
    of the conditional distribution of the target.

    Parameters
    ----------
    alpha : float, default=0.9
        The percentile.
    rp   c                    s   t    || _|d | _d S )Nrt   )rH   r   rq   rl   rs   rI   r   r   r     s   
zQuantileLossFunction.__init__c                 C   s   t d| jdS )Nre   rg   )r   rq   r   r   r   r   r      s   z#QuantileLossFunction.init_estimatorNc                 C   s   |  }|| }| j}||k}|du r-|||   d| ||     |jd  }|S |t|| ||   d| t||  ||     |  }|S )a  Compute the Quantile loss.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        NrF   r   )r_   rq   r`   ru   r+   )r   r   r   r   rn   rq   maskry   r   r   r   r     s   $ zQuantileLossFunction.__call__c                 K   s,   | j }| }||k}|| d| |   S )r!   rF   )rq   r_   )r   r   r   r#   rq   r   r   r   r   r$   "  s   z&QuantileLossFunction.negative_gradientc	                 C   s\   t ||kd }	|j|	dd|j|	dd }
|j|	dd}t|
|| j}||j|df< d S )Nr   r'   )r+   r,   r0   r   rl   r/   )r   r1   r7   r9   r2   r   r3   r   r   rm   rn   valr   r   r   r.   3  s   z,QuantileLossFunction._update_terminal_regionr   r   r   r   r   rI   r   r     s    
r   c                   @   s,   e Zd ZdZdd Zedd Zdd ZdS )	ClassificationLossFunctionz-Base class for classification loss functions.c                 C   r   )a  Template method to convert raw predictions into probabilities.

        Parameters
        ----------
        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        Returns
        -------
        probas : ndarray of shape (n_samples, K)
            The predicted probabilities.
        Nr   r   r   r   r   r   _raw_prediction_to_probaK  r    z3ClassificationLossFunction._raw_prediction_to_probac                 C   r   )a  Template method to convert raw predictions to decisions.

        Parameters
        ----------
        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        Returns
        -------
        encoded_predictions : ndarray of shape (n_samples, K)
            The predicted encoded labels.
        Nr   r   r   r   r   _raw_prediction_to_decisionZ  r    z6ClassificationLossFunction._raw_prediction_to_decisionc                 C   rL   )zMake sure estimator has fit and predict_proba methods.

        Parameters
        ----------
        estimator : object
            The init estimator to check.
        rM   predict_probazTThe init parameter must be a valid estimator and support both fit and predict_proba.NrO   rR   r   r   r   rS   j  rT   z/ClassificationLossFunction.check_init_estimatorN)r?   r@   rA   rB   r   r   r   rS   r   r   r   r   r   H  s    
r   c                       Z   e Zd ZdZ fddZdd ZdddZd	d
 Zdd Zdd Z	dd Z
dd Z  ZS )BinomialDeviancea  Binomial deviance loss function for binary classification.

    Binary classification is a special case; here, we only need to
    fit one tree instead of ``n_classes`` trees.

    Parameters
    ----------
    n_classes : int
        Number of classes.
    c                    .   |dkrt d| jj|t jdd d S Nr   z-{0:s} requires 2 classes; got {1:d} class(es)rF   rG   rQ   formatrJ   r?   rH   r   r   rI   r   r   r        zBinomialDeviance.__init__c                 C   r[   Npriorr]   r	   r   r   r   r   r     s   
zBinomialDeviance.init_estimatorNc              	   C   sX   |  }|du rdt|| td|  S d|  t||| td|   S )a  Compute the deviance (= 2 * negative log-likelihood).

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        Nr   )r_   r+   r\   	logaddexpr`   r   r   r   r   r     s   zBinomialDeviance.__call__c                 K   s   |t |  S )aP  Compute half of the negative gradient.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        )r   r_   r"   r   r   r   r$     s   z"BinomialDeviance.negative_gradientc	                 C   s   t ||kd }	|j|	dd}|j|	dd}|j|	dd}t || }
t |||  d| |  }t|dk rDd|j|ddf< dS |
| |j|ddf< dS )zMake a single Newton-Raphson step.

        our node estimate is given by:

            sum(w * (y - prob)) / sum(w * prob * (1 - prob))

        we take advantage that: y - prob = residual
        r   r'   rF   u?j/         N)r+   r,   r0   r`   ri   r/   r   r1   r7   r9   r2   r   r3   r   r   rm   	numeratordenominatorr   r   r   r.     s   z(BinomialDeviance._update_terminal_regionc                 C   sZ   t j|jd dft jd}t| |d d df< |d d df  |d d df 8  < |S )Nr   r   rz   rF   r+   onesru   rW   r   r_   r   r   probar   r   r   r     s   $z)BinomialDeviance._raw_prediction_to_probac                 C      |  |}tj|ddS NrF   r'   r   r+   argmaxr   r   r   r   r        
z,BinomialDeviance._raw_prediction_to_decisionc                 C   s`   | |}|d d df }ttjj}t||d| }t|d|  }|ddtj	S )NrF   r&   
r   r+   finfofloat32epscliplogrU   rV   rW   r   r2   r<   probasproba_pos_classr   r   r   r   r   r=     s   
z)BinomialDeviance.get_init_raw_predictionsr   r?   r@   rA   rB   r   r   r   r$   r.   r   r   r=   rY   r   r   rI   r   r   y  s    

!r   c                       s`   e Zd ZdZdZ fddZdd Zddd	ZdddZdd Z	dd Z
dd Zdd Z  ZS )MultinomialDeviancezMultinomial deviance loss function for multi-class classification.

    For multi-class classification we need to fit ``n_classes`` trees at
    each stage.

    Parameters
    ----------
    n_classes : int
        Number of classes.
    Tc                    s*   |dk rt d| jjt | d S )N   z#{0:s} requires more than 2 classes.r   r   rI   r   r   r     s
   zMultinomialDeviance.__init__c                 C   r[   r   r	   r   r   r   r   r     r   z"MultinomialDeviance.init_estimatorNc                 C   sj   t j|jd | jft jd}t| jD ]}||k|dd|f< qt jd|| jdd t|dd |dS )a  Compute the Multinomial deviance.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        r   rz   Nr&   rF   r'   )weights)	r+   r|   ru   r   rW   rangeaverager`   r   )r   r   r   r   Yr6   r   r   r   r     s   zMultinomialDeviance.__call__r   c              
   K   s,   |t t |dd|f t|dd  S )a  Compute negative gradient for the ``k``-th class.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.

        k : int, default=0
            The index of the class.
        NrF   r'   )r+   
nan_to_numexpr   )r   r   r   r6   kwargsr   r   r   r$   '  s    z%MultinomialDeviance.negative_gradientc	                 C   s   t ||kd }	|j|	dd}|j|	dd}|j|	dd}t || }
|
| jd | j 9 }
t |||  d| |  }t|dk rNd|j|ddf< dS |
| |j|ddf< dS )z"Make a single Newton-Raphson step.r   r'   rF   r   r   N)r+   r,   r0   r`   r   ri   r/   r   r   r   r   r.   :  s   z+MultinomialDeviance._update_terminal_regionc              	   C   s*   t t |t|ddd d t jf  S r   )r+   r   r   r   newaxisr   r   r   r   r   V  s
   z,MultinomialDeviance._raw_prediction_to_probac                 C   r   r   r   r   r   r   r   r   ]  r   z/MultinomialDeviance._raw_prediction_to_decisionc                 C   s@   | |}ttjj}t||d| }t|tj}|S )NrF   )	r   r+   r   r   r   r   r   rV   rW   )r   r2   r<   r   r   r   r   r   r   r=   a  s
   
z,MultinomialDeviance.get_init_raw_predictionsr   )r   )r?   r@   rA   rB   rC   r   r   r   r$   r.   r   r   r=   rY   r   r   rI   r   r     s    

r   c                       r   )ExponentialLossa  Exponential loss function for binary classification.

    Same loss as AdaBoost.

    Parameters
    ----------
    n_classes : int
        Number of classes.

    References
    ----------
    Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
    c                    r   r   r   r   rI   r   r   r   x  r   zExponentialLoss.__init__c                 C   r[   r   r	   r   r   r   r   r     r   zExponentialLoss.init_estimatorNc                 C   s\   |  }|du rttd| d  | S d|  t|td| d  |   S )a  Compute the exponential loss

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        N       @      ?r   rF   )r_   r+   r\   r   r`   r   r   r   r   r     s    zExponentialLoss.__call__c                 K   s$   d| d }|t | |   S )aU  Compute the residual (= negative gradient).

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        r   r   )r+   r   r_   )r   r   r   r#   y_r   r   r   r$     s   z!ExponentialLoss.negative_gradientc	                 C   s   t ||kd }	|j|	dd}|j|	dd}|j|	dd}d| d }
t |
| t |
 |  }t |t |
 |  }t|dk rPd|j|ddf< d S || |j|ddf< d S )Nr   r'   r   r   r   r   )r+   r,   r0   r`   r   ri   r/   )r   r1   r7   r9   r2   r   r3   r   r   rm   r   r   r   r   r   r   r.     s   z'ExponentialLoss._update_terminal_regionc                 C   s^   t j|jd dft jd}td|  |d d df< |d d df  |d d df 8  < |S )Nr   r   rz   r   rF   r   r   r   r   r   r     s   $z(ExponentialLoss._raw_prediction_to_probac                 C   s   |  dktS )Nr   )r_   rV   intr   r   r   r   r     rK   z+ExponentialLoss._raw_prediction_to_decisionc                 C   sd   | |}|d d df }ttjj}t||d| }dt|d|   }|ddtj	S )NrF   rf   r&   r   r   r   r   r   r=     s   
z(ExponentialLoss.get_init_raw_predictionsr   r   r   r   rI   r   r   i  s    

r   )	squared_errorlsabsolute_errorladhuberre   deviancelog_lossexponential)rB   abcr   r   numpyr+   scipy.specialr   r   
tree._treer   utils.statsr   dummyr
   r   r   rE   rZ   rb   ro   r   r   r   r   r   LOSS_FUNCTIONSr   r   r   r   <module>   s<     jJuY1}sv
