o
    tBh0                  
   @   sx  d Z ddlZddlZddlmZmZ ddlZddlm	Z	 ddl
mZmZmZ ddl
mZ ddl
mZ dd	lmZmZmZ dd
lmZ ddlmZ ddlmZ ddlmZmZ ddlmZ g dZeedkrmddlmZ  nddlm Z  dd Z!	d-ddZ"dd Z#d.ddZ$d d! Z%G d"d# d#eeeeeed$Z&G d%d& d&e&Z'G d'd( d(e&Z(G d)d* d*e&Z)G d+d, d,eeeZ*dS )/zG
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
    N)ABCMetaabstractmethod)svd   )BaseEstimatorRegressorMixinTransformerMixin)MultiOutputMixin) _ClassNamePrefixFeaturesOutMixin)check_arraycheck_scalarcheck_consistent_length)
sp_version)parse_version)svd_flip)check_is_fittedFLOAT_DTYPES)ConvergenceWarning)PLSCanonicalPLSRegressionPLSSVDz1.7)pinv)pinv2c              
   C   s   t | ddd\}}}|jj }ddd}t|||  t|j }t||k}|d d d |f }||d |  }t	t
t||d | S )NF)full_matricescheck_finiteg     @@g    .A)fd)r   dtypecharlowernpmaxfinfoepssum	transpose	conjugatedot)ausvhtfactorcondrank r0   w/var/www/html/riverr-enterprise-integrations-main/venv/lib/python3.10/site-packages/sklearn/cross_decomposition/_pls.py
_pinv2_old$   s   
 r2   A  ư>Fc              
      s  t | jj zt fdd|jD }W n ty& } ztd|d}~ww d}|dkr6t| t|}	}
t|D ]z}|dkrGt 	|	|}nt 	| j|t 	|| }|t 
t 	||   }t 	| |}|dkrrt 	|
|}nt 	|j|t 	|j| }|r|t 
t 	||   }t 	||t 	||   }|| }t 	|||k s|jd dkr n|}q:|d }||krtdt |||fS )	a?  Return the first left and right singular vectors of X'Y.

    Provides an alternative to the svd(X'Y) and uses the power method instead.
    With norm_y_weights to True and in mode A, this corresponds to the
    algorithm section 11.3 of the Wegelin's review, except this starts at the
    "update saliences" part.
    c                 3   s(    | ]}t t | kr|V  qd S N)r    anyabs).0colr#   r0   r1   	<genexpr>C   s   & z;_get_first_singular_vectors_power_method.<locals>.<genexpr>Y residual is constantNd   B   z$Maximum number of iterations reached)r    r"   r   r#   nextTStopIterationr2   ranger'   sqrtshapewarningswarnr   )XYmodemax_itertolnorm_y_weightsy_scoreex_weights_oldX_pinvY_pinvi	x_weightsx_score	y_weightsx_weights_diffn_iterr0   r;   r1   (_get_first_singular_vectors_power_method6   s<   

rZ   c                 C   s@   t | j|}t|dd\}}}|dddf |dddf fS )zbReturn the first left and right singular vectors of X'Y.

    Here the whole SVD is computed.
    Fr   Nr   )r    r'   rB   r   )rI   rJ   CU_Vtr0   r0   r1   _get_first_singular_vectors_svdq   s    r`   Tc                 C   s   | j dd}| |8 } |j dd}||8 }|r9| jddd}d||dk< | | } |jddd}d||dk< || }nt| jd }t|jd }| |||||fS )z{Center X, Y and scale if the scale parameter==True

    Returns
    -------
        X, Y, x_mean, y_mean, x_std, y_std
    r   axisr@   )rb   ddofg      ?        )meanstdr    onesrF   )rI   rJ   scalex_meany_meanx_stdy_stdr0   r0   r1   _center_scale_xy{   s   
rm   c                 C   s2   t t | }t | | }| |9 } ||9 }dS )z7Same as svd_flip but works on 1d arrays, and is inplaceN)r    argmaxr8   sign)r)   vbiggest_abs_val_idxro   r0   r0   r1   _svd_flip_1d   s   rr   c                
   @   sv   e Zd ZdZe	ddddddddd	d
dZdd ZdddZdddZdddZ	dddZ
edd Zdd ZdS )_PLSa  Partial Least Squares (PLS)

    This class implements the generic PLS algorithm.

    Main ref: Wegelin, a survey of Partial Least Squares (PLS) methods,
    with emphasis on the two-block case
    https://www.stat.washington.edu/research/reports/2000/tr371.pdf
    r   T
regressionr3   nipalsr4   r5   )rh   deflation_moderK   	algorithmrL   rM   copyc          	      C   s4   || _ || _|| _|| _|| _|| _|| _|| _d S r6   )n_componentsrv   rK   rh   rw   rL   rM   rx   )	selfry   rh   rv   rK   rw   rL   rM   rx   r0   r0   r1   __init__   s   
z_PLS.__init__c                 C   s  t || | j|tj| jdd}t|dtj| jdd}|jdkr&|dd}|jd }|jd }|jd }| j	}| j
d	krJ|}t|d
tjd|d nt|||}t|d
tjd|d | jdvrhtd| j d| j
dk| _| j}t||| j\}	}
| _| _| _| _t||f| _t||f| _t||f| _t||f| _t||f| _t||f| _g | _t |
j!j"}t#|D ]}| jdkrtj$t%|
d| k dd}d|
dd|f< zt&|	|
| j'| j(| j)|d\}}}W n$ t*y } zt+|dkr t,-d|  W Y d}~ nd}~ww | j.| n| jdkr*t/|	|
\}}t0|| t1|	|}|r;d}nt1||}t1|
|| }t1||	t1|| }|	t2||8 }	| j
dkrwt1||
t1|| }|
t2||8 }
| j
d	krt1||
t1|| }|
t2||8 }
|| jdd|f< || jdd|f< || jdd|f< || jdd|f< || jdd|f< || jdd|f< qt1| jt3t1| jj4| jdd| _5t1| jt3t1| jj4| jdd| _6t1| j5| jj4| _7| j7| j j4| _7| j| _8| j5jd | _9| S )  Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : object
            Fitted model.
        r   r   rx   ensure_min_samplesrJ   F
input_namer   rx   	ensure_2dr@   r   rt   ry   min_valmax_val)r   ru   z+algorithm should be 'svd' or 'nipals', got .	canonicalru   
   ra   rd   N)rK   rL   rM   rN   r=   z$Y residual is constant at iteration r   )r   ):r   _validate_datar    float64rx   r   ndimreshaperF   ry   rv   r   numbersIntegralminrw   
ValueError_norm_y_weightsrm   rh   _x_mean_y_mean_x_std_y_stdzeros
x_weights_
y_weights_	_x_scores	_y_scoresx_loadings_y_loadings_n_iter_r"   r   r#   rD   allr8   rZ   rK   rL   rM   rC   strrG   rH   appendr`   rr   r'   outerr   rB   x_rotations_y_rotations__coef_
intercept__n_features_out)rz   rI   rJ   npqry   rank_upper_boundrN   XkYkY_epskYk_maskrU   rW   r   rP   x_scoresy_ssy_scores
x_loadings
y_loadingsr0   r0   r1   fit   s   







	
	z_PLS.fitNc                 C   s   t |  | j||tdd}|| j8 }|| j }t|| j}|durKt|dd|td}|j	dkr6|
dd}|| j8 }|| j }t|| j}||fS |S )a.  Apply the dimension reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to transform.

        Y : array-like of shape (n_samples, n_targets), default=None
            Target vectors.

        copy : bool, default=True
            Whether to copy `X` and `Y`, or perform in-place normalization.

        Returns
        -------
        x_scores, y_scores : array-like or tuple of array-like
            Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise.
        Frx   r   resetNrJ   )r   r   rx   r   r@   r   )r   r   r   r   r   r    r'   r   r   r   r   r   r   r   )rz   rI   rJ   rx   r   r   r0   r0   r1   	transformm  s    





z_PLS.transformc                 C   s   t |  t|dtd}t|| jj}|| j9 }|| j7 }|dur>t|dtd}t|| j	j}|| j
9 }|| j7 }||fS |S )ae  Transform data back to its original space.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_components)
            New data, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        Y : array-like of shape (n_samples, n_components)
            New target, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        Returns
        -------
        X_reconstructed : ndarray of shape (n_samples, n_features)
            Return the reconstructed `X` data.

        Y_reconstructed : ndarray of shape (n_samples, n_targets)
            Return the reconstructed `X` target. Only returned when `Y` is given.

        Notes
        -----
        This transformation will only be exact if `n_components=n_features`.
        rI   )r   r   NrJ   )r   r   r   r    matmulr   rB   r   r   r   r   r   )rz   rI   rJ   X_reconstructedY_reconstructedr0   r0   r1   inverse_transform  s   



z_PLS.inverse_transformc                 C   sD   t |  | j||tdd}|| j8 }|| j }|| jj }|| j S )aU  Predict targets of given samples.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples.

        copy : bool, default=True
            Whether to copy `X` and `Y`, or perform in-place normalization.

        Returns
        -------
        y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Returns predicted values.

        Notes
        -----
        This call requires the estimation of a matrix of shape
        `(n_features, n_targets)`, which may be an issue in high dimensional
        space.
        Fr   )r   r   r   r   r   r   rB   r   )rz   rI   rx   Ypredr0   r0   r1   predict  s   


z_PLS.predictc                 C      |  ||||S )a  Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        y : array-like of shape (n_samples, n_targets), default=None
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : ndarray of shape (n_samples, n_components)
            Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise.
        r   r   rz   rI   yr0   r0   r1   fit_transform     z_PLS.fit_transformc                 C   s0   t | drt| ddrtdt d| _| jjS )z%The coefficients of the linear model.r   _coef_warningTzThe attribute `coef_` will be transposed in version 1.3 to be consistent with other linear models in scikit-learn. Currently, `coef_` has a shape of (n_features, n_targets) and in the future it will have a shape of (n_targets, n_features).F)hasattrgetattrrG   rH   FutureWarningr   r   rB   rz   r0   r0   r1   coef_  s   z
_PLS.coef_c                 C   s
   dddS )NTF)
poor_score
requires_yr0   r   r0   r0   r1   
_more_tags  s   
z_PLS._more_tagsr   )NTr6   T)__name__
__module____qualname____doc__r   r{   r   r   r   r   r   propertyr   r   r0   r0   r0   r1   rs      s,    	 
(
'
,

rs   )	metaclassc                       s<   e Zd ZdZ	dddddd fddZ fd	d
Z  ZS )r   a  PLS regression.

    PLSRegression is also known as PLS2 or PLS1, depending on the number of
    targets.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in :term:`fit` before applying centering,
        and potentially scaling. If `False`, these operations will be done
        inplace, modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training samples.

    y_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training targets.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_features, n_targets)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSRegression
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> pls2 = PLSRegression(n_components=2)
    >>> pls2.fit(X, Y)
    PLSRegression()
    >>> Y_pred = pls2.predict(X)
    r   Tr4   r5   rh   rL   rM   rx   c             
          t  j||ddd|||d d S )Nrt   r3   ru   ry   rh   rv   rK   rw   rL   rM   rx   superr{   rz   ry   rh   rL   rM   rx   	__class__r0   r1   r{   t     
zPLSRegression.__init__c                    s"   t  || | j| _| j| _| S )r|   )r   r   r   	x_scores_r   	y_scores_)rz   rI   rJ   r   r0   r1   r     s   zPLSRegression.fitr   )r   r   r   r   r{   r   __classcell__r0   r0   r   r1   r     s    ir   c                       s2   e Zd ZdZ	d
dddddd fdd	Z  ZS )r   a  Partial Least Squares transformer and regressor.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    algorithm : {'nipals', 'svd'}, default='nipals'
        The algorithm used to estimate the first singular vectors of the
        cross-covariance matrix. 'nipals' uses the power method while 'svd'
        will compute the whole SVD.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_features, n_targets)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component. Empty if `algorithm='svd'`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    CCA : Canonical Correlation Analysis.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSCanonical
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> plsca = PLSCanonical(n_components=2)
    >>> plsca.fit(X, Y)
    PLSCanonical()
    >>> X_c, Y_c = plsca.transform(X, Y)
    r   Tru   r4   r5   )rh   rw   rL   rM   rx   c             
      s    t  j||dd||||d d S )Nr   r3   r   r   )rz   ry   rh   rw   rL   rM   rx   r   r0   r1   r{     s   

zPLSCanonical.__init__r   r   r   r   r   r{   r   r0   r0   r   r1   r     s    ir   c                       s0   e Zd ZdZ	d	ddddd fddZ  ZS )
CCAap  Canonical Correlation Analysis, also known as "Mode B" PLS.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_features, n_targets)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import CCA
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> cca = CCA(n_components=1)
    >>> cca.fit(X, Y)
    CCA(n_components=1)
    >>> X_c, Y_c = cca.transform(X, Y)
    r   Tr4   r5   r   c             
      r   )Nr   r?   ru   r   r   r   r   r0   r1   r{   q  r   zCCA.__init__r   r   r0   r0   r   r1   r     s    Xr   c                   @   s>   e Zd ZdZddddddZdd Zdd
dZdddZd	S )r   a  Partial Least Square SVD.

    This transformer simply performs a SVD on the cross-covariance matrix
    `X'Y`. It is able to project both the training data `X` and the targets
    `Y`. The training data `X` is projected on the left singular vectors, while
    the targets are projected on the right singular vectors.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        The number of components to keep. Should be in `[1,
        min(n_samples, n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    copy : bool, default=True
        Whether to copy `X` and `Y` in fit before applying centering, and
        potentially scaling. If `False`, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the SVD of the cross-covariance matrix.
        Used to project `X` in :meth:`transform`.

    y_weights_ : ndarray of (n_targets, n_components)
        The right singular vectors of the SVD of the cross-covariance matrix.
        Used to project `X` in :meth:`transform`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.
    CCA : Canonical Correlation Analysis.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cross_decomposition import PLSSVD
    >>> X = np.array([[0., 0., 1.],
    ...               [1., 0., 0.],
    ...               [2., 2., 2.],
    ...               [2., 5., 4.]])
    >>> Y = np.array([[0.1, -0.2],
    ...               [0.9, 1.1],
    ...               [6.2, 5.9],
    ...               [11.9, 12.3]])
    >>> pls = PLSSVD(n_components=2).fit(X, Y)
    >>> X_c, Y_c = pls.transform(X, Y)
    >>> X_c.shape, Y_c.shape
    ((4, 2), (4, 2))
    r   T)rh   rx   c                C   s   || _ || _|| _d S r6   )ry   rh   rx   )rz   ry   rh   rx   r0   r0   r1   r{     s   
zPLSSVD.__init__c           
      C   s  t || | j|tj| jdd}t|dtj| jdd}|jdkr&|dd}| j}t	|j
d |j
d |j
d }t|d	tjd|d
 t||| j\}}| _| _| _| _t|j|}t|dd\}}}|ddd|f }|d| }t||\}}|j}	|| _|	| _| jj
d | _| S )aJ  Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training samples.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Targets.

        Returns
        -------
        self : object
            Fitted estimator.
        r   r}   rJ   Fr   r@   r   r   ry   r   r[   N)r   r   r    r   rx   r   r   r   ry   r   rF   r   r   r   rm   rh   r   r   r   r   r'   rB   r   r   r   r   r   )
rz   rI   rJ   ry   r   r\   r]   r*   r_   Vr0   r0   r1   r     s>   

z
PLSSVD.fitNc                 C   s   t |  | j|tjdd}|| j | j }t|| j}|durGt|ddtjd}|j	dkr4|
dd}|| j | j }t|| j}||fS |S )a  
        Apply the dimensionality reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to be transformed.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets),                 default=None
            Targets.

        Returns
        -------
        x_scores : array-like or tuple of array-like
            The transformed data `X_tranformed` if `Y is not None`,
            `(X_transformed, Y_transformed)` otherwise.
        F)r   r   NrJ   )r   r   r   r@   r   )r   r   r    r   r   r   r'   r   r   r   r   r   r   r   )rz   rI   rJ   Xrr   Yrr   r0   r0   r1   r     s   
zPLSSVD.transformc                 C   r   )a  Learn and apply the dimensionality reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training samples.

        y : array-like of shape (n_samples,) or (n_samples, n_targets),                 default=None
            Targets.

        Returns
        -------
        out : array-like or tuple of array-like
            The transformed data `X_tranformed` if `Y is not None`,
            `(X_transformed, Y_transformed)` otherwise.
        r   r   r0   r0   r1   r   !  r   zPLSSVD.fit_transformr   r6   )r   r   r   r   r{   r   r   r   r0   r0   r0   r1   r     s    C
8 r   )r3   r4   r5   Fr   )+r   r   rG   abcr   r   numpyr    scipy.linalgr   baser   r   r   r	   r
   utilsr   r   r   utils.fixesr   r   utils.extmathr   utils.validationr   r   
exceptionsr   __all__r   r   r2   rZ   r`   rm   rr   rs   r   r   r   r   r0   r0   r0   r1   <module>   sP    
;



  n ~g