"""Locally Linear Embedding"""

import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from scipy.sparse.linalg import eigsh

from ..base import (
    BaseEstimator,
    TransformerMixin,
    _UnstableArchMixin,
    _ClassNamePrefixFeaturesOutMixin,
)
from ..utils import check_random_state, check_array
from ..utils._arpack import _init_arpack_v0
from ..utils.extmath import stable_cumsum
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors


def barycenter_weights(X, Y, indices, reg=1e-3):
    """Compute barycenter weights of X from Y along the first axis

    We estimate the weights to assign to each point in Y[indices] to recover
    the point X[i]. The barycenter weights sum to 1.
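
    Concretely (as implemented below), for each point ``X[i]`` the weights
    ``w`` solve the regularized linear system ``(G + R * I) w = 1`` with
    ``G = C C^T``, ``C = Y[indices[i]] - X[i]`` and ``R = reg * trace(G)``
    (or ``reg`` when the trace is not positive); the solution is then
    rescaled to sum to one.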

    Parameters
    ----------
    X : array-like, shape (n_samples, n_dim)

    Y : array-like, shape (n_samples, n_dim)

    indices : array-like, shape (n_samples, n_neighbors)
        Indices of the points in Y used to compute the barycenter

    reg : float, default=1e-3
        amount of regularization to add for the problem to be
        well-posed in the case of n_neighbors > n_dim

    Returns
    -------
    B : array-like, shape (n_samples, n_neighbors)

    Notes
    -----
    See developers note for more information.
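
    Examples
    --------
    A minimal, illustrative call on synthetic data (this example is not part
    of the original documentation):

    >>> import numpy as np
    >>> Y = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    >>> X = np.array([[0.5, 0.5]])
    >>> indices = np.array([[0, 1, 2]])
    >>> W = barycenter_weights(X, Y, indices)
    >>> W.shape
    (1, 3)
    >>> np.allclose(W.sum(axis=1), 1.0)
    True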
    """
    X = check_array(X, dtype=FLOAT_DTYPES)
    Y = check_array(Y, dtype=FLOAT_DTYPES)
    indices = check_array(indices, dtype=int)

    n_samples, n_neighbors = indices.shape
    assert X.shape[0] == n_samples

    B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
    v = np.ones(n_neighbors, dtype=X.dtype)

    # this might raise a LinalgError if G is singular and has trace zero
    for i, ind in enumerate(indices):
        A = Y[ind]
        C = A - X[i]  # broadcasting
        G = np.dot(C, C.T)
        trace = np.trace(G)
        if trace > 0:
            R = reg * trace
        else:
            R = reg
        G.flat[:: n_neighbors + 1] += R
        w = solve(G, v, sym_pos=True)
        B[i, :] = w / np.sum(w)
    return B


def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=None):
    """Computes the barycenter weighted graph of k-Neighbors for points in X

    Parameters
    ----------
    X : {array-like, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array or a NearestNeighbors object.

    n_neighbors : int
        Number of neighbors for each sample.

    reg : float, default=1e-3
        Amount of regularization added to the local Gram matrix when
        solving the least-squares reconstruction problem.

    n_jobs : int or None, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.
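
    Examples
    --------
    A small, illustrative call on synthetic data (this example is not part of
    the original documentation):

    >>> import numpy as np
    >>> X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    >>> G = barycenter_kneighbors_graph(X, n_neighbors=2)
    >>> G.shape
    (4, 4)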

    See Also
    --------
    sklearn.neighbors.kneighbors_graph
    sklearn.neighbors.radius_neighbors_graph
    """
    knn = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs).fit(X)
    X = knn._fit_X
    n_samples = knn.n_samples_fit_
    ind = knn.kneighbors(X, return_distance=False)[:, 1:]
    data = barycenter_weights(X, X, ind, reg=reg)
    indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
    return csr_matrix(
        (data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples)
    )


def null_space(
    M, k, k_skip=1, eigen_solver="arpack", tol=1e-6, max_iter=100, random_state=None
):
    """
    Find the null space of a matrix M.

    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite

    k : int
        Number of eigenvalues/vectors to return

    k_skip : int, default=1
        Number of low eigenvalues to skip.

    eigen_solver : {'auto', 'arpack', 'dense'}, default='arpack'
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, default=1e-6
        Tolerance for 'arpack' method.
        Not used if eigen_solver=='dense'.

    max_iter : int, default=100
        Maximum number of iterations for 'arpack' method.
        Not used if eigen_solver=='dense'

    random_state : int, RandomState instance, default=None
        Determines the random number generator when ``eigen_solver`` == 'arpack'.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
    """
    if eigen_solver == "auto":
        if M.shape[0] > 200 and k + k_skip < 10:
            eigen_solver = "arpack"
        else:
            eigen_solver = "dense"

    if eigen_solver == "arpack":
        v0 = _init_arpack_v0(M.shape[0], random_state)
        try:
            eigen_values, eigen_vectors = eigsh(
                M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0
            )
        except RuntimeError as e:
            raise ValueError(
                "Error in determining null-space with ARPACK. Error message: "
                "'%s'. Note that eigen_solver='arpack' can fail when the "
                "weight matrix is singular or otherwise ill-behaved. In that "
                "case, eigen_solver='dense' is recommended. See online "
                "documentation for more information." % e
            ) from e

        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == "dense":
        if hasattr(M, "toarray"):
            M = M.toarray()
        eigen_values, eigen_vectors = eigh(
            M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True
        )
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)


def locally_linear_embedding(
    X,
    n_neighbors,
    n_components,
    *,
    reg=1e-3,
    eigen_solver="auto",
    tol=1e-6,
    max_iter=100,
    method="standard",
    hessian_tol=1e-4,
    modified_tol=1e-12,
    random_state=None,
    n_jobs=None,
):
    """Perform a Locally Linear Embedding analysis on the data.

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    X : {array-like, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array or a NearestNeighbors object.

    n_neighbors : int
        number of neighbors to consider for each point.

    n_components : int
        number of coordinates for the manifold.

    reg : float, default=1e-3
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.

    eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
        auto : algorithm will attempt to choose the best method for input data

        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.

        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, default=1e-6
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.

    max_iter : int, default=100
        maximum number of iterations for the arpack solver.

    method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard'
        standard : use the standard locally linear embedding algorithm.
                   see reference [1]_
        hessian  : use the Hessian eigenmap method.  This method requires
                   n_neighbors > n_components * (n_components + 3) / 2.
                   see reference [2]_
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]_
        ltsa     : use local tangent space alignment algorithm
                   see reference [4]_

    hessian_tol : float, default=1e-4
        Tolerance for Hessian eigenmapping method.
        Only used if method == 'hessian'

    modified_tol : float, default=1e-12
        Tolerance for modified LLE method.
        Only used if method == 'modified'

    random_state : int, RandomState instance, default=None
        Determines the random number generator when ``eigen_solver`` == 'arpack'.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    n_jobs : int or None, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Returns
    -------
    Y : array-like, shape [n_samples, n_components]
        Embedding vectors.

    squared_error : float
        Reconstruction error for the embedding vectors. Equivalent to
        ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.

    References
    ----------

    .. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).
    .. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).
    .. [3] Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)
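
    Examples
    --------
    A small, illustrative run on synthetic data (this example is not part of
    the original documentation):

    >>> from sklearn.datasets import make_swiss_roll
    >>> X, _ = make_swiss_roll(n_samples=100, random_state=0)
    >>> embedding, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
    >>> embedding.shape
    (100, 2)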
    """
    if eigen_solver not in ("auto", "arpack", "dense"):
        raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)

    if method not in ("standard", "hessian", "modified", "ltsa"):
        raise ValueError("unrecognized method '%s'" % method)

    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
    nbrs.fit(X)
    X = nbrs._fit_X

    N, d_in = X.shape

    if n_components > d_in:
        raise ValueError(
            "output dimension must be less than or equal to input dimension"
        )
    if n_neighbors >= N:
        raise ValueError(
            "Expected n_neighbors <= n_samples,  but n_samples = %d, n_neighbors = %d"
            % (N, n_neighbors)
        )

    if n_neighbors <= 0:
        raise ValueError("n_neighbors must be positive")

    M_sparse = eigen_solver != "dense"

    if method == "standard":
        W = barycenter_kneighbors_graph(
            nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs
        )

        # we'll compute M = (I-W)'(I-W)
        # depending on the solver, we'll do this differently
        if M_sparse:
            M = eye(*W.shape, format=W.format) - W
            M = (M.T * M).tocsr()
        else:
            M = (W.T * W - W.T - W).toarray()
            M.flat[:: M.shape[0] + 1] += 1  # M = W'W - W' - W + I

    elif method == "hessian":
        dp = n_components * (n_components + 1) // 2

        if n_neighbors <= n_components + dp:
            raise ValueError(
                "for method='hessian', n_neighbors must be "
                "greater than "
                "[n_components * (n_components + 3) / 2]"
            )

        neighbors = nbrs.kneighbors(
            X, n_neighbors=n_neighbors + 1, return_distance=False
        )
        neighbors = neighbors[:, 1:]

        Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
        Yi[:, 0] = 1

        M = np.zeros((N, N), dtype=np.float64)

        use_svd = n_neighbors > d_in

        for i in range(N):
            Gi = X[neighbors[i]]
            Gi -= Gi.mean(0)

            # build Hessian estimator
            if use_svd:
                U = svd(Gi, full_matrices=0)[0]
            else:
                Ci = np.dot(Gi, Gi.T)
                U = eigh(Ci)[1][:, ::-1]

            Yi[:, 1 : 1 + n_components] = U[:, :n_components]

            j = 1 + n_components
            for k in range(n_components):
                Yi[:, j : j + n_components - k] = U[:, k : k + 1] * U[:, k:n_components]
                j += n_components - k

            Q, R = qr(Yi)

            w = Q[:, n_components + 1 :]
            S = w.sum(0)

            S[np.where(abs(S) < hessian_tol)] = 1
            w /= S

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(w, w.T)

        if M_sparse:
            M = csr_matrix(M)

    elif method == "modified":
        if n_neighbors < n_components:
            raise ValueError("modified LLE requires n_neighbors >= n_components")

        neighbors = nbrs.kneighbors(
            X, n_neighbors=n_neighbors + 1, return_distance=False
        )
        neighbors = neighbors[:, 1:]

        # find the eigenvectors and eigenvalues of each local covariance
        # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
        # where the columns are eigenvectors
        V = np.zeros((N, n_neighbors, n_neighbors))
        nev = min(d_in, n_neighbors)
        evals = np.zeros([N, nev])

        # choose the most efficient way to find the eigenvectors
        use_svd = n_neighbors > d_in

        if use_svd:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                V[i], evals[i], _ = svd(X_nbrs, full_matrices=True)
            evals **= 2
        else:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                C_nbrs = np.dot(X_nbrs, X_nbrs.T)
                evi, vi = eigh(C_nbrs)
                evals[i] = evi[::-1]
                V[i] = vi[:, ::-1]

        # find regularized weights: this is like normal LLE, but uses the
        # eigendecompositions computed above instead of solving per point
        reg = 1e-3 * evals.sum(1)

        tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
        tmp[:, :nev] /= evals + reg[:, None]
        tmp[:, nev:] /= reg[:, None]

        w_reg = np.zeros((N, n_neighbors))
        for i in range(N):
            w_reg[i] = np.dot(V[i], tmp[i])
        w_reg /= w_reg.sum(1)[:, None]

        # calculate eta: the median of the ratio of small to large eigenvalues
        # across the points.  This is used to determine s_i, below
        rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
        eta = np.median(rho)

        # find s_i, the size of the "almost null space" for each point:
        # this is the size of the largest set of eigenvalues
        # such that Sum[v; v in set]/Sum[v; v not in set] < eta
        s_range = np.zeros(N, dtype=int)
        evals_cumsum = stable_cumsum(evals, 1)
        eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
        for i in range(N):
            s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
        s_range += n_neighbors - nev  # number of zero eigenvalues

        # Now calculate M.
        # This is the [N x N] matrix whose null space is the desired embedding
        M = np.zeros((N, N), dtype=np.float64)
        for i in range(N):
            s_i = s_range[i]

            # select bottom s_i eigenvectors and calculate alpha
            Vi = V[i, :, n_neighbors - s_i :]
            alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)

            # compute Householder matrix which satisfies
            #  Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
            # using prescription from paper
            h = np.full(s_i, alpha_i) - np.dot(Vi.T, np.ones(n_neighbors))

            norm_h = np.linalg.norm(h)
            if norm_h < modified_tol:
                h *= 0
            else:
                h /= norm_h

            # Householder matrix is
            #  >> Hi = np.identity(s_i) - 2*np.outer(h,h)
            # Then the weight matrix is
            #  >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
            # We do this much more efficiently:
            Wi = Vi - 2 * np.outer(np.dot(Vi, h), h) + (1 - alpha_i) * w_reg[i, :, None]

            # Update M as follows:
            # >> W_hat = np.zeros( (N,s_i) )
            # >> W_hat[neighbors[i],:] = Wi
            # >> W_hat[i] -= 1
            # >> M += np.dot(W_hat,W_hat.T)
            # We can do this much more efficiently:
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
            Wi_sum1 = Wi.sum(1)
            M[i, neighbors[i]] -= Wi_sum1
            M[neighbors[i], i] -= Wi_sum1
            M[i, i] += s_i

        if M_sparse:
            M = csr_matrix(M)

    elif method == "ltsa":
        neighbors = nbrs.kneighbors(
            X, n_neighbors=n_neighbors + 1, return_distance=False
        )
        neighbors = neighbors[:, 1:]

        M = np.zeros((N, N))

        use_svd = n_neighbors > d_in

        for i in range(N):
            Xi = X[neighbors[i]]
            Xi -= Xi.mean(0)

            # compute n_components largest eigenvalues of Xi * Xi^T
            if use_svd:
                v = svd(Xi, full_matrices=True)[0]
            else:
                Ci = np.dot(Xi, Xi.T)
                v = eigh(Ci)[1][:, ::-1]

            Gi = np.zeros((n_neighbors, n_components + 1))
            Gi[:, 1:] = v[:, :n_components]
            Gi[:, 0] = 1.0 / np.sqrt(n_neighbors)

            GiGiT = np.dot(Gi, Gi.T)

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] -= GiGiT
            M[neighbors[i], neighbors[i]] += 1

    return null_space(
        M,
        n_components,
        k_skip=1,
        eigen_solver=eigen_solver,
        tol=tol,
        max_iter=max_iter,
        random_state=random_state,
    )


class LocallyLinearEmbedding(
    _ClassNamePrefixFeaturesOutMixin,
    TransformerMixin,
    _UnstableArchMixin,
    BaseEstimator,
):
    """Locally Linear Embedding.

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    n_neighbors : int, default=5
        Number of neighbors to consider for each point.

    n_components : int, default=2
        Number of coordinates for the manifold.

    reg : float, default=1e-3
        Regularization constant, multiplies the trace of the local covariance
        matrix of the distances.

    eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
        The solver used to compute the eigenvectors. The available options are:

        - `'auto'` : algorithm will attempt to choose the best method for input
          data.
        - `'arpack'` : use arnoldi iteration in shift-invert mode. For this
          method, M may be a dense matrix, sparse matrix, or general linear
          operator.
        - `'dense'`  : use standard dense matrix operations for the eigenvalue
          decomposition. For this method, M must be an array or matrix type.
          This method should be avoided for large problems.

        .. warning::
           ARPACK can be unstable for some problems.  It is best to try several
           random seeds in order to check results.

    tol : float, default=1e-6
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.

    max_iter : int, default=100
        Maximum number of iterations for the arpack solver.
        Not used if eigen_solver=='dense'.

    method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard'
        - `standard`: use the standard locally linear embedding algorithm. see
          reference [1]_
        - `hessian`: use the Hessian eigenmap method. This method requires
          ``n_neighbors > n_components * (n_components + 3) / 2``. see
          reference [2]_
        - `modified`: use the modified locally linear embedding algorithm.
          see reference [3]_
        - `ltsa`: use local tangent space alignment algorithm. see
          reference [4]_

    hessian_tol : float, default=1e-4
        Tolerance for Hessian eigenmapping method.
        Only used if ``method == 'hessian'``.

    modified_tol : float, default=1e-12
        Tolerance for modified LLE method.
        Only used if ``method == 'modified'``.

    neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \
                          default='auto'
        Algorithm to use for nearest neighbors search, passed to
        :class:`~sklearn.neighbors.NearestNeighbors` instance.

    random_state : int, RandomState instance, default=None
        Determines the random number generator when
        ``eigen_solver`` == 'arpack'. Pass an int for reproducible results
        across multiple function calls. See :term:`Glossary <random_state>`.

    n_jobs : int or None, default=None
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    embedding_ : array-like, shape [n_samples, n_components]
        Stores the embedding vectors

    reconstruction_error_ : float
        Reconstruction error associated with `embedding_`

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    nbrs_ : NearestNeighbors object
        Stores nearest neighbors instance, including BallTree or KDTree
        if applicable.

    See Also
    --------
    SpectralEmbedding : Spectral embedding for non-linear dimensionality
        reduction.
    TSNE : Distributed Stochastic Neighbor Embedding.

    References
    ----------

    .. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).
    .. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).
    .. [3] Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.manifold import LocallyLinearEmbedding
    >>> X, _ = load_digits(return_X_y=True)
    >>> X.shape
    (1797, 64)
    >>> embedding = LocallyLinearEmbedding(n_components=2)
    >>> X_transformed = embedding.fit_transform(X[:100])
    >>> X_transformed.shape
    (100, 2)
    """

    def __init__(
        self,
        *,
        n_neighbors=5,
        n_components=2,
        reg=1e-3,
        eigen_solver="auto",
        tol=1e-6,
        max_iter=100,
        method="standard",
        hessian_tol=1e-4,
        modified_tol=1e-12,
        neighbors_algorithm="auto",
        random_state=None,
        n_jobs=None,
    ):
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.reg = reg
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.method = method
        self.hessian_tol = hessian_tol
        self.modified_tol = modified_tol
        self.random_state = random_state
        self.neighbors_algorithm = neighbors_algorithm
        self.n_jobs = n_jobs

    def _fit_transform(self, X):
        self.nbrs_ = NearestNeighbors(
            n_neighbors=self.n_neighbors,
            algorithm=self.neighbors_algorithm,
            n_jobs=self.n_jobs,
        )

        random_state = check_random_state(self.random_state)
        X = self._validate_data(X, dtype=float)
        self.nbrs_.fit(X)
        self.embedding_, self.reconstruction_error_ = locally_linear_embedding(
            X=self.nbrs_,
            n_neighbors=self.n_neighbors,
            n_components=self.n_components,
            eigen_solver=self.eigen_solver,
            tol=self.tol,
            max_iter=self.max_iter,
            method=self.method,
            hessian_tol=self.hessian_tol,
            modified_tol=self.modified_tol,
            random_state=random_state,
            reg=self.reg,
            n_jobs=self.n_jobs,
        )
        self._n_features_out = self.embedding_.shape[1]

    def fit(self, X, y=None):
        """Compute the embedding vectors for data X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training set.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self : object
            Fitted `LocallyLinearEmbedding` class instance.
        """
        self._fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Compute the embedding vectors for data X and transform X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training set.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        self._fit_transform(X)
        return self.embedding_

    def transform(self, X):
        """
        Transform new points into embedding space.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Points to transform into the embedding space.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Embedding of the new points in low-dimensional space.

        Notes
        -----
        Because of scaling performed by this method, it is discouraged to use
        it together with methods that are not scale-invariant (like SVMs).
        """
        check_is_fitted(self)

        X = self._validate_data(X, reset=False)
        ind = self.nbrs_.kneighbors(
            X, n_neighbors=self.n_neighbors, return_distance=False
        )
        weights = barycenter_weights(X, self.nbrs_._fit_X, ind, reg=self.reg)
        X_new = np.empty((X.shape[0], self.n_components))
        for i in range(X.shape[0]):
            X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
        return X_new