
    קgA)                     z   d Z ddgZddlmZmZ ddlZddlmZmZ ddl	m
Z
mZ 	 	 dd	ed
edee         dee         def
dZ	 	 	 dd	ed
ee         dee         dee         deeeef         f
dZ	 	 	 dd	ed
ee         dee         dee         deeeef         f
dZ	 	 	 dd	ed
ee         dededeeeef         f
dZdS )zBImplement various linear algebra algorithms for low rank matrices.svd_lowrankpca_lowrank    )OptionalTupleN)_linalg_utilsTensor)handle_torch_functionhas_torch_function   AqniterMreturnc                    |dn|}|                                  st          j        |           n| j        }t          j        }t          j        | j        d         ||| j                  } || |          }|| |||          z
  }t
          j	        
                    |          j        }t          |          D ]}	 || j        |          }|| ||j        |          z
  }t
          j	        
                    |          j        } || |          }|| |||          z
  }t
          j	        
                    |          j        }|S )a  Return tensor :math:`Q` with :math:`q` orthonormal columns such
    that :math:`Q Q^H A` approximates :math:`A`. If :math:`M` is
    specified, then :math:`Q` is such that :math:`Q Q^H (A - M)`
    approximates :math:`A - M`, without instantiating any tensors
    of the size of :math:`A` or :math:`M`.

    .. note:: The implementation is based on Algorithm 4.4 from
              Halko et al., 2009.

    .. note:: For an adequate approximation of a k-rank matrix
              :math:`A`, where k is not known in advance but could be
              estimated, the number of :math:`Q` columns, q, can be
              chosen according to the following criteria: in general,
              :math:`k <= q <= min(2*k, m, n)`. For large low-rank
              matrices, take :math:`q = k + 5..10`.  If k is
              relatively small compared to :math:`min(m, n)`, choosing
              :math:`q = k + 0..2` may be sufficient.

    .. note:: To obtain repeatable results, reset the seed for the
              pseudorandom number generator.

    Args:
        A (Tensor): the input tensor of size :math:`(*, m, n)`

        q (int): the dimension of the subspace spanned by the
                 columns of :math:`Q`.

        niter (int, optional): the number of subspace iterations to
                               conduct; ``niter`` must be a
                               nonnegative integer. In most cases, the
                               default value 2 is more than enough.

        M (Tensor, optional): the input tensor's mean of size
                              :math:`(*, m, n)`.
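
    Example (an illustrative sketch; the matrix size, rank ``q``, and the
    tolerance below are arbitrary choices)::

        >>> A = torch.randn(100, 6) @ torch.randn(6, 50)  # a rank-6 (100, 50) matrix
        >>> Q = get_approximate_basis(A, q=10)
        >>> Q.shape
        torch.Size([100, 10])
        >>> torch.dist(Q @ Q.mH @ A, A) / A.norm() < 1e-4  # small relative error
        tensor(True)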

    References::
        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <http://arxiv.org/abs/0909.4061>`_).
    """
    niter = 2 if niter is None else niter
    dtype = _utils.get_floating_dtype(A) if not A.is_complex() else A.dtype
    matmul = _utils.matmul

    # Project A (or A - M) onto a random subspace and orthonormalize the result.
    R = torch.randn(A.shape[-1], q, dtype=dtype, device=A.device)
    X = matmul(A, R)
    if M is not None:
        X = X - matmul(M, R)
    Q = torch.linalg.qr(X).Q

    # Subspace (power) iterations refine the estimate of range(A - M).
    for _ in range(niter):
        X = matmul(A.mH, Q)
        if M is not None:
            X = X - matmul(M.mH, Q)
        Q = torch.linalg.qr(X).Q
        X = matmul(A, Q)
        if M is not None:
            X = X - matmul(M, Q)
        Q = torch.linalg.qr(X).Q
    return Q


def svd_lowrank(
    A: Tensor,
    q: Optional[int] = 6,
    niter: Optional[int] = 2,
    M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Return the singular value decomposition ``(U, S, V)`` of a matrix,
    batches of matrices, or a sparse matrix :math:`A` such that
    :math:`A \approx U \operatorname{diag}(S) V^{\text{H}}`. If :math:`M` is given, then
    SVD is computed for the matrix :math:`A - M`.

    .. note:: The implementation is based on Algorithm 5.1 from
              Halko et al., 2009.

    .. note:: For an adequate approximation of a k-rank matrix
              :math:`A`, where k is not known in advance but could be
              estimated, the number of :math:`Q` columns, q, can be
              chosen according to the following criteria: in general,
              :math:`k <= q <= min(2*k, m, n)`. For large low-rank
              matrices, take :math:`q = k + 5..10`.  If k is
              relatively small compared to :math:`min(m, n)`, choosing
              :math:`q = k + 0..2` may be sufficient.

    .. note:: This is a randomized method. To obtain repeatable results,
              set the seed for the pseudorandom number generator.

    .. note:: In general, use the full-rank SVD implementation
              :func:`torch.linalg.svd` for dense matrices; it is
              typically about 10x faster. The low-rank SVD is useful
              for huge sparse matrices that :func:`torch.linalg.svd`
              cannot handle.

    Args:
        A (Tensor): the input tensor of size :math:`(*, m, n)`

        q (int, optional): a slightly overestimated rank of A.

        niter (int, optional): the number of subspace iterations to
                               conduct; niter must be a nonnegative
                               integer, and defaults to 2.

        M (Tensor, optional): the input tensor's mean of size
                              :math:`(*, m, n)`, which will be broadcasted
                              to the size of A in this function.
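
    Example (an illustrative sketch; the matrix size and the slightly
    overestimated rank ``q`` below are arbitrary choices)::

        >>> A = torch.randn(100, 5) @ torch.randn(5, 40)  # a rank-5 (100, 40) matrix
        >>> U, S, V = torch.svd_lowrank(A, q=8)
        >>> U.shape, S.shape, V.shape
        (torch.Size([100, 8]), torch.Size([8]), torch.Size([40, 8]))
        >>> torch.dist(U @ torch.diag(S) @ V.mH, A) / A.norm() < 1e-4
        tensor(True)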

    References::
        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <https://arxiv.org/abs/0909.4061>`_).

    """
    if not torch.jit.is_scripting():
        tensor_ops = (A, M)
        if not set(map(type, tensor_ops)).issubset(
            (torch.Tensor, type(None))
        ) and has_torch_function(tensor_ops):
            return handle_torch_function(
                svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M
            )
    return _svd_lowrank(A, q=q, niter=niter, M=M)


def _svd_lowrank(
    A: Tensor,
    q: Optional[int] = 6,
    niter: Optional[int] = 2,
    M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    # Algorithm 5.1 in Halko et al., 2009.
    q = 6 if q is None else q
    m, n = A.shape[-2:]
    matmul = _utils.matmul
    if M is not None:
        M = M.broadcast_to(A.size())

    # Assume that A is "tall"; otherwise, work with its adjoint.
    if m < n:
        A = A.mH
        if M is not None:
            M = M.mH

    Q = get_approximate_basis(A, q, niter=niter, M=M)
    B = matmul(Q.mH, A)
    if M is not None:
        B = B - matmul(Q.mH, M)
    U, S, Vh = torch.linalg.svd(B, full_matrices=False)
    V = Vh.mH
    U = Q.matmul(U)

    if m < n:
        U, V = V, U

    return U, S, V


def pca_lowrank(
    A: Tensor, q: Optional[int] = None, center: bool = True, niter: int = 2
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Performs linear Principal Component Analysis (PCA) on a low-rank
    matrix, batches of such matrices, or sparse matrix.

    This function returns a namedtuple ``(U, S, V)`` which is the
    nearly optimal approximation of a singular value decomposition of
    a centered matrix :math:`A` such that :math:`A \approx U \operatorname{diag}(S) V^{\text{H}}`

    .. note:: The relation of ``(U, S, V)`` to PCA is as follows:

                - :math:`A` is a data matrix with ``m`` samples and
                  ``n`` features

                - the :math:`V` columns represent the principal directions

                - :math:`S ** 2 / (m - 1)` contains the eigenvalues of
                  :math:`A^T A / (m - 1)`, which is the covariance of
                  ``A`` when ``center=True`` is provided.

                - ``matmul(A, V[:, :k])`` projects data to the first k
                  principal components

    .. note:: In contrast to the standard SVD, the sizes of the returned
              matrices depend on the specified rank ``q`` as follows:

                - :math:`U` is an m x q matrix

                - :math:`S` is a q-vector

                - :math:`V` is an n x q matrix

    .. note:: To obtain repeatable results, reset the seed for the
              pseudorandom number generator.

    Args:

        A (Tensor): the input tensor of size :math:`(*, m, n)`

        q (int, optional): a slightly overestimated rank of
                           :math:`A`. By default, ``q = min(6, m,
                           n)``.

        center (bool, optional): if True, center the input tensor;
                                 otherwise, assume that the input is
                                 already centered.

        niter (int, optional): the number of subspace iterations to
                               conduct; niter must be a nonnegative
                               integer, and defaults to 2.
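
    Example (an illustrative sketch; the data size, true rank, and the
    number of kept components below are arbitrary choices)::

        >>> A = torch.randn(1000, 3) @ torch.randn(3, 20)  # 1000 samples, 20 features, rank 3
        >>> U, S, V = torch.pca_lowrank(A, q=5)
        >>> V.shape  # columns are the principal directions
        torch.Size([20, 5])
        >>> projected = A @ V[:, :3]  # project the data onto the first 3 components
        >>> explained_var = S**2 / (A.shape[0] - 1)  # eigenvalues of the covariance of A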

    References::

        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <http://arxiv.org/abs/0909.4061>`_).

    """
    if not torch.jit.is_scripting():
        if type(A) is not torch.Tensor and has_torch_function((A,)):
            return handle_torch_function(
                pca_lowrank, (A,), A, q=q, center=center, niter=niter
            )

    (m, n) = A.shape[-2:]

    if q is None:
        q = min(6, m, n)
    elif not (q >= 0 and q <= min(m, n)):
        raise ValueError(
            f"q(={q}) must be non-negative integer and not greater than min(m, n)={min(m, n)}"
        )
    if not (niter >= 0):
        raise ValueError(f"niter(={niter}) must be non-negative integer")

    dtype = _utils.get_floating_dtype(A)

    if not center:
        return _svd_lowrank(A, q, niter=niter, M=None)

    if _utils.is_sparse(A):
        if len(A.shape) != 2:
            raise ValueError("pca_lowrank input is expected to be 2-dimensional tensor")
        # Column means of A, computed without densifying it; c is a sparse
        # 1-D tensor of length n, reshaped below into a sparse (n, 1) column.
        c = torch.sparse.sum(A, dim=(-2,)) / m
        column_indices = c.indices()[0]
        indices = torch.zeros(
            2,
            len(column_indices),
            dtype=column_indices.dtype,
            device=column_indices.device,
        )
        indices[0] = column_indices
        C_t = torch.sparse_coo_tensor(
            indices, c.values(), (n, 1), dtype=dtype, device=A.device
        )

        # Build the rank-1 mean matrix M (every row equals the column means)
        # so that _svd_lowrank can operate on A - M without modifying the
        # sparse input A.
        ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
        M = torch.sparse.mm(C_t, ones_m1_t).mT
        return _svd_lowrank(A, q, niter=niter, M=M)
    else:
        C = A.mean(dim=(-2,), keepdim=True)
        return _svd_lowrank(A - C, q, niter=niter, M=None)