
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
from ..superpoint import SuperPointConfig


class LightGlueConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`LightGlueForKeypointMatching`]. It is used to
instantiate a LightGlue model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the LightGlue
[ETH-CVG/lightglue_superpoint](https://huggingface.co/ETH-CVG/lightglue_superpoint) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    keypoint_detector_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SuperPointConfig`):
        The config object or dictionary of the keypoint detector.
    descriptor_dim (`int`, *optional*, defaults to 256):
        The dimension of the descriptors.
    num_hidden_layers (`int`, *optional*, defaults to 9):
        The number of self and cross attention layers.
    num_attention_heads (`int`, *optional*, defaults to 4):
        The number of heads in the multi-head attention.
    num_key_value_heads (`int`, *optional*):
        This is the number of key_value heads that should be used to implement Grouped Query Attention. If
        `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
        `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
        converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
        by meanpooling all the original heads within that group. For more details checkout [this
        paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
        `num_attention_heads`.
    depth_confidence (`float`, *optional*, defaults to 0.95):
        The confidence threshold used to perform early stopping.
    width_confidence (`float`, *optional*, defaults to 0.99):
        The confidence threshold used to prune points.
    filter_threshold (`float`, *optional*, defaults to 0.1):
        The confidence threshold used to filter matches.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    hidden_act (`str`, *optional*, defaults to `"gelu"`):
        The activation function to be used in the hidden layers.
    attention_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the attention probabilities.
    attention_bias (`bool`, *optional*, defaults to `True`):
        Whether to use a bias in the query, key, value and output projection layers during self-attention.
    trust_remote_code (`bool`, *optional*, defaults to `False`):
        Whether to trust remote code when using a keypoint detector other than SuperPoint.

Examples:
    ```python
    >>> from transformers import LightGlueConfig, LightGlueForKeypointMatching

    >>> # Initializing a LightGlue style configuration
    >>> configuration = LightGlueConfig()

    >>> # Initializing a model from the LightGlue style configuration
    >>> model = LightGlueForKeypointMatching(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
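
The keypoint detector and the attention layout can also be customized. The sketch below assumes
`SuperPointConfig` is exported by `transformers` and follows the argument names of the signature below:

    ```python
    >>> from transformers import LightGlueConfig, SuperPointConfig

    >>> # A keypoint detector config object (or an equivalent dict) can be passed explicitly
    >>> keypoint_detector_config = SuperPointConfig()

    >>> # descriptor_dim must be divisible by num_attention_heads;
    >>> # num_key_value_heads=1 selects Multi Query Attention (see num_key_value_heads above)
    >>> configuration = LightGlueConfig(
    ...     keypoint_detector_config=keypoint_detector_config,
    ...     descriptor_dim=256,
    ...     num_attention_heads=4,
    ...     num_key_value_heads=1,
    ... )
    ```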
    """

    model_type = "lightglue"
    sub_configs = {"keypoint_detector_config": AutoConfig}

    def __init__(
        self,
        keypoint_detector_config: SuperPointConfig = None,
        descriptor_dim: int = 256,
        num_hidden_layers: int = 9,
        num_attention_heads: int = 4,
        num_key_value_heads: int = None,
        depth_confidence: float = 0.95,
        width_confidence: float = 0.99,
        filter_threshold: float = 0.1,
        initializer_range: float = 0.02,
        hidden_act: str = "gelu",
        attention_dropout: float = 0.0,
        attention_bias: bool = True,
        trust_remote_code: bool = False,
        **kwargs,
    ):
        self.trust_remote_code = trust_remote_code

        if descriptor_dim % num_attention_heads != 0:
            raise ValueError("descriptor_dim % num_heads is different from zero")

        self.descriptor_dim = descriptor_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.depth_confidence = depth_confidence
        self.width_confidence = width_confidence
        self.filter_threshold = filter_threshold
        self.initializer_range = initializer_range

        # Resolve the keypoint detector config: a plain dict is turned into a config object,
        # falling back to `AutoConfig.from_pretrained` for model types not in `CONFIG_MAPPING`.
        if isinstance(keypoint_detector_config, dict):
            keypoint_detector_config["model_type"] = keypoint_detector_config.get("model_type", "superpoint")
            if keypoint_detector_config["model_type"] not in CONFIG_MAPPING:
                keypoint_detector_config = AutoConfig.from_pretrained(
                    keypoint_detector_config["_name_or_path"], trust_remote_code=self.trust_remote_code
                )
            else:
                keypoint_detector_config = CONFIG_MAPPING[keypoint_detector_config["model_type"]](
                    **keypoint_detector_config, attn_implementation="eager"
                )
        if keypoint_detector_config is None:
            keypoint_detector_config = CONFIG_MAPPING["superpoint"](attn_implementation="eager")

        self.keypoint_detector_config = keypoint_detector_config
        self.hidden_size = descriptor_dim
        self.intermediate_size = descriptor_dim * 2
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.attention_bias = attention_bias

        super().__init__(**kwargs)


__all__ = ["LightGlueConfig"]