
from __future__ import annotations

import re
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    import numpy as np
    from torch import Tensor

    from sentence_transformers.SentenceTransformer import SentenceTransformer


class SentenceEvaluator:
    """
    Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
    attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
    for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.

    The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
    the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
    metric, i.e. the one that is used for model selection and/or logging.

    Extend this class and implement __call__ for custom evaluators.
    """

    def __init__(self):
        self.greater_is_better = True
        self.primary_metric = None

    def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
    ) -> float | dict[str, float]:
        """
        This is called during training to evaluate the model.
        It returns a score for the evaluation with a higher score indicating a better result.

        Args:
            model: the model to evaluate
            output_path: path where predictions and metrics are written
                to
            epoch: the epoch where the evaluation takes place. This is
                used for the file prefixes. If this is -1, then we
                assume evaluation on test data.
            steps: the steps in the current epoch at time of the
                evaluation. This is used for the file prefixes. If this
                is -1, then we assume evaluation at the end of the
                epoch.

        Returns:
            Either a score for the evaluation with a higher score
            indicating a better result, or a dictionary with scores. If
            the latter is chosen, then `evaluator.primary_metric` must
            be defined.
        """
        pass

    def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
        def maybe_to_float(value: Any) -> Any:
            try:
                return float(value)
            except ValueError:
                return value

        if not name:
            return {key: maybe_to_float(value) for key, value in metrics.items()}
        metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
        if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
            self.primary_metric = name + "_" + self.primary_metric
        return metrics

    def store_metrics_in_model_card_data(
        self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
    ) -> None:
        model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)

    @property
    def description(self) -> str:
        """
        Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification

        1. Replace "CE" prefix with "CrossEncoder"
        2. Remove "Evaluator" from the class name
        3. Add a space before every capital letter
        """
        class_name = self.__class__.__name__

        if class_name.startswith("CE"):
            class_name = "CrossEncoder" + class_name[2:]

        try:
            index = class_name.index("Evaluator")
            class_name = class_name[:index]
        except ValueError:
            # str.index raises ValueError when "Evaluator" is absent; keep the full name in that case.
            pass

        return re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", class_name)

    def get_config_dict(self) -> dict[str, Any]:
        """
        Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
        """
        return {}

    def embed_inputs(
        self,
        model: SentenceTransformer,
        sentences: str | list[str] | np.ndarray,
        **kwargs,
    ) -> list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]:
        """
        Call the encode method of the model to embed the given inputs.

        Args:
            model (SentenceTransformer): Model we are evaluating
            sentences (str | list[str] | np.ndarray): Text that we are embedding

        Returns:
            list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]: The associated embedding
        """
        return model.encode(sentences, **kwargs)