
from __future__ import annotations

import json
import logging
from pathlib import Path

from transformers.configuration_utils import PretrainedConfig

from sentence_transformers.backend.utils import _save_pretrained_wrapper, backend_should_export, backend_warn_to_save

logger = logging.getLogger(__name__)


def load_onnx_model(model_name_or_path: str, config: PretrainedConfig, task_name: str, **model_kwargs):
    """
    Load and perhaps export an ONNX model using the Optimum library.

    Args:
        model_name_or_path (str): The model name on Hugging Face (e.g. 'naver/splade-cocondenser-ensembledistil')
            or the path to a local model directory.
        config (PretrainedConfig): The model configuration.
        task_name (str): The task name for the model (e.g. 'feature-extraction', 'fill-mask', 'sequence-classification').
        model_kwargs (dict): Additional keyword arguments for the model loading.
    """
    try:
        import onnxruntime as ort
        from optimum.onnxruntime import (
            ONNX_WEIGHTS_NAME,
            ORTModelForFeatureExtraction,
            ORTModelForMaskedLM,
            ORTModelForSequenceClassification,
        )

        task_to_model_mapping = {
            "feature-extraction": ORTModelForFeatureExtraction,
            "fill-mask": ORTModelForMaskedLM,
            "sequence-classification": ORTModelForSequenceClassification,
        }
    except ModuleNotFoundError:
        raise Exception(
            "Using the ONNX backend requires installing Optimum and ONNX Runtime. You can install them with pip: "
            "`pip install optimum[onnxruntime]` or `pip install optimum[onnxruntime-gpu]`"
        )

    if task_name not in task_to_model_mapping:
        supported_tasks = ", ".join(task_to_model_mapping.keys())
        raise ValueError(f"Unsupported task: {task_name}. Supported tasks: {supported_tasks}")
    model_cls = task_to_model_mapping[task_name]

    # Default to the highest-priority execution provider reported by ONNX Runtime
    # (get_available_providers() lists providers in priority order, e.g. CUDA before CPU)
    model_kwargs["provider"] = model_kwargs.pop("provider", ort.get_available_providers()[0])

    load_path = Path(model_name_or_path)
    is_local = load_path.exists()
    backend_name = "ONNX"
    target_file_glob = "*.onnx"

    # Determine whether an existing ONNX file can be loaded directly or the model must first be exported
    export, model_kwargs = backend_should_export(
        load_path, is_local, model_kwargs, ONNX_WEIGHTS_NAME, target_file_glob, backend_name
    )

    # If we're exporting, there is no existing ONNX file to point a file_name at
    if export:
        model_kwargs.pop("file_name", None)

    model = model_cls.from_pretrained(model_name_or_path, config=config, export=export, **model_kwargs)

    # Wrap save_pretrained so the model is saved in the "onnx" subfolder
    model._save_pretrained = _save_pretrained_wrapper(model._save_pretrained, subfolder="onnx")

    # Warn the user to save the freshly exported model so the export isn't repeated on every load
    if export:
        backend_warn_to_save(model_name_or_path, is_local, backend_name)

    return model


def load_openvino_model(model_name_or_path: str, config: PretrainedConfig, task_name: str, **model_kwargs):
    """
    Load and perhaps export an OpenVINO model using the Optimum library.

    Args:
        model_name_or_path (str): The model name on Hugging Face (e.g. 'naver/splade-cocondenser-ensembledistil')
            or the path to a local model directory.
        config (PretrainedConfig): The model configuration.
        task_name (str): The task name for the model (e.g. 'feature-extraction', 'fill-mask', 'sequence-classification').
        model_kwargs (dict): Additional keyword arguments for the model loading.
    """
    try:
        from optimum.intel.openvino import (
            OV_XML_FILE_NAME,
            OVModelForFeatureExtraction,
            OVModelForMaskedLM,
            OVModelForSequenceClassification,
        )

        task_to_model_mapping = {
            "feature-extraction": OVModelForFeatureExtraction,
            "fill-mask": OVModelForMaskedLM,
            "sequence-classification": OVModelForSequenceClassification,
        }
    except ModuleNotFoundError:
        raise Exception(
            "Using the OpenVINO backend requires installing Optimum and OpenVINO. You can install them with pip: "
            "`pip install optimum[openvino]`"
        )

    if task_name not in task_to_model_mapping:
        supported_tasks = ", ".join(task_to_model_mapping.keys())
        raise ValueError(f"Unsupported task: {task_name}. Supported tasks: {supported_tasks}")
    model_cls = task_to_model_mapping[task_name]

    load_path = Path(model_name_or_path)
    is_local = load_path.exists()
    backend_name = "OpenVINO"
    target_file_glob = "openvino*.xml"

    # Determine whether an existing OpenVINO file can be loaded directly or the model must first be exported
    export, model_kwargs = backend_should_export(
        load_path, is_local, model_kwargs, OV_XML_FILE_NAME, target_file_glob, backend_name
    )

    # If we're exporting, there is no existing OpenVINO file to point a file_name at
    if export:
        model_kwargs.pop("file_name", None)

    # ov_config may be given as a dict or as a path to a .json file with the runtime options
    if "ov_config" in model_kwargs:
        ov_config = model_kwargs["ov_config"]
        if not isinstance(ov_config, dict):
            if not Path(ov_config).exists():
                raise ValueError(
                    "ov_config should be a dictionary or a path to a .json file containing an OpenVINO config"
                )
            with open(ov_config, encoding="utf-8") as f:
                model_kwargs["ov_config"] = json.load(f)
    else:
        model_kwargs["ov_config"] = {}

    model = model_cls.from_pretrained(model_name_or_path, config=config, export=export, **model_kwargs)

    # Wrap save_pretrained so the model is saved in the "openvino" subfolder
    model._save_pretrained = _save_pretrained_wrapper(model._save_pretrained, subfolder="openvino")

    # Warn the user to save the freshly exported model so the export isn't repeated on every load
    if export:
        backend_warn_to_save(model_name_or_path, is_local, backend_name)

    return model
__future__r   rB   loggingpathlibr    transformers.configuration_utilsr   #sentence_transformers.backend.utilsr   r   r   	getLogger__name__loggerr4   rE        r3   <module>rQ      s7    "    = u u			8	$IXQrP   