
from __future__ import annotations

import logging
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_pydantic_field_names, pre_init
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import Field, model_validator

logger = logging.getLogger(__name__)


class LlamaCpp(LLM):
    """llama.cpp model.

    To use, you should have the llama-cpp-python library installed, and provide the
    path to the Llama model as a named parameter to the constructor.
    Check out: https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            from langchain_community.llms import LlamaCpp
            llm = LlamaCpp(model_path="/path/to/llama/model")
    """
    client: Any = None  #: :meta private:

    model_path: str
    """The path to the Llama model file."""

    lora_base: Optional[str] = None
    """The path to the Llama LoRA base model."""

    lora_path: Optional[str] = None
    """The path to the Llama LoRA. If None, no LoRA is loaded."""

    n_ctx: int = Field(512, alias="n_ctx")
    """Token context window."""

    n_parts: int = Field(-1, alias="n_parts")
    """Number of parts to split the model into.
    If -1, the number of parts is automatically determined."""

    seed: int = Field(-1, alias="seed")
    """Seed. If -1, a random seed is used."""

    f16_kv: bool = Field(True, alias="f16_kv")
    """Use half-precision for key/value cache."""

    logits_all: bool = Field(False, alias="logits_all")
    """Return logits for all tokens, not just the last token."""

    vocab_only: bool = Field(False, alias="vocab_only")
    """Only load the vocabulary, no weights."""

    use_mlock: bool = Field(False, alias="use_mlock")
    """Force system to keep model in RAM."""

    n_threads: Optional[int] = Field(None, alias="n_threads")
    """Number of threads to use.
    If None, the number of threads is automatically determined."""

    n_batch: Optional[int] = Field(8, alias="n_batch")
    """Number of tokens to process in parallel.
    Should be a number between 1 and n_ctx."""

    n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
    """Number of layers to be loaded into gpu memory. Default None."""

    suffix: Optional[str] = Field(None)
    """A suffix to append to the generated text. If None, no suffix is appended."""

    max_tokens: Optional[int] = 256
    """The maximum number of tokens to generate."""

    temperature: Optional[float] = 0.8
    """The temperature to use for sampling."""

    top_p: Optional[float] = 0.95
    """The top-p value to use for sampling."""

    logprobs: Optional[int] = Field(None)
    """The number of logprobs to return. If None, no logprobs are returned."""

    echo: Optional[bool] = False
    """Whether to echo the prompt."""

    stop: Optional[List[str]] = []
    """A list of strings to stop generation when encountered."""

    repeat_penalty: Optional[float] = 1.1
    """The penalty to apply to repeated tokens."""

    top_k: Optional[int] = 40
    """The top-k value to use for sampling."""

    last_n_tokens_size: Optional[int] = 64
    """The number of tokens to look back when applying the repeat_penalty."""

    use_mmap: Optional[bool] = True
    """Whether to keep the model loaded in RAM."""

    rope_freq_scale: float = 1.0
    """Scale factor for rope sampling."""

    rope_freq_base: float = 10000.0
    """Base frequency for rope sampling."""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Any additional parameters to pass to llama_cpp.Llama."""

    streaming: bool = True
    """Whether to stream the results, token by token."""

    grammar_path: Optional[Union[str, Path]] = None
    """Path to the .gbnf file that defines formal grammars for constraining
    model outputs. For instance, the grammar can be used to force the model to
    generate valid JSON or to speak exclusively in emojis. At most one of
    grammar_path and grammar should be passed in."""

    grammar: Optional[Union[str, Any]] = None
    """Formal grammar for constraining model outputs. For instance, the grammar
    can be used to force the model to generate valid JSON or to speak
    exclusively in emojis. At most one of grammar_path and grammar should be
    passed in."""

    verbose: bool = True
    """Print verbose output to stderr."""

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that llama-cpp-python library is installed."""
        try:
            from llama_cpp import Llama, LlamaGrammar
        except ImportError:
            raise ImportError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            )

        model_path = values["model_path"]
        model_param_names = [
            "rope_freq_scale",
            "rope_freq_base",
            "lora_path",
            "lora_base",
            "n_ctx",
            "n_parts",
            "seed",
            "f16_kv",
            "logits_all",
            "vocab_only",
            "use_mlock",
            "n_threads",
            "n_batch",
            "use_mmap",
            "last_n_tokens_size",
            "verbose",
        ]
        model_params = {k: values[k] for k in model_param_names}
        # For backwards compatibility, only include if non-null.
        if values["n_gpu_layers"] is not None:
            model_params["n_gpu_layers"] = values["n_gpu_layers"]

        model_params.update(values["model_kwargs"])

        try:
            values["client"] = Llama(model_path, **model_params)
        except Exception as e:
            raise ValueError(
                f"Could not load Llama model from path: {model_path}. "
                f"Received error {e}"
            )

        if values["grammar"] and values["grammar_path"]:
            grammar = values["grammar"]
            grammar_path = values["grammar_path"]
            raise ValueError(
                "Can only pass in one of grammar and grammar_path. Received "
                f"{grammar=} and {grammar_path=}."
            )
        elif isinstance(values["grammar"], str):
            values["grammar"] = LlamaGrammar.from_string(values["grammar"])
        elif values["grammar_path"]:
            values["grammar"] = LlamaGrammar.from_file(values["grammar_path"])

        return values

    @model_validator(mode="before")
    @classmethod
    def build_model_kwargs(cls, values: Dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        values = _build_model_kwargs(values, all_required_field_names)
        return values
        U R                   U R                  U R                  U R                  U R                  U R
                  U R                  U R                  U R                  S.	nU R                  (       a  U R                  US'   U$ )z1Get the default parameters for calling llama_cpp.)	r*   r,   r-   r.   r/   r0   stop_sequencesr3   r5   rA   )
r*   r,   r-   r.   r/   r0   r2   r3   r5   rA   )selfparamss     rU   _default_paramsLlamaCpp._default_params   sj     kk//++ZZII"ii"11ZZ

 << $F9r^   c                :    0 SU R                   0EU R                  E$ )zGet the identifying parameters.r   )r   rc   ra   s    rU   _identifying_paramsLlamaCpp._identifying_params   s$     K<1JT5I5IJJr^   c                    g)zReturn type of llm.llamacpp rf   s    rU   	_llm_typeLlamaCpp._llm_type   s     r^   c                    U R                   (       a  Ub  [        S5      eU R                  nUR                  S5        U R                   =(       d    U=(       d    / US'   U$ )z
Performs sanity check, preparing parameters in format needed by llama_cpp.

Args:
    stop (Optional[List[str]]): List of stop sequences for llama_cpp.

Returns:
    Dictionary containing the combined parameters.
z2`stop` found in both the input and default params.r`   r2   )r2   rK   rc   pop)ra   r2   rb   s      rU   _get_parametersLlamaCpp._get_parameters   sT     99)QRR%% 	

#$ 0d0bvr^   c                    U R                   (       a/  SnU R                  " SUUUS.UD6 H  nXVR                  -  nM     U$ U R                  U5      n0 UEUEnU R                  " SSU0UD6nUS   S   S   $ )a  Call the Llama model and return the output.

Args:
    prompt: The prompt to use for generation.
    stop: A list of strings to stop generation when encountered.

Returns:
    The generated text.

Example:
    .. code-block:: python

        from langchain_community.llms import LlamaCpp
        llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")
        llm.invoke("This is a prompt.")
 )promptr2   run_managerrt   choicesr   textrk   )r?   _streamrw   rp   r   )	ra   rt   r2   ru   kwargscombined_text_outputchunkrb   results	            rU   _callLlamaCpp._call  s    . >> $&  ' 	 %

2$ ('))$/F))&)F[[99&9F)$Q'//r^   c              +  ,  #    0 U R                  U5      EUEnU R                  " S
USS.UD6nU H`  nUS   S   R                  SS5      n[        US   S   S   SU0S9n	U(       a%  UR	                  U	R
                  U R                  US	9  U	v   Mb     g7f)a\  Yields results objects as they are generated in real time.

        It also calls the callback manager's on_llm_new_token event with
        similar parameters to the OpenAI LLM class method of the same name.

        Args:
            prompt: The prompts to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens being generated.

        Yields:
            A dictionary like objects containing a string token and metadata.
            See llama-cpp-python docs and below for more.

        Example:
            .. code-block:: python

                from langchain_community.llms import LlamaCpp
                llm = LlamaCpp(
                    model_path="/path/to/local/model.bin",
                    temperature = 0.5
                )
                for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
                        stop=["'","
"]):
                    result = chunk["choices"][0]
                    print(result["text"], end='', flush=True)  # noqa: T201

        """
        params = {**self._get_parameters(stop), **kwargs}
        result = self.client(prompt=prompt, stream=True, **params)
        for part in result:
            logprobs = part["choices"][0].get("logprobs", None)
            chunk = GenerationChunk(
                text=part["choices"][0]["text"],
                generation_info={"logprobs": logprobs},
            )
            if run_manager:
                run_manager.on_llm_new_token(
                    token=chunk.text, verbose=self.verbose, log_probs=logprobs
                )
            yield chunk

    def get_num_tokens(self, text: str) -> int:
        tokenized_text = self.client.tokenize(text.encode("utf-8"))
        return len(tokenized_text)
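

# Pass-through kwargs sketch: constructor arguments not declared as fields are
# collected into `model_kwargs` by build_model_kwargs and forwarded verbatim
# to llama_cpp.Llama, so newer llama-cpp-python options can be used without
# wrapper support. The specific option shown is an assumption about your
# llama-cpp-python version; check its docs for what your build accepts.
def _example_passthrough_kwargs() -> None:
    llm = LlamaCpp(
        model_path="/path/to/local/model.gguf",  # hypothetical path
        n_gpu_layers=-1,  # offload all layers if built with GPU support
        model_kwargs={"offload_kqv": True},  # forwarded verbatim to Llama()
    )
    print(llm.invoke("Hello!"))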