
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union

from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import pre_init

from langchain_community.llms.utils import enforce_stop_tokens


class DeepSparse(LLM):
    """Neural Magic DeepSparse LLM interface.

    To use, you should have the ``deepsparse`` or ``deepsparse-nightly``
    python package installed. See https://github.com/neuralmagic/deepsparse

    This interface lets you deploy optimized LLMs straight from the
    [SparseZoo](https://sparsezoo.neuralmagic.com/?useCase=text_generation)

    Example:
        .. code-block:: python

            from langchain_community.llms import DeepSparse
            llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
    """

    pipeline: Any  #: :meta private:

    model: str
    """The path to a model file or directory or the name of a SparseZoo model stub."""

    model_configuration: Optional[Dict[str, Any]] = None
    """Keyword arguments passed to the pipeline construction.
    Common parameters are sequence_length, prompt_sequence_length."""

    generation_config: Union[None, str, Dict] = None
    """GenerationConfig dictionary consisting of parameters used to control
    sequences generated for each prompt. Common parameters are:
    max_length, max_new_tokens, num_return_sequences, output_scores,
    top_p, top_k, repetition_penalty."""

    streaming: bool = False
    """Whether to stream the results, token by token."""

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {
            "model": self.model,
            "model_config": self.model_configuration,
            "generation_config": self.generation_config,
            "streaming": self.streaming,
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "deepsparse"
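
    # A minimal configuration sketch (illustrative values, not library
    # defaults): `model_configuration` is forwarded to `Pipeline.create`
    # when the pipeline is built, while `generation_config` is unpacked
    # into every pipeline call.
    #
    #     llm = DeepSparse(
    #         model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none",
    #         model_configuration={"sequence_length": 2048},
    #         generation_config={"max_new_tokens": 128, "top_p": 0.9},
    #     )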

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that ``deepsparse`` package is installed."""
        try:
            from deepsparse import Pipeline
        except ImportError:
            raise ImportError(
                "Could not import `deepsparse` package. "
                "Please install it with `pip install deepsparse[llm]`"
            )

        model_config = values["model_configuration"] or {}

        values["pipeline"] = Pipeline.create(
            task="text_generation",
            model_path=values["model"],
            **model_config,
        )
        return values
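
    # For orientation, the validator above is roughly equivalent to this
    # hand-rolled setup (a sketch, assuming `deepsparse[llm]` is installed
    # and the SparseZoo stub is reachable):
    #
    #     from deepsparse import Pipeline
    #     pipeline = Pipeline.create(
    #         task="text_generation",
    #         model_path="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none",
    #     )
    #     print(pipeline(sequences="Hello").generations[0].text)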

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Generate text from a prompt.

        Args:
            prompt: The prompt to generate text from.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The generated text.

        Example:
            .. code-block:: python

                from langchain_community.llms import DeepSparse
                llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
                llm.invoke("Tell me a joke.")
        """
        if self.streaming:
            # Accumulate the streamed chunks into a single string.
            combined_output = ""
            for chunk in self._stream(
                prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
            ):
                combined_output += chunk.text
            text = combined_output
        else:
            # `generation_config` may be None, so guard the unpacking.
            text = (
                self.pipeline(sequences=prompt, **(self.generation_config or {}))
                .generations[0]
                .text
            )

        if stop is not None:
            text = enforce_stop_tokens(text, stop)

        return text

    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Generate text from a prompt.

        Args:
            prompt: The prompt to generate text from.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The generated text.
        """
        if self.streaming:
            combined_output = ""
            async for chunk in self._astream(
                prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
            ):
                combined_output += chunk.text
            text = combined_output
        else:
            text = (
                self.pipeline(sequences=prompt, **(self.generation_config or {}))
                .generations[0]
                .text
            )

        if stop is not None:
            text = enforce_stop_tokens(text, stop)

        return text

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Yields results objects as they are generated in real time.

        It also calls the callback manager's on_llm_new_token event with
        similar parameters to the OpenAI LLM class method of the same name.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens being generated.

        Yields:
            A dictionary like object containing a string token.

        Example:
            .. code-block:: python

                from langchain_community.llms import DeepSparse
                llm = DeepSparse(
                    model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none",
                    streaming=True
                )
                for chunk in llm.stream("Tell me a joke",
                        stop=["'", "\n"]):
                    print(chunk, end='', flush=True)  # noqa: T201
        """
        inference = self.pipeline(
            sequences=prompt, streaming=True, **(self.generation_config or {})
        )
        for token in inference:
            # Each streamed result carries the newly generated token text in
            # `generations[0].text`; re-wrap it as a LangChain chunk.
            chunk = GenerationChunk(text=token.generations[0].text)

            if run_manager:
                run_manager.on_llm_new_token(token=chunk.text)
            yield chunk

    async def _astream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
        """Yields results objects as they are generated in real time.

        It also calls the callback manager's on_llm_new_token event with
        similar parameters to the OpenAI LLM class method of the same name.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens being generated.

        Yields:
            A dictionary like object containing a string token.
        """
        inference = self.pipeline(
            sequences=prompt, streaming=True, **(self.generation_config or {})
        )
        for token in inference:
            chunk = GenerationChunk(text=token.generations[0].text)

            if run_manager:
                await run_manager.on_llm_new_token(token=chunk.text)
            yield chunk