
import logging
from typing import List, Optional

import requests
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel

logger = logging.getLogger(__name__)


class LlamafileEmbeddings(BaseModel, Embeddings):
    """Llamafile lets you distribute and run large language models with a
    single file.

    To get started, see: https://github.com/Mozilla-Ocho/llamafile

    To use this class, you will need to first:

    1. Download a llamafile.
    2. Make the downloaded file executable: `chmod +x path/to/model.llamafile`
    3. Start the llamafile in server mode with embeddings enabled:

        `./path/to/model.llamafile --server --nobrowser --embedding`

    Example:
        .. code-block:: python

            from langchain_community.embeddings import LlamafileEmbeddings
            embedder = LlamafileEmbeddings()
            doc_embeddings = embedder.embed_documents(
                [
                    "Alpha is the first letter of the Greek alphabet",
                    "Beta is the second letter of the Greek alphabet",
                ]
            )
            query_embedding = embedder.embed_query(
                "What is the second letter of the Greek alphabet"
            )
    """

zhttp://localhost:8080base_urlNrequest_timeouttextreturnc                     [         R                  " U R                   S3SS0SU0U R                  S9nUR                  5         UR                  5       nSU;  a  [        S	5      eUS   n[        U5      S
:X  a  [        S5      eU$ ! [         R                  R
                   a.    [         R                  R                  SU R                   S35      ef = f)Nz
/embeddingzContent-Typezapplication/jsoncontent)urlheadersjsontimeoutzTCould not connect to Llamafile server. Please make sure that a server is running at .	embeddingzPUnexpected output from /embedding endpoint, output dict missing 'embedding' key.g        z^Embedding sums to 0, did you start the llamafile server with the `--embedding` option enabled?)requestspostr
   r   
exceptionsConnectionErrorraise_for_statusr   KeyErrorsum
ValueError)selfr   responsecontentsr   s        `/var/www/html/shao/venv/lib/python3.13/site-packages/langchain_community/embeddings/llamafile.py_embedLlamafileEmbeddings._embed0   s    	}}}}oZ0"$6 t ,,	H" 	!!#==?h&+ 
 [)	 y>S 4 
 = ""22 	%%55//3}}oQ@ 	s   2B ACtextsc                 \    / nU H#  nUR                  U R                  U5      5        M%     U$ )zEmbed documents using a llamafile server running at `self.base_url`.
llamafile server should be started in a separate process before invoking
this method.

Args:
    texts: The list of texts to embed.

Returns:
    List of embeddings, one for each text.
)appendr"   )r   r$   doc_embeddingsr   s       r!   embed_documents#LlamafileEmbeddings.embed_documents\   s0     D!!$++d"34     c                 $    U R                  U5      $ )zEmbed a query using a llamafile server running at `self.base_url`.
        llamafile server should be started in a separate process before invoking
        this method.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed(text)