from __future__ import annotations

from typing import TYPE_CHECKING, Dict, List, Union, Optional, overload
from typing_extensions import Literal

import httpx

from ..types import Completion, completion_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import required_args, maybe_transform
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_raw_response_wrapper, async_to_raw_response_wrapper
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options

if TYPE_CHECKING:
    from .._client import OpenAI, AsyncOpenAI

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    with_raw_response: CompletionsWithRawResponse

    def __init__(self, client: OpenAI) -> None:
        super().__init__(client)
        self.with_raw_response = CompletionsWithRawResponse(self)

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo-instruct", "text-davinci-003", "text-davinci-002", "text-davinci-001", "code-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models/overview) for
              descriptions of them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
              convert text to token IDs. Mathematically, the bias is added to the logits
              generated by the model prior to sampling. The exact effect will vary per model,
              but values between -1 and 1 should decrease or increase likelihood of selection;
              values like -100 or 100 should result in a ban or exclusive selection of the
              relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely tokens, as well the
              chosen tokens. For example, if `logprobs` is 5, the API will return a list of
              the 5 most likely tokens. The API will always return the `logprob` of the
              sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) to generate in the completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          suffix: The suffix that comes after a completion of inserted text.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
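
        Example:
          An illustrative sketch, assuming an `OpenAI` client that can read its API key
          from the environment and access to the `gpt-3.5-turbo-instruct` model:

              from openai import OpenAI

              client = OpenAI()
              completion = client.completions.create(
                  model="gpt-3.5-turbo-instruct",
                  prompt="Say this is a test",
                  max_tokens=7,
              )
              print(completion.choices[0].text)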
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo-instruct", "text-davinci-003", "text-davinci-002", "text-davinci-001", "code-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[Completion]:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models/overview) for
              descriptions of them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
              convert text to token IDs. Mathematically, the bias is added to the logits
              generated by the model prior to sampling. The exact effect will vary per model,
              but values between -1 and 1 should decrease or increase likelihood of selection;
              values like -100 or 100 should result in a ban or exclusive selection of the
              relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely tokens, as well the
              chosen tokens. For example, if `logprobs` is 5, the API will return a list of
              the 5 most likely tokens. The API will always return the `logprob` of the
              sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) to generate in the completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          suffix: The suffix that comes after a completion of inserted text.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
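
        Example:
          An illustrative streaming sketch, assuming an `OpenAI` client that can read its
          API key from the environment and access to the `gpt-3.5-turbo-instruct` model:

              from openai import OpenAI

              client = OpenAI()
              stream = client.completions.create(
                  model="gpt-3.5-turbo-instruct",
                  prompt="Say this is a test",
                  stream=True,
              )
              for chunk in stream:
                  print(chunk.choices[0].text, end="")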
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo-instruct", "text-davinci-003", "text-davinci-002", "text-davinci-001", "code-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        stream: bool,
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        """Creates a completion for the provided prompt and parameters (see the overloads above)."""
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    def create(
        self,
        *,
        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo-instruct", "text-davinci-003", "text-davinci-002", "text-davinci-001", "code-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        # Serialize the keyword arguments into the request body (NOT_GIVEN sentinels are
        # dropped) and POST them to /completions. When `stream` is true, the response is
        # returned as a Stream[Completion] of server-sent events.
        return self._post(
            "/completions",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=Stream[Completion],
        )


class AsyncCompletions(AsyncAPIResource):
    with_raw_response: AsyncCompletionsWithRawResponse

    def __init__(self, client: AsyncOpenAI) -> None:
        super().__init__(client)
        self.with_raw_response = AsyncCompletionsWithRawResponse(self)

    # The streaming/non-streaming @overload declarations mirror Completions.create above
    # (returning Completion, AsyncStream[Completion], or their union depending on `stream`)
    # and are omitted here for brevity; the runtime signature follows.
    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    async def create(
        self,
        *,
        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo-instruct", "text-davinci-003", "text-davinci-002", "text-davinci-001", "code-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        """Creates a completion for the provided prompt and parameters (see Completions.create)."""
        return await self._post(
            "/completions",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        # Wrap the resource's `create` method so calls return the raw HTTP response
        # rather than the parsed Completion object.
        self.create = to_raw_response_wrapper(completions.create)


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self.create = async_to_raw_response_wrapper(completions.create)
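

# Illustrative usage sketch for the asynchronous resource above. It assumes the `openai`
# package is importable and that OPENAI_API_KEY is set in the environment; the helper is
# an example only and is not referenced elsewhere in the SDK.
async def _example_async_completion() -> None:
    from openai import AsyncOpenAI  # imported locally to avoid a module-level circular import

    client = AsyncOpenAI()
    completion = await client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say this is a test",
        max_tokens=7,
    )
    print(completion.choices[0].text)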