
    f j                       d dl mZ d dlmZmZmZmZmZmZ d dl	m
Z
 d dlZddlmZ ddlmZmZ ddlmZmZmZmZmZ dd	lmZmZmZ dd
lmZ ddlmZmZ ddl m!Z!m"Z" ddl#m$Z$m%Z% ddl&m'Z' ddgZ( G d de          Z) G d de          Z* G d d          Z+ G d d          Z, G d d          Z- G d d          Z.dS )    )annotations)DictListUnionIterableOptionaloverload)LiteralN   )_legacy_response)
Completioncompletion_create_params)	NOT_GIVENBodyQueryHeadersNotGiven)required_argsmaybe_transformasync_maybe_transform)cached_property)SyncAPIResourceAsyncAPIResource)to_streamed_response_wrapper"async_to_streamed_response_wrapper)StreamAsyncStream)make_request_optionsCompletionsAsyncCompletionsc                  ^   e Zd Zed7d            Zed8d            Zeeeeeeeeeeeeeeeedddedd9d,            Zeeeeeeeeeeeeeeeddded-d:d0            Zeeeeeeeeeeeeeeeddded-d;d3            Z e	dd
gg d4          eeeeeeeeeeeeeeedddedd<d6            ZdS )=r   returnCompletionsWithRawResponsec                     t          |           S N)r#   selfs    pC:\Users\Terasoftware\OneDrive\Desktop\faahhh\fyndo\fyndo\venv\Lib\site-packages\openai/resources/completions.pywith_raw_responsezCompletions.with_raw_response   s    )$///     CompletionsWithStreamingResponsec                     t          |           S r%   )r+   r&   s    r(   with_streaming_responsez#Completions.with_streaming_response"   s    /555r*   Nbest_ofechofrequency_penalty
logit_biaslogprobs
max_tokensnpresence_penaltyseedstopstreamsuffixtemperaturetop_puserextra_headersextra_query
extra_bodytimeoutmodelKUnion[str, Literal['gpt-3.5-turbo-instruct', 'davinci-002', 'babbage-002']]promptCUnion[str, List[str], Iterable[int], Iterable[Iterable[int]], None]r/   Optional[int] | NotGivenr0   Optional[bool] | NotGivenr1   Optional[float] | NotGivenr2   #Optional[Dict[str, int]] | NotGivenr3   r4   r5   r6   r7   r8   0Union[Optional[str], List[str], None] | NotGivenr9   #Optional[Literal[False]] | NotGivenr:   Optional[str] | NotGivenr;   r<   r=   str | NotGivenr>   Headers | Noner?   Query | Noner@   Body | NonerA   'float | httpx.Timeout | None | NotGivenr   c                   dS u  
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models/overview) for
              descriptions of them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
              Mathematically, the bias is added to the logits generated by the model prior to
              sampling. The exact effect will vary per model, but values between -1 and 1
              should decrease or increase likelihood of selection; values like -100 or 100
              should result in a ban or exclusive selection of the relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
              list of the 5 most likely tokens. The API will always return the `logprob` of
              the sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
              completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          suffix: The suffix that comes after a completion of inserted text.

              This parameter is only supported for `gpt-3.5-turbo-instruct`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[Completion]:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models/overview) for
              descriptions of them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
              Mathematically, the bias is added to the logits generated by the model prior to
              sampling. The exact effect will vary per model, but values between -1 and 1
              should decrease or increase likelihood of selection; values like -100 or 100
              should result in a ban or exclusive selection of the relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
              list of the 5 most likely tokens. The API will always return the `logprob` of
              the sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
              completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          suffix: The suffix that comes after a completion of inserted text.

              This parameter is only supported for `gpt-3.5-turbo-instruct`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: bool,
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        return self._post(
            "/completions",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=Stream[Completion],
        )
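
# ---------------------------------------------------------------------------
# Usage note (illustrative sketch, not part of the generated resource above):
# assuming the package's standard `openai.OpenAI` client wiring and an
# OPENAI_API_KEY in the environment, this resource is reached as
# `client.completions`, and `stream=True` switches the return type from
# `Completion` to `Stream[Completion]`, as the overloads above document.
#
#     from openai import OpenAI
#
#     client = OpenAI()  # reads OPENAI_API_KEY from the environment
#
#     completion = client.completions.create(
#         model="gpt-3.5-turbo-instruct",
#         prompt="Say this is a test",
#         max_tokens=7,
#     )
#     print(completion.choices[0].text)
#
#     # Streaming: iterate the returned Stream[Completion] chunk by chunk.
#     for chunk in client.completions.create(
#         model="gpt-3.5-turbo-instruct",
#         prompt="Say this is a test",
#         stream=True,
#     ):
#         print(chunk.choices[0].text, end="")
# ---------------------------------------------------------------------------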


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        return AsyncCompletionsWithStreamingResponse(self)

    @overload
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion:
        ...

    @overload
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncStream[Completion]:
        ...

    @overload
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: bool,
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        return await self._post(
            "/completions",
            body=await async_maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = to_streamed_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )
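
# ---------------------------------------------------------------------------
# Async usage note (illustrative sketch, assuming the package's standard
# `openai.AsyncOpenAI` client wiring and an OPENAI_API_KEY in the
# environment): `AsyncCompletions.create` mirrors the synchronous resource,
# but the call is awaited and `stream=True` yields an AsyncStream[Completion].
#
#     import asyncio
#
#     from openai import AsyncOpenAI
#
#     async def main() -> None:
#         client = AsyncOpenAI()
#         stream = await client.completions.create(
#             model="gpt-3.5-turbo-instruct",
#             prompt="Say this is a test",
#             stream=True,
#         )
#         async for chunk in stream:
#             print(chunk.choices[0].text, end="")
#
#     asyncio.run(main())
# ---------------------------------------------------------------------------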