from __future__ import annotations

from typing import Dict, List, Union, Iterable, Optional, overload
from typing_extensions import Literal

import httpx

from .. import _legacy_response
from ..types import Completion, completion_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import required_args, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models/overview) for
              descriptions of them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion.

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
              Mathematically, the bias is added to the logits generated by the model prior to
              sampling. The exact effect will vary per model, but values between -1 and 1
              should decrease or increase likelihood of selection; values like -100 or 100
              should result in a ban or exclusive selection of the relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
              list of the 5 most likely tokens. The API will always return the `logprob` of
              the sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
              completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          suffix: The suffix that comes after a completion of inserted text.

              This parameter is only supported for `gpt-3.5-turbo-instruct`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
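
          A minimal usage sketch (the client construction and the model/prompt
          values shown are illustrative, not prescribed by this method):

              from openai import OpenAI

              client = OpenAI()  # reads OPENAI_API_KEY from the environment
              completion = client.completions.create(
                  model="gpt-3.5-turbo-instruct",
                  prompt="Say this is a test",
                  max_tokens=7,
              )
              print(completion.choices[0].text)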
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[Completion]:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models/overview) for
              descriptions of them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion.

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
              Mathematically, the bias is added to the logits generated by the model prior to
              sampling. The exact effect will vary per model, but values between -1 and 1
              should decrease or increase likelihood of selection; values like -100 or 100
              should result in a ban or exclusive selection of the relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
              list of the 5 most likely tokens. The API will always return the `logprob` of
              the sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
              completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          suffix: The suffix that comes after a completion of inserted text.

              This parameter is only supported for `gpt-3.5-turbo-instruct`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
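
          A minimal streaming sketch (the client construction and the
          model/prompt values shown are illustrative):

              from openai import OpenAI

              client = OpenAI()
              stream = client.completions.create(
                  model="gpt-3.5-turbo-instruct",
                  prompt="Say this is a test",
                  stream=True,
              )
              for chunk in stream:
                  print(chunk.choices[0].text, end="")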
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: bool,
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        # Same parameter documentation as the streaming overload above.
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        return self._post(
            "/completions",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            # When `stream=True`, the response is parsed as server-sent events.
            stream=stream or False,
            stream_cls=Stream[Completion],
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        return AsyncCompletionsWithStreamingResponse(self)

    # The overloads below mirror `Completions.create`; the parameter
    # documentation above applies unchanged, with async return types.
    @overload
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion:
        ...

    @overload
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncStream[Completion]:
        ...

    @overload
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: bool,
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        return await self._post(
            "/completions",
            body=await async_maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions
        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions
        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions
        self.create = to_streamed_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions
        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )
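
# A hedged usage sketch for the wrapper variants above (the client and model
# names are illustrative). `.with_raw_response` wraps `create` so the raw HTTP
# response is accessible alongside the parsed `Completion`:
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     raw = client.completions.with_raw_response.create(
#         model="gpt-3.5-turbo-instruct",
#         prompt="Say this is a test",
#     )
#     print(raw.headers.get("x-request-id"))
#     completion = raw.parse()
#
# The async client (`AsyncOpenAI`) mirrors this through `AsyncCompletions`.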