
from __future__ import annotations

from typing import Dict, List, Union, Iterable, Optional, overload
from typing_extensions import Literal

import httpx

from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._streaming import Stream, AsyncStream
from ...types.chat import (
    ChatCompletion,
    ChatCompletionChunk,
    ChatCompletionToolParam,
    ChatCompletionMessageParam,
    ChatCompletionToolChoiceOptionParam,
    completion_create_params,
)
from ..._base_client import make_request_options

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, Literal["gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613"]],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion:
        """
        Creates a model response for the given chat conversation.

        Args:
          messages: A list of messages comprising the conversation so far.
              [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`. This option is currently not available on the `gpt-4-vision-preview`
              model.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion.

              The total length of input tokens and generated tokens is limited by the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
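
        Example: a minimal non-streaming call, shown here as an illustrative sketch; it
        assumes an already-configured ``OpenAI`` client bound to the name ``client``:

            completion = client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Say hello."}],
            )
            print(completion.choices[0].message.content)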
        """
        ...

    @overload
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, Literal["gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613"]],
        stream: Literal[True],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[ChatCompletionChunk]:
        """
        Creates a model response for the given chat conversation.

        Args:
          messages: A list of messages comprising the conversation so far.
              [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`. This option is currently not available on the `gpt-4-vision-preview`
              model.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion.

              The total length of input tokens and generated tokens is limited by the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
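
        Example: a minimal streaming call, shown here as an illustrative sketch; it
        assumes an already-configured ``OpenAI`` client bound to the name ``client`` and
        prints each content delta as it arrives:

            stream = client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Say hello."}],
                stream=True,
            )
            for chunk in stream:
                print(chunk.choices[0].delta.content or "", end="")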
        """
        ...

    @overload
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, Literal["gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613"]],
        stream: bool,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
        ...

    @required_args(["messages", "model"], ["messages", "model", "stream"])
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, Literal["gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613"]],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
        return self._post(
            "/chat/completions",
            body=maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "response_format": response_format,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletion,
            stream=stream or False,
            stream_cls=Stream[ChatCompletionChunk],
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        return AsyncCompletionsWithStreamingResponse(self)

    @overload
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, Literal["gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613"]],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion:
        ...

    @overload
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, Literal["gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613"]],
        stream: Literal[True],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncStream[ChatCompletionChunk]:
        ...

    @overload
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, Literal["gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613"]],
        stream: bool,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
        ...

    @required_args(["messages", "model"], ["messages", "model", "stream"])
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, Literal["gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613"]],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
        return await self._post(
            "/chat/completions",
            body=await async_maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "response_format": response_format,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletion,
            stream=stream or False,
            stream_cls=AsyncStream[ChatCompletionChunk],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = to_streamed_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )
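

# Usage sketch (illustrative, not part of this module): the wrapper classes above are
# normally reached through a configured client rather than instantiated directly.
#
#     client = OpenAI()  # assumes the package-level OpenAI client
#     raw = client.chat.completions.with_raw_response.create(
#         model="gpt-3.5-turbo",
#         messages=[{"role": "user", "content": "Say hello."}],
#     )
#     completion = raw.parse()  # parsed ChatCompletion; raw.headers carries HTTP headers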