from modules.prompt import generate_prompt
from modules.config import client, azure_ml_endpoint_url, headers
from modules.search import search
import time
import requests
import json


def respond(
    message,
    history: list[tuple[str, str]],
    model,
):
    print("Model: ", model)

    # Rebuild the chat history in OpenAI message format (user/assistant turns).
    messages = []
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""

    # Retrieve relevant passages and build the grounded (RAG) prompt.
    highlighted_content = search(message)
    GROUNDED_PROMPT = generate_prompt(message, highlighted_content)

    if model == "gpt-4" or model == "gpt-35-turbo":
        print("Using GPT model ", model)
        completion = client.chat.completions.create(
            model=model,
            messages=[
                {
                    "role": "user",
                    "content": GROUNDED_PROMPT,
                },
            ],
        )
        token = completion.choices[0].message.content
    else:  # Phi
        print("Using Phi model ", model)
        data = {
            "messages": [
                {"role": "user", "content": GROUNDED_PROMPT}
            ],
            "temperature": 0.8,
            "top_p": 0.1,
            "max_tokens": 2048,
        }

        # Convert the request body to JSON
        payload = json.dumps(data)
        # Run inference against the Azure ML endpoint
        response_api = requests.post(azure_ml_endpoint_url, headers=headers, data=payload)
        response_json = response_api.json()
        content = response_json['choices'][0]['message']['content']
        token = content.strip()

    response += token

    # Stream the text gradually (typing effect).
    displayed_response = ""
    for char in response:
        displayed_response += char
        time.sleep(0.01)  # Delay between characters for the typing effect
        yield displayed_response
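

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original app wiring): one way
# respond() could be hooked up as a streaming Gradio chat handler, assuming a
# gr.ChatInterface entry point that forwards the model selector through
# additional_inputs (matching respond's (message, history, model) signature).
# The dropdown choices below mirror the branches above; the Phi entry name is
# a hypothetical placeholder for whatever the Azure ML deployment expects.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import gradio as gr

    demo = gr.ChatInterface(
        fn=respond,
        additional_inputs=[
            gr.Dropdown(
                choices=["gpt-4", "gpt-35-turbo", "phi-3-mini"],  # "phi-3-mini" is a placeholder
                value="gpt-4",
                label="Model",
            ),
        ],
        title="Grounded chat demo",
    )
    demo.launch()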