File size: 2,579 Bytes
fd3db54
 
 
 
820777f
 
 
 
fd3db54
 
820777f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fd3db54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import gradio as gr
from openai import OpenAI
import os

# HTML snippet rendered as the app header.
# NOTE(review): DESCRIPTION, LICENSE and PLACEHOLDER are defined here but are
# not passed to gr.ChatInterface below — confirm whether they should be wired
# in via the `description`, `chatbot=gr.Chatbot(placeholder=...)` etc. kwargs.
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">ContenteaseAI custom trained model</h1>
</div>
'''

# HTML/markdown footer with a link to the project site.
LICENSE = """
<p/>
---
For more information, visit our [website](https://contentease.ai).
"""

# HTML shown in the empty chat area before the first message.
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">ContenteaseAI Custom AI trained model</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Enter the text extracted from the PDF:</p>
</div>
"""

# Custom CSS applied to the Gradio app (centers all h1 headings).
css = """
h1 {
  text-align: center;
  display: block;
}
"""

# Hugging Face access token, read from the environment.
# NOTE(review): os.getenv returns None when HF_TOKEN is unset, so the client
# would be created with api_key=None and fail only at request time — consider
# failing fast here.
ACCESS_TOKEN = os.getenv("HF_TOKEN")

# OpenAI-compatible client pointed at the Hugging Face serverless
# inference endpoint.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=ACCESS_TOKEN,
)

def respond(
    message,
    history,
    max_tokens,
    temperature,
    system_message=""" Extract the following information from the given text:
            Identify the specific areas where the work needs to be done and Add the furniture that has to be changed.
            Do not specify the work that has to be done.
            Format the extracted information in the following JSON structure:

            {
              "Area Type1": {
                  "Furniture1",
                  "Furniture2",
                  ...
              }
              "Area Type2": {
                   "Furniture1",
                  "Furniture2",
                  ...
              }
            }""",
):
    """Stream a chat completion for *message* given the prior *history*.

    Args:
        message: The latest user input (text extracted from a PDF).
        history: List of (user, assistant) message pairs from Gradio's
            ChatInterface.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature for the model.
        system_message: System prompt describing the extraction task.

    Yields:
        The accumulated response text after each streamed token, so the UI
        updates incrementally.
    """
    messages = [{"role": "system", "content": system_message}]

    # Drop the most recent history entry before replaying the conversation.
    # NOTE(review): this mutates the caller's `history` list in place —
    # presumably intentional for this ChatInterface setup, but confirm.
    if history:
        history.pop()

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Use a distinct loop variable: the original shadowed the `message`
    # parameter with each streamed chunk.
    for chunk in client.chat.completions.create(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct",
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        messages=messages,
    ):
        token = chunk.choices[0].delta.content
        # The final streamed chunk may carry content=None; skip it to avoid
        # `TypeError: can only concatenate str (not "NoneType") to str`.
        if token:
            response += token
            yield response

# Wire the streaming `respond` generator into a Gradio chat UI.  The two
# sliders are passed through as the `max_tokens` and `temperature`
# positional arguments of `respond` (order must match the signature).
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),

    ],
    css=css,
)
if __name__ == "__main__":
    # debug=True surfaces server-side tracebacks in the console/UI.
    demo.launch(debug = True)