YoBatM committed
Commit edba113 · verified · Parent: 635c8ce

Create app.py

Files changed (1)
  1. app.py +103 -0
app.py ADDED
@@ -0,0 +1,103 @@
import gradio as gr
from huggingface_hub import InferenceClient
from os import environ
from json import loads
from random import randint

# Inference client for the story model; the HF token is read from the "token" environment variable (Space secret).
client = InferenceClient(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    token=environ["token"],
)
# The system prompt is kept in the "prompt" environment variable.
main_prompt = environ["prompt"]

css = """
.gradio-container {
    background-color: #F0F0F0;
    border: 2px solid #333;
    padding: 20px;
    border-radius: 10px;
}

.feedback textarea {
    font-size: 18px;
    padding: 10px;
    border: 1px solid #999;
    border-radius: 5px;
}

.gradio-button {
    background-color: #0074D9;
    color: white;
    font-weight: bold;
    border: none;
    border-radius: 5px;
    padding: 10px 20px;
    margin-right: 10px;
}
"""

def slice_per(source, step):
    # Split a flat list into consecutive chunks of `step` elements.
    return [source[idx:idx + step] for idx in range(0, len(source), step)]

def llamatochat(history):
    # Convert chat-completion messages into [user, assistant] pairs for gr.Chatbot.
    historias = list(map(lambda x: x["content"], history))
    return slice_per(historias, 2)

def chatollama(historias):
    # Convert gr.Chatbot [user, assistant] pairs back into chat-completion messages.
    history = []
    for i, j in historias:
        history.append({"role": "user", "content": i})
        history.append({"role": "assistant", "content": j})
    return history

def reply(msg, history):
    # Rebuild the full conversation: system prompt + previous turns + the clicked option.
    historias = [{"role": "system", "content": main_prompt}]
    historias.extend(chatollama(history))
    historias.append({"role": "user", "content": msg})
    m = client.chat_completion(
        messages=historias,
        max_tokens=500,
        stream=False,
        seed=randint(100_000_000, 999_999_999),
    )
    p = m.choices[0].message.content
    historias.append({"role": "assistant", "content": p})
    try:
        l = loads(p)
    except Exception:
        # The model did not return valid JSON: log the raw reply and show it with no options.
        print(p, "->")
        l = {"msg": p, "ops": []}
    # Drop the system prompt before mirroring the history into the hidden chatbot.
    historias.pop(0)
    resp = [gr.Textbox(value=l.get("msg")), gr.Chatbot(value=llamatochat(historias), visible=False)]
    # One visible button per option returned by the model; hide the remaining slots (up to 4).
    for c in range(len(l.get("ops"))):
        resp.append(gr.Button(value=l.get("ops")[c], visible=True))
    for o in range(4 - len(l.get("ops"))):
        resp.append(gr.Button(value="", visible=False))
    return resp

def set_theme(theme):
    # Start a new story from the theme typed by the user.
    historias = [{"role": "system", "content": main_prompt}]
    historias.append({"role": "user", "content": f"Tema: {theme}"})
    m = client.chat_completion(
        messages=historias,
        max_tokens=500,
        stream=False,
        seed=randint(100_000_000, 999_999_999),
    )
    historias.pop(0)
    p = m.choices[0].message.content
    historias.append({"role": "assistant", "content": p})
    l = loads(p)

    cc = gr.Chatbot(value=llamatochat(historias), visible=False)
    # Hide the theme input and its submit button, show the story message, then the option buttons.
    resp = [gr.Textbox(visible=False), gr.Button(visible=False), gr.Textbox(visible=True, value=l.get("msg")), cc]
    for c in range(len(l.get("ops"))):
        resp.append(gr.Button(value=l.get("ops")[c], visible=True))
    for o in range(4 - len(l.get("ops"))):
        resp.append(gr.Button(value="", visible=False))
    return resp

with gr.Blocks(css=css) as ia:
    cbox = gr.Chatbot(visible=False)
    # Label: "Story theme (e.g. friendship); you can add more details".
    tbox = gr.Textbox(label="Tema de la historia (Ej: Amistad), puede incluir más detalles")
    # "Mensaje" = "Message": the current story passage.
    mbox = gr.Textbox(label="Mensaje", visible=False, elem_classes="feedback")
    # "Enviar" = "Send".
    tsub = gr.Button(value="Enviar")
    # Up to four option buttons; clicking one sends its label as the next user turn.
    opciones = [gr.Button(value="", visible=False, elem_classes="gradio-button") for _ in range(4)]
    for opcion in opciones:
        opcion.click(reply, [opcion, cbox], [mbox, cbox, *opciones])
    tsub.click(set_theme, [tbox], [tbox, tsub, mbox, cbox, *opciones])

ia.launch()
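
The commit does not include the JSON schema the model is asked to produce (that lives in the "prompt" secret), but both reply() and set_theme() assume the reply is a JSON object with a "msg" string and an "ops" list of at most four options. A minimal, hypothetical sketch of that assumed shape and how it maps onto the UI:

from json import loads

# Hypothetical model reply matching the shape reply() and set_theme() expect;
# the real schema is dictated by the system prompt stored in the Space secrets.
example_reply = '{"msg": "You reach a dark forest...", "ops": ["Enter", "Go around it", "Turn back"]}'

l = loads(example_reply)
print(l.get("msg"))        # text shown in the "Mensaje" textbox
for op in l.get("ops"):    # each option becomes a visible button (up to 4 slots)
    print("-", op)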