Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -18,60 +18,12 @@
|
|
18 |
# )
|
19 |
# return chat_completion.choices[0].message.content
|
20 |
|
21 |
-
# custom_css = """
|
22 |
-
# body {
|
23 |
-
# background-color: #f5f5f5;
|
24 |
-
# font-family: 'Arial', sans-serif;
|
25 |
-
# color: #333;
|
26 |
-
# }
|
27 |
-
|
28 |
-
# .gradio-container {
|
29 |
-
# border-radius: 12px;
|
30 |
-
# padding: 20px;
|
31 |
-
# background-color: #ffffff;
|
32 |
-
# box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
|
33 |
-
# }
|
34 |
-
|
35 |
-
# input[type="text"], textarea {
|
36 |
-
# border-radius: 10px;
|
37 |
-
# border: 1px solid #ddd;
|
38 |
-
# padding: 12px;
|
39 |
-
# width: 100%;
|
40 |
-
# font-size: 14px;
|
41 |
-
# color: #333;
|
42 |
-
# background-color: #f9f9f9;
|
43 |
-
# }
|
44 |
-
|
45 |
-
# button {
|
46 |
-
# background-color: #007bff;
|
47 |
-
# color: white;
|
48 |
-
# border: none;
|
49 |
-
# padding: 12px 24px;
|
50 |
-
# border-radius: 10px;
|
51 |
-
# cursor: pointer;
|
52 |
-
# font-size: 14px;
|
53 |
-
# font-weight: bold;
|
54 |
-
# }
|
55 |
-
|
56 |
-
# button:hover {
|
57 |
-
# background-color: #0056b3;
|
58 |
-
# }
|
59 |
-
|
60 |
-
# h1 {
|
61 |
-
# font-weight: 600;
|
62 |
-
# color: #333;
|
63 |
-
# }
|
64 |
-
|
65 |
-
# textarea {
|
66 |
-
# resize: none;
|
67 |
-
# }
|
68 |
-
# """
|
69 |
|
70 |
# iface = gr.Interface(
|
71 |
# fn=generate_response,
|
72 |
# inputs=gr.Textbox(label="ورودی" , lines=2, placeholder="اینجا یه چی بپرس... "),
|
73 |
# outputs=gr.Textbox(label="جواب"),
|
74 |
-
# title="💬 Parviz
|
75 |
# description="زنده باد",
|
76 |
# theme="dark",
|
77 |
# allow_flagging="never"
|
@@ -79,36 +31,36 @@
|
|
79 |
# )
|
80 |
# iface.launch()
|
81 |
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
|
86 |
-
|
87 |
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
|
99 |
-
|
100 |
|
101 |
|
102 |
-
|
103 |
-
|
104 |
|
105 |
-
|
106 |
-
|
107 |
|
108 |
-
|
109 |
|
110 |
-
|
111 |
-
|
112 |
|
113 |
|
114 |
|
@@ -117,60 +69,53 @@
|
|
117 |
|
118 |
|
119 |
|
120 |
-
import gradio as gr
|
121 |
-
import torch
|
122 |
-
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, GenerationConfig
|
123 |
-
import re
|
124 |
-
import time
|
125 |
|
126 |
|
127 |
-
tokenizer = AutoTokenizer.from_pretrained("universitytehran/PersianMind-v1.0")
|
128 |
-
model = AutoModelForSeq2SeqLM.from_pretrained("universitytehran/PersianMind-v1.0")
|
129 |
|
130 |
|
131 |
-
def generate_response(message, chat_history):
|
132 |
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
tokenized_test_text = tokenizer(prompt, return_tensors='pt').input_ids.to("cpu")
|
151 |
-
model.to("cpu")
|
152 |
-
|
153 |
-
|
154 |
-
outputs = model.generate(tokenized_test_text, generation_config=generation_config, max_new_tokens=128)
|
155 |
-
result = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
156 |
-
|
157 |
-
for i in range(0, len(result), 10):
|
158 |
-
yield chat_history + [(message, result[:i + 10])]
|
159 |
-
time.sleep(0.1)
|
160 |
|
161 |
-
|
|
|
|
|
|
|
|
|
162 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
163 |
|
|
|
|
|
|
|
164 |
|
165 |
-
with gr.Blocks() as demo:
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
msg.submit(generate_response, [msg, chatbot], chatbot)
|
172 |
-
|
173 |
-
clear = gr.ClearButton([msg, chatbot])
|
174 |
-
|
175 |
-
demo.launch()
|
176 |
|
|
|
|
18 |
# )
|
19 |
# return chat_completion.choices[0].message.content
|
20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
|
22 |
# iface = gr.Interface(
|
23 |
# fn=generate_response,
|
24 |
# inputs=gr.Textbox(label="ورودی" , lines=2, placeholder="اینجا یه چی بپرس... "),
|
25 |
# outputs=gr.Textbox(label="جواب"),
|
26 |
+
# title="💬 Parviz GPT",
|
27 |
# description="زنده باد",
|
28 |
# theme="dark",
|
29 |
# allow_flagging="never"
|
|
|
31 |
# )
|
32 |
# iface.launch()
|
33 |
|
34 |
+
import gradio as gr
|
35 |
+
from groq import Groq
|
36 |
+
import time
|
37 |
|
38 |
+
import os

# SECURITY(review): the original committed a live Groq API key in source —
# a leaked credential. It is read from the environment instead; the exposed
# key (gsk_0ZYp…) must be revoked/rotated on the Groq console.
# Raises KeyError at startup if GROQ_API_KEY is unset, which is preferable
# to silently running with a dead hard-coded key.
client = Groq(api_key=os.environ["GROQ_API_KEY"])
|
39 |
|
40 |
+
def generate_response(message, chat_history):
    """Stream a chat reply from the Groq API as a typing effect.

    Sends *message* as a single user turn to the ``llama3-8b-8192`` model,
    then yields the chat history extended with the reply revealed in
    10-character slices, pausing 0.1 s between slices, and finishes by
    yielding the history with the complete reply.
    """
    completion = client.chat.completions.create(
        messages=[{"role": "user", "content": message}],
        model="llama3-8b-8192",
    )
    reply = completion.choices[0].message.content

    # Reveal the reply a slice at a time so the UI looks like streaming.
    shown = 0
    while shown < len(reply):
        shown += 10
        yield chat_history + [(message, reply[:shown])]
        time.sleep(0.1)

    # Final yield guarantees the full, untruncated reply is displayed.
    yield chat_history + [(message, reply)]
|
52 |
|
53 |
|
54 |
+
# --- Gradio UI ---------------------------------------------------------
with gr.Blocks() as demo:
    # Centered title + subtitle banner.
    gr.Markdown(
        "<h1 style='text-align: center;'>💬 Parviz GPT</h1>"
        "<p style='text-align: center; color: #e0e0e0;'>زنده باد</p>"
    )

    chatbot = gr.Chatbot(label="جواب")
    msg = gr.Textbox(label="ورودی", placeholder="اینجا یه چی بپرس... ", lines=1)

    # Submitting the textbox streams the generator's output into the chatbot.
    msg.submit(generate_response, [msg, chatbot], chatbot)
    clear = gr.ClearButton([msg, chatbot])

demo.launch()
|
64 |
|
65 |
|
66 |
|
|
|
69 |
|
70 |
|
71 |
|
|
|
|
|
|
|
|
|
|
|
72 |
|
73 |
|
|
|
|
|
74 |
|
75 |
|
|
|
76 |
|
77 |
+
# import gradio as gr
|
78 |
+
# import torch
|
79 |
+
# from transformers import AutoTokenizer, AutoModelForCausalLM
|
80 |
+
|
81 |
+
# tokenizer = AutoTokenizer.from_pretrained("universitytehran/PersianMind-v1.0", use_fast=True)
|
82 |
+
# model = AutoModelForCausalLM.from_pretrained(
|
83 |
+
# "universitytehran/PersianMind-v1.0",
|
84 |
+
# torch_dtype=torch.bfloat16
|
85 |
+
# ).to("cpu")
|
86 |
+
|
87 |
+
# CONTEXT = (
|
88 |
+
# "This is a conversation with ParvizGPT. It is an artificial intelligence model designed by Amir Mahdi Parviz, "
|
89 |
+
# "an NLP expert, to help you with various tasks such as answering questions, "
|
90 |
+
# "providing recommendations, and assisting with decision-making. Ask it anything!"
|
91 |
+
# )
|
92 |
+
# pretokenized_context = tokenizer(CONTEXT, return_tensors="pt").input_ids.to("cpu")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
93 |
|
94 |
+
# def generate_response(message, chat_history):
|
95 |
+
# prompt = torch.cat(
|
96 |
+
# [pretokenized_context, tokenizer("\nYou: " + message + "\nParvizGPT: ", return_tensors="pt").input_ids.to("cpu")],
|
97 |
+
# dim=1
|
98 |
+
# )
|
99 |
|
100 |
+
# with torch.no_grad():
|
101 |
+
# outputs = model.generate(
|
102 |
+
# prompt,
|
103 |
+
# max_new_tokens=32,
|
104 |
+
# temperature=0.6,
|
105 |
+
# top_k=20,
|
106 |
+
# top_p=0.8,
|
107 |
+
# do_sample=True
|
108 |
+
# )
|
109 |
|
110 |
+
# result = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
111 |
+
# response = result.split("ParvizGPT:")[-1].strip()
|
112 |
+
# return chat_history + [(message, response)]
|
113 |
|
114 |
+
# with gr.Blocks() as demo:
|
115 |
+
# gr.Markdown("<h1 style='text-align: center;'>💬 Parviz GPT</h1>")
|
116 |
+
# chatbot = gr.Chatbot(label="Response")
|
117 |
+
# msg = gr.Textbox(label="Input", placeholder="Ask your question...", lines=1)
|
118 |
+
# msg.submit(generate_response, [msg, chatbot], chatbot)
|
119 |
+
# gr.ClearButton([msg, chatbot])
|
|
|
|
|
|
|
|
|
|
|
120 |
|
121 |
+
# demo.launch()
|