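# DictaLM 2.0 - Instruct chat demo (Gradio Space).
#
# A rough sketch of running this demo locally, assuming an OpenAI-compatible
# server (e.g. vLLM) is already serving dicta-il/dictalm2.0-instruct:
#
#     export API_URL=<base URL of the OpenAI-compatible endpoint>
#     export API_KEY=<matching API key>
#     python app.py   # assuming this file is saved as app.py
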
import os
import gradio as gr
import openai
from typing import Dict, Generator, List, Optional, Tuple

# The serving endpoint and its key are read from the environment.
API_URL = os.getenv('API_URL')
API_KEY = os.getenv('API_KEY')

# OpenAI-compatible client pointed at the DictaLM endpoint.
oai_client = openai.OpenAI(api_key=API_KEY, base_url=API_URL)

# Gradio keeps chat history as (user, assistant) pairs; the chat API expects role/content dicts.
History = List[Tuple[str, str]]
Messages = List[Dict[str, str]]

def clear_session() -> Tuple[str, History]:
    # Reset the textbox value and the chat history.
    return '', []

def history_to_messages(history: History) -> Messages:
    # Convert Gradio (user, assistant) pairs into role/content message dicts.
    messages = []
    for user_msg, assistant_msg in history:
        messages.append({'role': 'user', 'content': user_msg})
        messages.append({'role': 'assistant', 'content': assistant_msg})
    return messages

def messages_to_history(messages: Messages) -> History:
    # Pair consecutive user/assistant messages back into Gradio history tuples.
    history = []
    for q, r in zip(messages[0::2], messages[1::2]):
        history.append((q['content'], r['content']))
    return history

def model_chat(query: Optional[str], history: Optional[History]) -> Generator[str, None, None]:
    # Streaming handler for gr.ChatInterface: yields the partial response as it grows.
    if query is None:
        query = ''
    if history is None:
        history = []
    messages = history_to_messages(history)
    messages.append({'role': 'user', 'content': query})
    gen = oai_client.chat.completions.create(
        model='dicta-il/dictalm2.0-instruct',
        messages=messages,
        temperature=0.7,
        max_tokens=1024,
        top_p=0.9,
        stream=True,
    )
    full_response = ''
    for completion in gen:
        # Each streamed chunk carries a delta; its content may be None (e.g. on the final chunk).
        text = completion.choices[0].delta.content
        full_response += text or ''
        yield full_response

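# A minimal manual smoke test (a sketch only; it assumes API_URL/API_KEY point at a
# live endpoint and is not part of the Gradio UI below):
#
#     for partial in model_chat('Hello!', []):
#         print(partial)
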
with gr.Blocks(css='''
    .gr-group {direction: rtl;}
    .chatbot{text-align:right;}
    .dicta-header {
        background-color: #f4f4f4; /* Replace with desired background color */
        border-radius: 10px;
        padding: 20px;
        text-align: center;
        display: flex;
        flex-direction: row;
        align-items: center;
    }
    .dicta-logo {
        width: 150px; /* Replace with actual logo width as desired */
        height: auto;
        margin-bottom: 20px;
    }
    .dicta-intro-text {
        color: #333; /* Replace with desired text color */
        margin-bottom: 20px;
        text-align: center;
        display: flex;
        flex-direction: column;
        align-items: center;
        width: 100%;
    }
''') as demo:
gr.Markdown(""" | |
<div class="dicta-header"> | |
<img src="file/dicta-logo.jpg" alt="Dicta Logo" class="dicta-logo"> | |
<div class="dicta-intro-text"> | |
<h1>DictaLM 2.0 - Instruct Chat Demo</h1> | |
<p>Welcome to the interactive demo of DictaLM-2.0. Explore the capabilities of our model and see how it can assist with your tasks.</p> | |
<p dir='rtl'> 讘专讜讻讬诐 讛讘讗讬诐 诇讚诪讜 讛讗讬谞讟专讗拽讟讬讘讬 砖诇 DictaLM-2.0. 讞拽专讜 讗转 讬讻讜诇讜转 讛诪讜讚诇 砖诇谞讜 讜专讗讜 讻讬爪讚 讛讜讗 讬讻讜诇 诇住讬讬注 诇讻诐 讘诪砖讬诪讜转讬讻诐.</p> | |
</div> | |
</div> | |
""") | |
    # Standard ChatInterface wired to the streaming handler, with RTL tweaks for Hebrew.
    interface = gr.ChatInterface(model_chat, fill_height=False)
    interface.chatbot.rtl = True
    interface.textbox.placeholder = "הכנס שאלה בעברית (או באנגלית!)"  # "Enter a question in Hebrew (or in English!)"
    interface.textbox.rtl = True
    interface.textbox.text_align = 'right'
    interface.theme_css += '.gr-group {direction: rtl !important;}'

# allowed_paths lets Gradio serve the logo file referenced in the header Markdown.
demo.queue(api_open=False).launch(max_threads=10, share=False, allowed_paths=['dicta-logo.jpg'])