import gradio as gr # to create the web UI for the application
from openai import OpenAI # to interact with LM Studio models
import re # for text manipulation
# ANSI escape codes for terminal colors (not used by the Gradio UI)
RESET_COLOR = '\033[0m'
NEON_GREEN = '\033[92m'
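# LM Studio serves local models through an OpenAI-compatible API; the port
# below must match the one configured in LM Studio's local server (1234 by
# default), and the api_key value is a placeholder that LM Studio does not check.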
client = OpenAI(base_url="http://localhost:1836/v1", api_key="lm-studio")
# Initialize an empty list to store conversation history
conversation_history = []
def format_response_text(text):
    """
    Formats the response text for improved readability.
    :param text: The raw response text.
    :return: Formatted text.
    """
    # Start a new paragraph after each period, question mark, or exclamation point
    text = re.sub(r'(?<=[.!?])\s+(?=[A-Z])', '\n\n', text)
    # Properly indent bullet points and numbered lists
    text = re.sub(r'(\n)?(\s*)?([•\-*]|\d+\.)\s+', r'\n \3 ', text)
    return text
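# Example of the two transformations above:
#   format_response_text("Intro. Next point.\n- item")
#   -> "Intro.\n\nNext point.\n - item"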
def mistral_streamed_interaction(user_input, conversation_history):
    """
    Interacts with the Mistral model via LM Studio, maintaining conversation context.
    :param user_input: String, the user's input.
    :param conversation_history: List, the conversation history.
    :return: Tuple containing the response and the updated conversation history.
    """
    # Add the user's input to the conversation history
    conversation_history.append({"role": "user", "content": user_input})
    streamed_completion = client.chat.completions.create(
        model="TheBloke/dolphin-2.2.1-mistral-7B-GGUF/dolphin-2.2.1-mistral-7b.Q4_K_S.gguf",
        messages=conversation_history,
        stream=True  # Enable streaming
    )
    full_response = ""
    line_buffer = ""
    # Accumulate streamed chunks, flushing complete lines as they arrive
    for chunk in streamed_completion:
        delta_content = chunk.choices[0].delta.content
        if delta_content:
            line_buffer += delta_content
            if '\n' in line_buffer:
                lines = line_buffer.split('\n')
                # Flush every complete line, re-adding the newline that split() consumed
                full_response += '\n'.join(lines[:-1]) + '\n'
                # Keep the trailing, possibly incomplete, line in the buffer
                line_buffer = lines[-1]
    # Flush whatever remains in the buffer once the stream ends
    if line_buffer:
        full_response += line_buffer
    full_response = format_response_text(full_response)
    # Add the model's response to the conversation history under the
    # "assistant" role, the role the OpenAI chat format uses for model replies
    conversation_history.append({"role": "assistant", "content": full_response})
    return full_response, conversation_history
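# After one exchange, conversation_history holds entries of the form:
#   [{"role": "user", "content": "<prompt>"},
#    {"role": "assistant", "content": "<formatted reply>"}]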
def clear_conversation_history():
    """
    Clears the conversation history.
    """
    global conversation_history
    conversation_history = []
    print("Conversation history cleared.")
def gradio_interface_interaction(user_input):
    """
    Acts as the bridge between the Gradio interface and the chat logic:
    it processes the user input via the existing chat logic and returns the response.
    :param user_input: User input from the Gradio interface.
    :return: Response text to be displayed in the Gradio interface.
    """
    # Call the chat interaction function with the global conversation history
    response, _ = mistral_streamed_interaction(user_input, conversation_history)
    return response
# Build the Gradio interface around the interaction function
iface = gr.Interface(
    fn=gradio_interface_interaction,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here"),
    outputs=gr.Textbox(),
)
# Launch the Gradio interface
iface.launch(share=True)
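# share=True additionally creates a temporary public Gradio link; pass
# share=False to keep the app reachable only at the local URL.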