SupermanRX committed on
Commit
3472271
·
verified ·
1 Parent(s): 552e1de

Another update: replace the transformers-based model loading with a local GGUF model served via llama-cpp, and switch the UI to a Gradio Blocks chat layout.

Browse files
Files changed (1) hide show
  1. app.py +21 -19
app.py CHANGED
@@ -1,25 +1,27 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
# Load the fine-tuned causal language model and its matching tokenizer
# from the Hugging Face Hub (downloaded and cached on first run).
model_name = "SupermanRX/moderateTherapistModel" # Replace with your Hugging Face model path
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
 
9
def chatbot(input_text):
    """Generate a single model reply for one user message.

    Tokenizes the input, runs greedy generation capped at 200 tokens,
    and decodes the result back to plain text.
    """
    encoded = tokenizer(input_text, return_tensors="pt")
    generated = model.generate(
        encoded["input_ids"],
        max_length=200,
        pad_token_id=tokenizer.eos_token_id,
    )
    reply = tokenizer.decode(generated[0], skip_special_tokens=True)
    return reply
15
 
16
# Create a Gradio interface: a plain text box in, generated text out.
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Chat with Your Model"
)

# Launch the Gradio app (blocks until the server is stopped).
interface.launch()
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from llama_cpp import Llama
3
 
4
# Load the GGUF model via the llama.cpp Python bindings.
# NOTE(review): Llama() uses default context size / threads here — confirm
# the defaults are adequate for this model.
model_path = "model.gguf" # Ensure this matches your uploaded file name
llm = Llama(model_path=model_path)
 
7
 
8
# Define the chatbot function
def chatbot(input_text):
    """Run one completion through the local GGUF model and return its text."""
    completion = llm(prompt=input_text, max_tokens=200)
    first_choice = completion["choices"][0]
    return first_choice["text"]
 
 
 
12
 
13
# Create Gradio interface: chat history display, a message box, and a send button.
with gr.Blocks() as demo:
    chatbot_ui = gr.Chatbot()
    textbox = gr.Textbox(label="Type your message here:")
    submit = gr.Button("Send")

    # Handle user interaction
    def user_interaction(input_text, chat_history):
        """Append the (user, bot) exchange to the history and clear the textbox.

        Returns the updated history for the Chatbot component and an empty
        string to reset the Textbox.
        """
        # Fix: the Chatbot component's initial value can arrive as None before
        # the first turn, which would make .append() raise AttributeError.
        chat_history = chat_history or []
        response = chatbot(input_text)
        chat_history.append((input_text, response))
        return chat_history, ""

    submit.click(user_interaction, [textbox, chatbot_ui], [chatbot_ui, textbox])
    # Pressing Enter in the textbox behaves the same as clicking Send.
    textbox.submit(user_interaction, [textbox, chatbot_ui], [chatbot_ui, textbox])

demo.launch()