migueldeguzmandev committed on
Commit
40cb650
·
verified ·
1 Parent(s): b39bf41

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -1
app.py CHANGED
@@ -1,3 +1,32 @@
1
  import gradio as gr
 
2
 
3
- gr.load("models/migueldeguzmandev/RLLMv3.2-10").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hugging Face Hub repo id of the model this Space serves.
# NOTE(review): the user name appears duplicated inside the repo name
# ("migueldeguzmandev/migueldeguzmandev-...") — confirm against the Hub.
model_name = "migueldeguzmandev/migueldeguzmandev-RLLMv3.2-10"

# Download (or load from the local cache) the tokenizer and causal-LM
# weights once at startup, so every request reuses the same objects.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Define the inference function
def generate_response(input_text):
    """Generate one reply to ``input_text`` with the loaded causal LM.

    Args:
        input_text: The user's raw prompt string.

    Returns:
        The decoded model output (which includes an echo of the prompt,
        since the full generated sequence is decoded).
    """
    # Tokenize via the tokenizer call API so we also get an attention
    # mask; passing it to generate() silences the "attention mask not
    # set" warning and stays correct if padding is ever introduced.
    inputs = tokenizer(input_text, return_tensors="pt")

    # Greedy decoding (generate()'s default), capped at 100 tokens
    # TOTAL (prompt + continuation), one sequence. pad_token_id falls
    # back to EOS because GPT-style models define no pad token.
    output = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=100,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode the generated response, dropping special tokens.
    response = tokenizer.decode(output[0], skip_special_tokens=True)

    return response
# Wire the inference function into a minimal Gradio UI:
# one text box for the user's message, one for the model's reply.
interface = gr.Interface(
    fn=generate_response,
    title="Conversation with migueldeguzmandev-RLLMv3.2-10",
    description="Enter your message and the model will generate a response.",
    inputs=gr.Textbox(label="User Input"),
    outputs=gr.Textbox(label="Model Response"),
)

# Start the web server for the Space.
interface.launch()