GSridhar1982 committed on
Commit
e05a359
·
verified ·
1 Parent(s): 6d13369

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -10
app.py CHANGED
@@ -1,4 +1,8 @@
1
- #import gradio as gr
 
 
 
 
2
  #from llama_cpp import Llama
3
 
4
  # Load the Llama model
@@ -21,17 +25,26 @@
21
  # Extract the model's reply
22
  # model_reply = response['choices'][0]['message']['content']
23
  # return model_reply
 
 
 
 
 
 
 
 
24
 
25
- from transformers import pipeline
26
-
27
- # Replace with your Hugging Face model space URL
28
- model_id = "GSridhar1982/AIML_QA_Llama31_FineTuned_UsingLora"
29
-
30
- pipe = pipeline("text2text-generation", model=model_id)
 
31
 
32
- def generate_text(prompt):
33
- """Generates text using the loaded Hugging Face model."""
34
- return pipe(prompt)[0]['generated_text']
35
 
36
  # Create a Gradio interface
37
  iface = gr.Interface(
 
1
+ import gradio as gr
2
+ from transformers import pipeline
3
+ from peft import AutoPeftModelForCausalLM
4
+ from transformers import AutoTokenizer
5
+
6
  #from llama_cpp import Llama
7
 
8
  # Load the Llama model
 
25
  # Extract the model's reply
26
  # model_reply = response['choices'][0]['message']['content']
27
  # return model_reply
28
def generate_answer(user_input):
    """Generate an answer for *user_input* with the LoRA fine-tuned Llama model.

    Parameters
    ----------
    user_input : str
        The question/prompt to run through the model.

    Returns
    -------
    list[str]
        The generated text(s) for the prompt.
    """
    # Single source of truth for the model repo (was duplicated as two literals).
    model_id = "GSridhar1982/AIML_QA_Llama31_FineTuned_UsingLora"

    # NOTE(review): loading the model and tokenizer on EVERY call is very slow;
    # consider hoisting these two loads to module level so they run once.
    model = AutoPeftModelForCausalLM.from_pretrained(
        model_id,
        # Fix: the original passed `load_in_4bit=load_in_4bit`, but no such
        # variable exists anywhere in the file -> NameError on first call.
        load_in_4bit=True,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Build a text-generation pipeline around the loaded model/tokenizer.
    generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer)

    # Generate a completion for the user's prompt (greedy: num_beams=1).
    predictions = generator(
        user_input,
        max_new_tokens=100,
        num_beams=1,
    )

    # Fix: for a single string input the pipeline returns a list of dicts,
    # so each `pred` is a dict — the original `pred[0]['generated_text']`
    # raised KeyError(0). Index the dict directly.
    predictions = [pred['generated_text'] for pred in predictions]
    return predictions
48
 
49
  # Create a Gradio interface
50
  iface = gr.Interface(