sujra committed
Commit c4a6283 · verified · 1 Parent(s): 4d3ea5b

Update app.py

Files changed (1): app.py +13 -23
app.py CHANGED
@@ -1,28 +1,18 @@
- import streamlit as st
- from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
-
- # Load LLAMA model and tokenizer
- model_name = "sujra/insurance_Model"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForSequenceClassification.from_pretrained(model_name)
-
- # Define function for generating text
- def generate_text(prompt):
-     pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
-     result = pipe(f"<s>[INST] {prompt} [/INST]")
-     generated_text = result[0]['generated_text']
-     return generated_text
-
- st.title("Insurance Response Generation")
-
- prompt_input = st.text_input("Enter your prompt:")
-
- if st.button("Generate Response"):
-     if prompt_input:
-         with st.spinner("Generating response..."):  # Display a spinner while generating response
-             response = generate_text(prompt_input)
-             st.write("Generated Response:")
-             st.write(response)
-     else:
-         st.write("Please enter a prompt.")
+ import gradio as gr
+ from transformers import pipeline
+
+ # Load your model from the Hugging Face model hub
+ model_name = "sujra/insurance_Model"
+ qa_pipeline = pipeline("question-answering", model=model_name, tokenizer=model_name)
+
+ def get_answer(question):
+     answer = qa_pipeline(question=question, max_length=128, batch_size=4)['answer']  # Reduce batch size and set max_length
+     return answer
+
+ # Define the input and output components for the UI
+ question_input = gr.inputs.Textbox(lines=2, label="Enter your question")
+ output_text = gr.outputs.Textbox(label="Answer")
+
+ # Create the UI
+ gr.Interface(fn=get_answer, inputs=question_input, outputs=output_text, title="Insurance Question Answering").launch()
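
Note: as committed, the new code is unlikely to run as-is. A transformers question-answering pipeline is extractive and raises an error when called without a context argument alongside the question (and max_length is not a documented QA-pipeline parameter; the answer-length knob is max_answer_len). Likewise, gr.inputs / gr.outputs are the legacy Gradio 2.x namespaces, removed in later releases where components live at the top level (gr.Textbox). Below is a minimal sketch of a working variant, assuming a recent Gradio (3.x/4.x) and that the "sujra/insurance_Model" checkpoint supports the question-answering task; the two-Textbox layout is an illustrative choice, not the author's.

import gradio as gr
from transformers import pipeline

# Hypothetical corrected variant of the committed app.py, not the author's code.
model_name = "sujra/insurance_Model"
qa_pipeline = pipeline("question-answering", model=model_name, tokenizer=model_name)

def get_answer(question, context):
    # Extractive QA selects an answer span from the supplied context passage.
    result = qa_pipeline(question=question, context=context, max_answer_len=128)
    return result["answer"]

# Current Gradio API: components are top-level classes, not gr.inputs / gr.outputs.
demo = gr.Interface(
    fn=get_answer,
    inputs=[
        gr.Textbox(lines=2, label="Enter your question"),
        gr.Textbox(lines=6, label="Context (e.g. policy text)"),
    ],
    outputs=gr.Textbox(label="Answer"),
    title="Insurance Question Answering",
)

if __name__ == "__main__":
    demo.launch()

Passing the context explicitly is what makes extractive QA work here; if the intent was free-form generation (as in the removed Streamlit version), a "text-generation" pipeline would be the closer fit.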