import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Load the LLaMA-based model and tokenizer once and cache them across Streamlit reruns.
# A causal-LM head is required for the text-generation pipeline below.
@st.cache_resource
def load_pipeline():
    model_name = "sujra/insurance_Model"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)

# Generate a response for the given prompt using the LLaMA instruction format
def generate_text(prompt):
    pipe = load_pipeline()
    result = pipe(f"<s>[INST] {prompt} [/INST]")
    return result[0]['generated_text']

st.title("Insurance Response Generation")

prompt_input = st.text_input("Enter your prompt:")

if st.button("Generate Response"):
    if prompt_input:
        with st.spinner("Generating response..."):  # Display a spinner while generating response
            response = generate_text(prompt_input)
        st.write("Generated Response:")
        st.write(response)
    else:
        st.write("Please enter a prompt.")