import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the fine-tuned insurance model and its tokenizer.
# The text-generation pipeline used below needs a causal language-model head,
# so AutoModelForCausalLM is used rather than AutoModelForSequenceClassification.
model_name = "sujra/insurance_Model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def generate_text(prompt):
    # Build a text-generation pipeline and wrap the prompt in the
    # <s>[INST] ... [/INST] instruction template the model expects.
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
    result = pipe(f"<s>[INST] {prompt} [/INST]")
    generated_text = result[0]['generated_text']
    return generated_text
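

# Optional sketch: Streamlit reruns this entire script on every interaction, so the
# model above is reloaded and the pipeline rebuilt on each button click. Assuming a
# Streamlit version that provides st.cache_resource, the loading could instead be
# cached once per process as shown here. load_cached_pipeline is a hypothetical
# helper for illustration only; it is not wired into generate_text above.
@st.cache_resource
def load_cached_pipeline():
    tok = AutoTokenizer.from_pretrained(model_name)
    mdl = AutoModelForCausalLM.from_pretrained(model_name)
    return pipeline(task="text-generation", model=mdl, tokenizer=tok, max_length=200)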


st.title("Insurance Response Generation")

prompt_input = st.text_input("Enter your prompt:")

if st.button("Generate Response"):
    if prompt_input:
        with st.spinner("Generating response..."):
            response = generate_text(prompt_input)
            st.write("Generated Response:")
            st.write(response)
    else:
        st.write("Please enter a prompt.")