huriacane33 committed on
Commit
d25636a
·
verified ·
1 Parent(s): 2e8f79f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -8
app.py CHANGED
@@ -1,10 +1,18 @@
1
  import streamlit as st
2
- from transformers import pipeline
3
  import pandas as pd
4
  import re
5
 
6
- # Load the Question Answering model
7
- qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
 
 
 
 
 
 
 
 
8
 
9
  # Load SOP Dataset
10
  @st.cache_data
@@ -13,7 +21,6 @@ def load_sop_dataset():
13
  dataset = pd.read_csv("dataset.csv") # Ensure this file is uploaded to your Hugging Face Space
14
  return dataset
15
 
16
- # Load the dataset
17
  dataset = load_sop_dataset()
18
 
19
  # Utility function to find the most relevant context
@@ -32,7 +39,7 @@ def find_best_context(question, dataset):
32
  return best_context
33
 
34
  # Streamlit UI
35
- st.title("SOP Question Answering AI")
36
  st.markdown("Ask any question about Standard Operating Procedures:")
37
 
38
  # User input
@@ -47,10 +54,10 @@ if st.button("Get Answer"):
47
 
48
  if context:
49
  with st.spinner("Answering your question..."):
50
- result = qa_pipeline(question=question, context=context)
 
51
  st.success("Answer:")
52
- st.write(result["answer"])
53
- st.write("Confidence Score:", result["score"])
54
  else:
55
  st.warning("No relevant context found. Please try rephrasing your question.")
56
  else:
 
1
  import streamlit as st
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  import pandas as pd
4
  import re
5
 
6
# Load the LLaMA model and tokenizer
@st.cache_resource
def load_llama_model(model_name: str = "meta-llama/Llama-2-7b-chat-hf"):
    """Load a causal LM and its tokenizer, wrapped in a text-generation pipeline.

    Decorated with ``st.cache_resource`` so the multi-gigabyte model is
    loaded only once per Streamlit server process instead of on every
    script rerun.

    Args:
        model_name: Hugging Face model id to load. Defaults to the
            Llama-2 7B chat checkpoint (a gated repo — requires an
            authorized Hugging Face token at download time).

    Returns:
        A ``transformers`` text-generation pipeline ready for inference.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

# Module-level pipeline handle used by the question-answering flow below.
qa_pipeline = load_llama_model()
16
 
17
  # Load SOP Dataset
18
  @st.cache_data
 
21
  dataset = pd.read_csv("dataset.csv") # Ensure this file is uploaded to your Hugging Face Space
22
  return dataset
23
 
 
24
  dataset = load_sop_dataset()
25
 
26
  # Utility function to find the most relevant context
 
39
  return best_context
40
 
41
  # Streamlit UI
42
+ st.title("SOP Question Answering AI with LLaMA")
43
  st.markdown("Ask any question about Standard Operating Procedures:")
44
 
45
  # User input
 
54
 
55
  if context:
56
  with st.spinner("Answering your question..."):
57
+ prompt = f"Context: {context}\n\nQuestion: {question}\nAnswer:"
58
+ result = qa_pipeline(prompt, max_length=150, num_return_sequences=1)
59
  st.success("Answer:")
60
+ st.write(result[0]["generated_text"])
 
61
  else:
62
  st.warning("No relevant context found. Please try rephrasing your question.")
63
  else: