Update app.py
app.py (CHANGED)
@@ -15,56 +15,46 @@ st.set_page_config(page_title="Chat with Notes and AI", page_icon=":books:", lay
 # Load environment variables
 load_dotenv()
 
-# Dolly-v2
+# Optimized Dolly-v2 model pipeline
 @st.cache_resource
 def load_pipeline():
-    model_name = "databricks/dolly-v2-
+    model_name = "databricks/dolly-v2-1b" # Smaller model for CPU
 
-    # Load tokenizer
+    # Load tokenizer and model
     tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left", trust_remote_code=True)
-
-    # Load model with offload folder for disk storage of weights
     model = AutoModelForCausalLM.from_pretrained(
         model_name,
-        torch_dtype=torch.
-        device_map="auto",
+        torch_dtype=torch.float32, # Use float32 for CPU
+        device_map="auto",
         trust_remote_code=True,
-        offload_folder="./offload_weights" # Folder to store
+        offload_folder="./offload_weights" # Folder to store weights if needed
     )
 
-    #
+    # Create text-generation pipeline
    return pipeline(
         task="text-generation",
         model=model,
         tokenizer=tokenizer,
-
-
-
+        max_new_tokens=50, # Limit response length for speed
+        return_full_text=False,
+        device_map="auto"
     )
 
 # Initialize Dolly pipeline
 generate_text = load_pipeline()
 
-# Create
+# Create HuggingFace pipeline wrapper for LangChain
 hf_pipeline = HuggingFacePipeline(pipeline=generate_text)
 
-#
-prompt = PromptTemplate(
-
-    template="{instruction}"
-)
-
-# Template for prompts with context
-prompt_with_context = PromptTemplate(
-    input_variables=["instruction", "context"],
-    template="{instruction}\n\nInput:\n{context}"
-)
+# Prompt templates
+prompt = PromptTemplate(input_variables=["instruction"], template="{instruction}")
+prompt_with_context = PromptTemplate(input_variables=["instruction", "context"], template="{instruction}\n\nInput:\n{context}")
 
 # Create LLM chains
 llm_chain = LLMChain(llm=hf_pipeline, prompt=prompt)
 llm_context_chain = LLMChain(llm=hf_pipeline, prompt=prompt_with_context)
 
-#
+# Extract text from .txt files
 def get_text_files_content(folder):
     text = ""
     for filename in os.listdir(folder):
@@ -73,36 +63,30 @@ def get_text_files_content(folder):
             text += file.read() + "\n"
     return text
 
-#
+# Convert text into smaller chunks
 def get_chunks(raw_text):
     from langchain.text_splitter import CharacterTextSplitter
     text_splitter = CharacterTextSplitter(
         separator="\n",
-        chunk_size=
-        chunk_overlap=
+        chunk_size=512, # Smaller chunks for faster processing
+        chunk_overlap=50, # Minimal overlap
         length_function=len
     )
-
-    return chunks
+    return text_splitter.split_text(raw_text)
 
-#
+# Create FAISS vectorstore
 def get_vectorstore(chunks):
     embeddings = HuggingFaceEmbeddings(
         model_name="sentence-transformers/all-MiniLM-L6-v2",
-        model_kwargs={'device': 'cpu'} #
+        model_kwargs={'device': 'cpu'} # Force CPU usage for embeddings
     )
-
-    return vectorstore
+    return FAISS.from_texts(texts=chunks, embedding=embeddings)
 
-#
+# Generate response from user queries
 def handle_question(question, vectorstore=None):
     if vectorstore:
-
-
-        context = "\n".join([doc.page_content for doc in documents])
-
-        # Limit context to 1000 characters to speed up model inference
-        context = context[:1000]
+        documents = vectorstore.similarity_search(question, k=1) # Retrieve fewer chunks
+        context = "\n".join([doc.page_content for doc in documents])[:512] # Shorter context
 
         if context:
             result_with_context = llm_context_chain.invoke({"instruction": question, "context": context})
@@ -122,26 +106,18 @@ def main():
     data_folder = "data" # Current Affairs folders
     essay_folder = "essays" # Essays folder
 
-    #
+    # Sidebar for content selection
     content_type = st.sidebar.radio("Select Content Type:", ["Current Affairs", "Essays"])
 
-    # Handle
+    # Handle folder-based selection
     if content_type == "Current Affairs":
-        if os.path.exists(data_folder):
-            subjects = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f))]
-        else:
-            subjects = []
-    # Handle Essays (all essays are in a single folder)
+        subjects = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f))] if os.path.exists(data_folder) else []
     elif content_type == "Essays":
-        if os.path.exists(essay_folder):
-            subjects = [f.replace(".txt", "") for f in os.listdir(essay_folder) if f.endswith('.txt')]
-        else:
-            subjects = []
+        subjects = [f.replace(".txt", "") for f in os.listdir(essay_folder) if f.endswith('.txt')] if os.path.exists(essay_folder) else []
 
-    # Subject selection
     selected_subject = st.sidebar.selectbox("Select a Subject:", subjects)
 
-    # Process selected subject
+    # Process the selected subject
    raw_text = ""
     if content_type == "Current Affairs" and selected_subject:
         subject_folder = os.path.join(data_folder, selected_subject)
@@ -152,15 +128,15 @@ def main():
            with open(subject_file, "r", encoding="utf-8") as file:
                raw_text = file.read()
 
-    # Display preview of notes
+    # Display preview of notes and load vectorstore
     if raw_text:
         st.subheader("Preview of Notes")
-        st.text_area("Preview Content:", value=raw_text[:
+        st.text_area("Preview Content:", value=raw_text[:1000], height=300, disabled=True) # Display shorter preview
 
-        #
-
-
-
+        # Preload vectorstore if not already cached
+        if "vectorstore" not in st.session_state or st.session_state.vectorstore is None:
+            text_chunks = get_chunks(raw_text)
+            st.session_state.vectorstore = get_vectorstore(text_chunks)
     else:
         st.warning("No content available for the selected subject.")
 
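For quick local testing, a minimal sketch (not part of the commit) of how the refactored helpers compose end to end. It assumes app.py's get_chunks, get_vectorstore and handle_question are importable (importing app.py also runs the Streamlit setup and loads the Dolly model), that handle_question returns the generated answer, and that the sample file path below is a placeholder:

# Sketch only: exercise the helpers from app.py outside the Streamlit UI.
# "essays/sample.txt" is a hypothetical notes file; replace it with a real one.
from app import get_chunks, get_vectorstore, handle_question

with open("essays/sample.txt", "r", encoding="utf-8") as f:
    raw_text = f.read()

chunks = get_chunks(raw_text)          # 512-character chunks with 50-character overlap
vectorstore = get_vectorstore(chunks)  # FAISS index over MiniLM embeddings on CPU
answer = handle_question("Summarise the key points.", vectorstore)
print(answer)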