Update loaders/common.py
loaders/common.py  (+42 -8)
@@ -6,6 +6,24 @@ from langchain.schema import Document
 import streamlit as st
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from stats import add_usage
+import re
+
+def clean_chat_text(text):
+    """Clean chat export text to remove special characters and format consistently"""
+    # Remove non-printable characters
+    text = ''.join(char for char in text if char.isprintable())
+
+    # Clean up WhatsApp-style timestamps and phone numbers
+    text = re.sub(r'\[\d{1,2}/\d{1,2}/\d{2,4},\s+\d{1,2}:\d{1,2}:\d{1,2}\s+[AP]M\]', '', text)
+    text = re.sub(r'\+\d{2,3}\s*\d{3,10}\s*\d{3,10}', '', text)
+
+    # Remove joining messages
+    text = re.sub(r'joined using this group\'s invite link', '', text)
+
+    # Remove extra whitespace
+    text = ' '.join(text.split())
+
+    return text
 
 def process_file(vector_store, file, loader_class, file_suffix, stats_db=None):
     documents = []
@@ -30,17 +48,30 @@ def process_file(vector_store, file, loader_class, file_suffix, stats_db=None):
     text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
 
     documents = text_splitter.split_documents(documents)
-
-
-
-
-
+
+    # Clean the text content before creating metadata
+    docs_with_metadata = [Document(page_content=clean_chat_text(doc.page_content),
+                                   metadata={"file_sha1": file_sha1,
+                                             "file_size": file_size,
+                                             "file_name": file_name,
+                                             "chunk_size": chunk_size,
+                                             "chunk_overlap": chunk_overlap,
+                                             "date": dateshort,
+                                             "user": st.session_state["username"]})
+                          for doc in documents]
 
     try:
+        # Add debug logging before vector store addition
+        print(f"Attempting to add {len(docs_with_metadata)} documents")
+        print(f"Sample cleaned content: {docs_with_metadata[0].page_content[:200] if docs_with_metadata else 'No documents'}")
+
         vector_store.add_documents(docs_with_metadata)
+
         if stats_db:
-            add_usage(stats_db, "embedding", "file", metadata={"file_name": file_name,
-                "
+            add_usage(stats_db, "embedding", "file", metadata={"file_name": file_name,
+                                                               "file_type": file_suffix,
+                                                               "chunk_size": chunk_size,
+                                                               "chunk_overlap": chunk_overlap})
     except Exception as e:
         print(f"Error adding documents to vector store:")
         print(f"Exception: {str(e)}")
@@ -54,4 +85,7 @@ def process_file(vector_store, file, loader_class, file_suffix, stats_db=None):
         print(f"First document preview (truncated):")
         if docs_with_metadata:
             print(docs_with_metadata[0].page_content[:500])
-
+
+        # Additional debug info for vector store
+        print(f"Vector store type: {type(vector_store).__name__}")
+        raise
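
For quick reference, a standalone sketch of how the new clean_chat_text helper behaves. The function is copied from the diff above; the two sample chat lines are invented for illustration only and are not part of the commit.

# Standalone demo of clean_chat_text; the sample lines below are hypothetical.
import re

def clean_chat_text(text):
    """Clean chat export text to remove special characters and format consistently"""
    text = ''.join(char for char in text if char.isprintable())
    text = re.sub(r'\[\d{1,2}/\d{1,2}/\d{2,4},\s+\d{1,2}:\d{1,2}:\d{1,2}\s+[AP]M\]', '', text)
    text = re.sub(r'\+\d{2,3}\s*\d{3,10}\s*\d{3,10}', '', text)
    text = re.sub(r'joined using this group\'s invite link', '', text)
    text = ' '.join(text.split())
    return text

print(clean_chat_text("[12/31/23, 9:15:02 PM] Alice: see you tomorrow\x00"))
# -> "Alice: see you tomorrow"  (timestamp and the non-printable NUL are stripped)

print(clean_chat_text("[1/2/24, 10:05:30 AM] +91 98765 43210 joined using this group's invite link"))
# -> ""  (timestamp, phone number, and join notice are all removed)

Note that the final ' '.join(text.split()) step collapses all runs of whitespace, including newlines, into single spaces, so line breaks inside a chunk do not survive cleaning.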
|