import os
import pandas as pd
from tqdm import tqdm
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import SupabaseVectorStore
from langchain.schema.document import Document
from supabase import create_client, Client

# --- Load Environment Variables ---
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")
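
# Fail fast if credentials are missing (a small guard; assumes the two env vars above are the only ones required)
if not SUPABASE_URL or not SUPABASE_KEY:
    raise RuntimeError("SUPABASE_URL and SUPABASE_KEY must be set before running this script.")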

# --- Init Supabase & Embeddings ---
supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")  # Swap in OpenAIEmbeddings (or another embeddings provider) if you prefer a hosted model
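# all-mpnet-base-v2 produces 768-dimensional vectors, so the embedding column of the
# Supabase "documents" table below should be declared as vector(768) for inserts to succeed.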

# --- Read CSV File ---
df = pd.read_csv("supabase_docs.csv")  # Expects a 'content' column; any other columns are treated as metadata

# --- Convert rows to LangChain Document objects ---
documents = []
for _, row in tqdm(df.iterrows(), total=len(df)):
    content = str(row["content"])
    # Every column other than 'content' becomes document metadata
    metadata = row.drop("content").to_dict()
    documents.append(Document(page_content=content, metadata=metadata))

# --- Create Supabase Vector Store and Upload ---
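# Note: this assumes the "documents" table (with a pgvector embedding column) and the
# "match_documents_langchain" SQL function already exist in your Supabase project, as
# described in LangChain's Supabase vector store setup.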
vectorstore = SupabaseVectorStore.from_documents(
    documents=documents,
    embedding=embeddings,
    client=supabase,
    table_name="documents",
    query_name="match_documents_langchain"
)

print("βœ… Upload complete.")