Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,85 +1,63 @@
|
|
1 |
-
|
2 |
-
import
|
3 |
-
import
|
4 |
-
import
|
5 |
-
|
|
|
6 |
|
7 |
-
|
|
|
|
|
|
|
8 |
|
9 |
-
#
|
10 |
-
|
11 |
-
signer = oci.auth.signers.get_resource_principals_signer()
|
12 |
|
13 |
-
#
|
14 |
-
|
15 |
|
16 |
-
#
|
17 |
-
|
18 |
-
"Content-Type": "application/json",
|
19 |
-
"Authorization": "Bearer {access_token}" # Use the appropriate method to authenticate
|
20 |
-
}
|
21 |
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
"dnsZones": dns_zones,
|
32 |
-
"definedTags": {},
|
33 |
-
"freeformTags": {}
|
34 |
-
}
|
35 |
-
|
36 |
-
response = requests.post(
|
37 |
-
oci_endpoint,
|
38 |
-
headers=headers,
|
39 |
-
data=json.dumps(request_body)
|
40 |
-
)
|
41 |
-
|
42 |
-
if response.status_code == 202:
|
43 |
-
return {"message": "Private endpoint created successfully", "status": response.status_code}
|
44 |
-
else:
|
45 |
-
raise HTTPException(status_code=response.status_code, detail=response.json())
|
46 |
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
}
|
59 |
-
|
60 |
-
response = requests.put(
|
61 |
-
update_url,
|
62 |
-
headers=headers,
|
63 |
-
data=json.dumps(request_body)
|
64 |
-
)
|
65 |
-
|
66 |
-
if response.status_code == 202:
|
67 |
-
return {"message": "Private endpoint updated successfully", "status": response.status_code}
|
68 |
-
else:
|
69 |
-
raise HTTPException(status_code=response.status_code, detail=response.json())
|
70 |
|
71 |
-
|
72 |
-
|
73 |
-
|
|
|
74 |
|
75 |
-
|
|
|
|
|
76 |
|
77 |
-
|
78 |
-
|
79 |
-
headers=headers
|
80 |
-
)
|
81 |
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import chromadb
|
3 |
+
from transformers import AutoTokenizer, AutoModel
|
4 |
+
import faiss
|
5 |
+
import numpy as np
|
6 |
+
import torch
|
7 |
|
8 |
+
# Load the pre-trained sentence-embedding model and its tokenizer.
model_name = "sentence-transformers/all-MiniLM-L6-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)

# Set up an in-memory Chroma client and a collection to hold the
# tree descriptions alongside their embeddings.
client = chromadb.Client()
collection = client.create_collection(name="tree_images")

# Example corpus (swap in your real content or dataset here).
content = [
    "Tree 1: Decorated with lights",
    "Tree 2: Undecorated",
    "Tree 3: Decorated with ornaments",
]
|
|
|
|
|
|
|
21 |
|
22 |
+
# Function to generate embeddings using the pre-trained model
def generate_embeddings(texts):
    """Return one 1-D numpy embedding per input string.

    Each text is tokenized on its own and run through the module-level
    model; the last hidden state is mean-pooled over the token axis.
    """

    def _embed(text):
        encoded = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
        # No gradients needed for pure inference.
        with torch.no_grad():
            hidden = model(**encoded).last_hidden_state
        return hidden.mean(dim=1).squeeze().numpy()

    return [_embed(text) for text in texts]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
|
32 |
+
# Generate embeddings for the content
embeddings = generate_embeddings(content)

# Add the embeddings to Chroma.
# NOTE: chromadb's Collection.add takes *plural* keyword arguments
# (ids/embeddings/documents/metadatas, each a list) and the string `ids`
# argument is mandatory; the previous singular embedding=/document=
# keywords are not part of the API and raise a TypeError.
for idx, text in enumerate(content):
    collection.add(
        ids=[str(idx)],
        embeddings=[embeddings[idx].tolist()],
        documents=[text],
        metadatas=[{"id": idx}],
    )

# Build a FAISS index for efficient retrieval; IndexFlatL2 requires a
# contiguous float32 matrix of shape (n_vectors, dim).
embeddings_np = np.array(embeddings).astype('float32')
faiss_index = faiss.IndexFlatL2(embeddings_np.shape[1])
faiss_index.add(embeddings_np)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
|
44 |
+
# Define the search function for Gradio interface
def search(query):
    """Return the closest documents to *query* from both FAISS and Chroma.

    The query is embedded with the same model as the corpus, searched in
    the FAISS index and the Chroma collection, and the two result lists
    are formatted into a single display string.
    """
    # Generate the query embedding; FAISS expects a 2-D float32 array.
    query_embedding = generate_embeddings([query])[0].reshape(1, -1).astype('float32')

    # FAISS-based search. Clamp k to the corpus size and drop the -1
    # padding indices FAISS emits when fewer than k vectors exist.
    k = min(3, len(content))
    distances, indices = faiss_index.search(query_embedding, k)
    faiss_results = [content[i] for i in indices[0] if i >= 0]

    # Chroma-based search. query() returns one document list *per query
    # embedding*, so take the first (and only) inner list — joining the
    # outer list directly would be a TypeError.
    chroma_results = collection.query(
        query_embeddings=query_embedding.tolist(),
        n_results=k,
    )["documents"][0]

    # Return results
    return "FAISS Results: " + ", ".join(faiss_results) + "\nChroma Results: " + ", ".join(chroma_results)
|
58 |
+
|
59 |
+
# Build the text-in / text-out Gradio UI around search() and start it.
interface = gr.Interface(
    fn=search,
    inputs="text",
    outputs="text",
)
interface.launch()
|