willco-afk committed
Commit 1764725 · verified · 1 Parent(s): 5f40423

Update app.py

Files changed (1)
app.py +53 -75
app.py CHANGED
@@ -1,85 +1,63 @@
- from fastapi import FastAPI, HTTPException
- import requests
- import oci
- import json
- from datetime import datetime
-
- app = FastAPI()
-
- # Oracle Cloud Configurations (Ensure your OCI config file is set up correctly)
- config = oci.config.from_file("~/.oci/config", "DEFAULT")
- signer = oci.auth.signers.get_resource_principals_signer()
-
- # Endpoint URL for Oracle AI Service (Replace region appropriately)
- oci_endpoint = "https://anomalydetection.aiservice.{{region}}.oci.oraclecloud.com/20210101/aiPrivateEndpoints"
-
- # Request headers including authentication (ensure the correct signing/authentication process)
- headers = {
-     "Content-Type": "application/json",
-     "Authorization": "Bearer {access_token}"  # Use the appropriate method to authenticate
- }
-
- @app.post("/create-private-endpoint")
- async def create_private_endpoint(compartment_id: str, subnet_id: str, display_name: str, dns_zones: list):
-     """Creates a private reverse connection endpoint in Oracle Cloud"""
-
-     # Prepare the request body
-     request_body = {
-         "compartmentId": compartment_id,
-         "subnetId": subnet_id,
-         "displayName": display_name,
-         "dnsZones": dns_zones,
-         "definedTags": {},
-         "freeformTags": {}
-     }
-
-     response = requests.post(
-         oci_endpoint,
-         headers=headers,
-         data=json.dumps(request_body)
-     )
-
-     if response.status_code == 202:
-         return {"message": "Private endpoint created successfully", "status": response.status_code}
-     else:
-         raise HTTPException(status_code=response.status_code, detail=response.json())
-
- @app.put("/update-private-endpoint/{endpoint_id}")
- async def update_private_endpoint(endpoint_id: str, display_name: str, dns_zones: list):
-     """Updates a private reverse connection endpoint"""
-
-     update_url = f"{oci_endpoint}/{endpoint_id}"
-
-     request_body = {
-         "displayName": display_name,
-         "dnsZones": dns_zones,
-         "definedTags": {},
-         "freeformTags": {}
-     }
-
-     response = requests.put(
-         update_url,
-         headers=headers,
-         data=json.dumps(request_body)
-     )
-
-     if response.status_code == 202:
-         return {"message": "Private endpoint updated successfully", "status": response.status_code}
-     else:
-         raise HTTPException(status_code=response.status_code, detail=response.json())
-
- @app.get("/get-private-endpoint/{endpoint_id}")
- async def get_private_endpoint(endpoint_id: str):
-     """Retrieves a private reverse connection endpoint by its ID"""
-
-     get_url = f"{oci_endpoint}/{endpoint_id}"
-
-     response = requests.get(
-         get_url,
-         headers=headers
-     )
-
-     if response.status_code == 200:
-         return response.json()
-     else:
-         raise HTTPException(status_code=response.status_code, detail=response.json())
+ import gradio as gr
+ import chromadb
+ from transformers import AutoTokenizer, AutoModel
+ import faiss
+ import numpy as np
+ import torch
+
+ # Load the pre-trained model and tokenizer
+ model_name = "sentence-transformers/all-MiniLM-L6-v2"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModel.from_pretrained(model_name)
+
+ # Initialize Chroma client
+ client = chromadb.Client()
+
+ # Create a Chroma collection
+ collection = client.create_collection(name="tree_images")
+
+ # Example data (you can replace this with your actual content or dataset)
+ content = ["Tree 1: Decorated with lights", "Tree 2: Undecorated", "Tree 3: Decorated with ornaments"]
+
+ # Function to generate embeddings using the pre-trained model
+ def generate_embeddings(texts):
+     embeddings = []
+     for text in texts:
+         inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
+         with torch.no_grad():
+             output = model(**inputs)
+         embeddings.append(output.last_hidden_state.mean(dim=1).squeeze().numpy())
+     return embeddings
+
+ # Generate embeddings for the content
+ embeddings = generate_embeddings(content)
+
+ # Add the embeddings to Chroma
+ for idx, text in enumerate(content):
+     collection.add(embedding=embeddings[idx], document=text, metadatas={"id": idx})
+
+ # Build FAISS index for efficient retrieval
+ embeddings_np = np.array(embeddings).astype('float32')
+ faiss_index = faiss.IndexFlatL2(embeddings_np.shape[1])
+ faiss_index.add(embeddings_np)
+
+ # Define the search function for Gradio interface
+ def search(query):
+     # Generate embedding for the query
+     query_embedding = generate_embeddings([query])[0].reshape(1, -1)
+
+     # FAISS-based search
+     distances, indices = faiss_index.search(query_embedding, 3)
+     faiss_results = [content[i] for i in indices[0]]
+
+     # Chroma-based search
+     chroma_results = collection.query(query_embeddings=query_embedding, n_results=3)["documents"]
+
+     # Return results
+     return "FAISS Results: " + ", ".join(faiss_results) + "\nChroma Results: " + ", ".join(chroma_results)
+
+ # Create the Gradio interface
+ interface = gr.Interface(fn=search, inputs="text", outputs="text")
+
+ # Launch the Gradio interface
+ interface.launch()
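
Note on the new Chroma calls: as committed, collection.add() and collection.query() use singular keyword arguments (embedding=, document=), while the standard chromadb Python client expects list-valued ids, embeddings, documents, and metadatas, and query() returns its "documents" grouped per query. A minimal, self-contained sketch of what those two calls might look like against that API (random vectors stand in for the MiniLM embeddings; the collection and document names mirror the committed code):

import chromadb
import numpy as np

client = chromadb.Client()
collection = client.create_collection(name="tree_images")

content = ["Tree 1: Decorated with lights", "Tree 2: Undecorated", "Tree 3: Decorated with ornaments"]
# Stand-in for generate_embeddings(content); all-MiniLM-L6-v2 produces 384-dim vectors
embeddings = [np.random.rand(384).astype("float32") for _ in content]

# ids must be strings, and each argument is a list with one entry per document
collection.add(
    ids=[str(i) for i in range(len(content))],
    embeddings=[e.tolist() for e in embeddings],
    documents=content,
    metadatas=[{"id": i} for i in range(len(content))],
)

# Stand-in for generate_embeddings([query])[0]
query_embedding = np.random.rand(384).astype("float32")
results = collection.query(query_embeddings=[query_embedding.tolist()], n_results=3)
chroma_results = results["documents"][0]  # documents for the first (and only) query
print(chroma_results)

With "documents" indexed per query like this, the ", ".join(chroma_results) at the end of search() concatenates strings rather than nested lists.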