Spaces:
Running
Running
Create retriever.py
Browse files- retriever.py +35 -0
retriever.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# retriever.py
|
2 |
+
|
3 |
+
from typing import Any, Dict, List
|
4 |
+
import torch
|
5 |
+
from langchain.tools.retriever import create_retriever_tool
|
6 |
+
|
7 |
+
class MultiModalRetriever:
    """Unified retrieval front-end combining text, image, and code search.

    Wraps an injected text retriever, a CLIP model/processor pair for image
    queries, and a code-snippet retriever tool behind a single ``retrieve``
    call that returns all three result lists at once.
    """

    def __init__(self, text_retriever: Any, clip_model: Any, clip_processor: Any) -> None:
        # Backends are injected as-is; no validation happens here.
        self.text_retriever = text_retriever
        self.clip_model = clip_model
        self.clip_processor = clip_processor
        # NOTE(review): create_retriever_tool receives an empty list where a
        # retriever object is normally expected — confirm this is intentional
        # and not a stubbed-out wiring step.
        self.code_retriever = create_retriever_tool([], "Code Retriever", "Retriever for code snippets")

    def retrieve(self, query: str, domain: str) -> Dict[str, List]:
        """Run all three retrievers for *query* and bundle the results.

        ``domain`` is accepted for interface compatibility but is currently
        not consulted by any of the underlying retrievers.

        Returns a dict with keys ``"text"``, ``"images"``, and ``"code"``,
        each mapping to that backend's result list.
        """
        bundled: Dict[str, List] = {
            "text": self._retrieve_text(query),
            "images": self._retrieve_images(query),
            "code": self._retrieve_code(query),
        }
        return bundled

    def _retrieve_text(self, query: str) -> List[Any]:
        """Delegate *query* to the injected text retriever."""
        return self.text_retriever.invoke(query)

    def _retrieve_images(self, query: str) -> List[str]:
        """Embed *query* with CLIP; the actual image lookup is still a stub."""
        encoded = self.clip_processor(text=query, return_tensors="pt")
        with torch.no_grad():
            # The text embedding is computed but intentionally discarded —
            # nothing downstream consumes it yet.
            _ = self.clip_model.get_text_features(**encoded)
        # Placeholder for image retrieval results
        return ["image_result_1.png", "image_result_2.png"]

    def _retrieve_code(self, query: str) -> List[str]:
        """Delegate *query* to the code-snippet retriever tool."""
        return self.code_retriever.invoke(query)