Source code for langchain.experimental.generative_agents.memory (excerpt)
    def _get_insights_on_topic(
        self, topic: str, now: Optional[datetime] = None
    ) -> List[str]:
        """Generate 'insights' on a topic of reflection, based on pertinent memories."""
        prompt = PromptTemplate.from_template(
            "Statements relevant to: '{topic}'\n"
            "---\n"
            "{related_statements}\n"
            "---\n"
            "What 5 high-level novel insights can you infer from the above statements "
            "that are relevant for answering the following question?\n"
            "Do not include any insights that are not relevant to the question.\n"
            "Do not repeat any insights that have already been made.\n\n"
            "Question: {topic}\n\n"
            "(example format: insight (because of 1, 5, 3))\n"
        )
        related_memories = self.fetch_memories(topic, now=now)
        related_statements = "\n".join(
            [
                self._format_memory_detail(memory, prefix=f"{i+1}. ")
                for i, memory in enumerate(related_memories)
            ]
        )
        result = self.chain(prompt).run(
            topic=topic, related_statements=related_statements
        )
        # TODO: Parse the connections between memories and insights
        return self._parse_list(result)

    def pause_to_reflect(self, now: Optional[datetime] = None) -> List[str]:
        """Reflect on recent observations and generate 'insights'."""
        if self.verbose:
            logger.info("Character is reflecting")
        new_insights = []
        topics = self._get_topics_of_reflection()
        for topic in topics:
            insights = self._get_insights_on_topic(topic, now=now)
            for insight in insights:
                self.add_memory(insight, now=now)
            new_insights.extend(insights)
        return new_insights

    def _score_memory_importance(self, memory_content: str) -> float:
        """Score the absolute importance of the given memory."""
        prompt = PromptTemplate.from_template(
            "On the scale of 1 to 10, where 1 is purely mundane"
            + " (e.g., brushing teeth, making bed) and 10 is"
            + " extremely poignant (e.g., a break up, college"
            + " acceptance), rate the likely poignancy of the"
            + " following piece of memory. Respond with a single integer."
            + "\nMemory: {memory_content}"
            + "\nRating: "
        )
        score = self.chain(prompt).run(memory_content=memory_content).strip()
        if self.verbose:
            logger.info(f"Importance score: {score}")
        match = re.search(r"^\D*(\d+)", score)
        if match:
            return (float(match.group(1)) / 10) * self.importance_weight
        else:
            return 0.0

    def _score_memories_importance(self, memory_content: str) -> List[float]:
        """Score the absolute importance of the given memories."""
        prompt = PromptTemplate.from_template(
            "On the scale of 1 to 10, where 1 is purely mundane"
            + " (e.g., brushing teeth, making bed) and 10 is"
            + " extremely poignant (e.g., a break up, college"
            + " acceptance), rate the likely poignancy of the"
+ " acceptance), rate the likely poignancy of the" + " following piece of memory. Always answer with only a list of numbers." + " If just given one memory still respond in a list." + " Memories are separated by semi colans (;)" + "\Memories: {memory_content}" + "\nRating: " ) scores = self.chain(prompt).run(memory_content=memory_content).strip() if self.verbose: logger.info(f"Importance scores: {scores}") # Split into list of strings and convert to floats scores_list = [float(x) for x in scores.split(";")] return scores_list [docs] def add_memories( self, memory_content: str, now: Optional[datetime] = None ) -> List[str]: """Add an observations or memories to the agent's memory.""" importance_scores = self._score_memories_importance(memory_content) self.aggregate_importance += max(importance_scores) memory_list = memory_content.split(";") documents = [] for i in range(len(memory_list)): documents.append( Document( page_content=memory_list[i], metadata={"importance": importance_scores[i]}, ) ) result = self.memory_retriever.add_documents(documents, current_time=now) # After an agent has processed a certain amount of memories (as measured by # aggregate importance), it is time to reflect on recent events to add # more synthesized memories to the agent's memory stream. if ( self.reflection_threshold is not None and self.aggregate_importance > self.reflection_threshold and not self.reflecting ): self.reflecting = True
            self.pause_to_reflect(now=now)
            # Hack to clear the importance from reflection
            self.aggregate_importance = 0.0
            self.reflecting = False
        return result

    def add_memory(
        self, memory_content: str, now: Optional[datetime] = None
    ) -> List[str]:
        """Add an observation or memory to the agent's memory."""
        importance_score = self._score_memory_importance(memory_content)
        self.aggregate_importance += importance_score
        document = Document(
            page_content=memory_content, metadata={"importance": importance_score}
        )
        result = self.memory_retriever.add_documents([document], current_time=now)
        # After an agent has processed a certain amount of memories (as measured by
        # aggregate importance), it is time to reflect on recent events to add
        # more synthesized memories to the agent's memory stream.
        if (
            self.reflection_threshold is not None
            and self.aggregate_importance > self.reflection_threshold
            and not self.reflecting
        ):
            self.reflecting = True
            self.pause_to_reflect(now=now)
            # Hack to clear the importance from reflection
            self.aggregate_importance = 0.0
            self.reflecting = False
        return result

    def fetch_memories(
        self, observation: str, now: Optional[datetime] = None
    ) -> List[Document]:
        """Fetch related memories."""
        if now is not None:
            with mock_now(now):
                return self.memory_retriever.get_relevant_documents(observation)
        else:
            return self.memory_retriever.get_relevant_documents(observation)
    def format_memories_detail(self, relevant_memories: List[Document]) -> str:
        content = []
        for mem in relevant_memories:
            content.append(self._format_memory_detail(mem, prefix="- "))
        return "\n".join([f"{mem}" for mem in content])

    def _format_memory_detail(self, memory: Document, prefix: str = "") -> str:
        created_time = memory.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p")
        return f"{prefix}[{created_time}] {memory.page_content.strip()}"

    def format_memories_simple(self, relevant_memories: List[Document]) -> str:
        return "; ".join([f"{mem.page_content}" for mem in relevant_memories])

    def _get_memories_until_limit(self, consumed_tokens: int) -> str:
        """Reduce the number of tokens in the documents."""
        result = []
        for doc in self.memory_retriever.memory_stream[::-1]:
            if consumed_tokens >= self.max_tokens_limit:
                break
            consumed_tokens += self.llm.get_num_tokens(doc.page_content)
            if consumed_tokens < self.max_tokens_limit:
                result.append(doc)
        return self.format_memories_simple(result)

    @property
    def memory_variables(self) -> List[str]:
        """Input keys this memory class will load dynamically."""
        return []

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return key-value pairs given the text input to the chain."""
        queries = inputs.get(self.queries_key)
        now = inputs.get(self.now_key)
        if queries is not None:
            relevant_memories = [
                mem for query in queries for mem in self.fetch_memories(query, now=now)
            ]
            return {
                self.relevant_memories_key: self.format_memories_detail(
                    relevant_memories
                ),
                self.relevant_memories_simple_key: self.format_memories_simple(
                    relevant_memories
                ),
            }
        most_recent_memories_token = inputs.get(self.most_recent_memories_token_key)
        if most_recent_memories_token is not None:
            return {
                self.most_recent_memories_key: self._get_memories_until_limit(
                    most_recent_memories_token
                )
            }
        return {}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
        """Save the context of this model run to memory."""
        # TODO: fix the save memory key
        mem = outputs.get(self.add_memory_key)
        now = outputs.get(self.now_key)
        if mem:
            self.add_memory(mem, now=now)

    def clear(self) -> None:
        """Clear memory contents."""
        # TODO
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
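For orientation, here is a minimal construction sketch for the memory class above. It assumes an OpenAI chat model, OpenAI embeddings (1536-dimensional), and a FAISS vector store are available; the reflection_threshold of 8 and k=15 are illustrative values, not defaults.

import faiss

from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # 1536 = OpenAI embedding width (assumption)
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, other_score_keys=["importance"], k=15
)
memory = GenerativeAgentMemory(
    llm=ChatOpenAI(),
    memory_retriever=retriever,
    reflection_threshold=8,  # reflect once aggregate importance crosses this value
)
memory.add_memory("Tommie remembers his dog, Bruno, from when he was a kid")

Each add_memory call scores the observation's poignancy; once the running aggregate_importance crosses the threshold, pause_to_reflect synthesizes higher-level insights back into the memory stream.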
Source code for langchain.experimental.generative_agents.generative_agent

import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

from pydantic import BaseModel, Field

from langchain import LLMChain
from langchain.base_language import BaseLanguageModel
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.prompts import PromptTemplate


class GenerativeAgent(BaseModel):
    """A character with memory and innate characteristics."""

    name: str
    """The character's name."""
    age: Optional[int] = None
    """The optional age of the character."""
    traits: str = "N/A"
    """Permanent traits to ascribe to the character."""
    status: str
    """The character's current status (used as {agent_status} in prompts)."""
    memory: GenerativeAgentMemory
    """The memory object that combines relevance, recency, and 'importance'."""
    llm: BaseLanguageModel
    """The underlying language model."""
    verbose: bool = False
    summary: str = ""  #: :meta private:
    """Stateful self-summary generated via reflection on the character's memory."""
    summary_refresh_seconds: int = 3600  #: :meta private:
    """How frequently to re-generate the summary."""
    last_refreshed: datetime = Field(default_factory=datetime.now)  #: :meta private:
    """The last time the character's summary was regenerated."""
    daily_summaries: List[str] = Field(default_factory=list)  #: :meta private:
    """Summary of the events in the plan that the agent took."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True
    # LLM-related methods
    @staticmethod
    def _parse_list(text: str) -> List[str]:
        """Parse a newline-separated string into a list of strings."""
        lines = re.split(r"\n", text.strip())
        return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]

    def chain(self, prompt: PromptTemplate) -> LLMChain:
        return LLMChain(
            llm=self.llm, prompt=prompt, verbose=self.verbose, memory=self.memory
        )

    def _get_entity_from_observation(self, observation: str) -> str:
        prompt = PromptTemplate.from_template(
            "What is the observed entity in the following observation? {observation}"
            + "\nEntity="
        )
        return self.chain(prompt).run(observation=observation).strip()

    def _get_entity_action(self, observation: str, entity_name: str) -> str:
        prompt = PromptTemplate.from_template(
            "What is the {entity} doing in the following observation? {observation}"
            + "\nThe {entity} is"
        )
        return (
            self.chain(prompt).run(entity=entity_name, observation=observation).strip()
        )

    def summarize_related_memories(self, observation: str) -> str:
        """Summarize memories that are most relevant to an observation."""
        prompt = PromptTemplate.from_template(
            """
{q1}?
Context from memory:
{relevant_memories}
Relevant context:
"""
        )
        entity_name = self._get_entity_from_observation(observation)
        entity_action = self._get_entity_action(observation, entity_name)
        q1 = f"What is the relationship between {self.name} and {entity_name}"
        q2 = f"{entity_name} is {entity_action}"
        return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()

    def _generate_reaction(
        self, observation: str, suffix: str, now: Optional[datetime] = None
    ) -> str:
        """React to a given observation or dialogue act."""
        prompt = PromptTemplate.from_template(
            "{agent_summary_description}"
            + "\nIt is {current_time}."
            + "\n{agent_name}'s status: {agent_status}"
            + "\nSummary of relevant context from {agent_name}'s memory:"
            + "\n{relevant_memories}"
            + "\nMost recent observations: {most_recent_memories}"
            + "\nObservation: {observation}"
            + "\n\n"
            + suffix
        )
        agent_summary_description = self.get_summary(now=now)
        relevant_memories_str = self.summarize_related_memories(observation)
        current_time_str = (
            datetime.now().strftime("%B %d, %Y, %I:%M %p")
            if now is None
            else now.strftime("%B %d, %Y, %I:%M %p")
        )
        kwargs: Dict[str, Any] = dict(
            agent_summary_description=agent_summary_description,
            current_time=current_time_str,
            relevant_memories=relevant_memories_str,
            agent_name=self.name,
            observation=observation,
            agent_status=self.status,
        )
        consumed_tokens = self.llm.get_num_tokens(
            prompt.format(most_recent_memories="", **kwargs)
        )
        kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens
        return self.chain(prompt=prompt).run(**kwargs).strip()

    def _clean_response(self, text: str) -> str:
        return re.sub(f"^{self.name} ", "", text.strip()).strip()

    def generate_reaction(
        self, observation: str, now: Optional[datetime] = None
    ) -> Tuple[bool, str]:
        """React to a given observation."""
        call_to_action_template = (
            "Should {agent_name} react to the observation, and if so,"
            + " what would be an appropriate reaction? Respond in one line."
            + ' If the action is to engage in dialogue, write:\nSAY: "what to say"'
            + "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)."
            + "\nEither do nothing, react, or say something but not both.\n\n"
        )
        full_result = self._generate_reaction(
            observation, call_to_action_template, now=now
        )
        result = full_result.strip().split("\n")[0]
        # Record the observation and the reaction in the agent's memory.
        self.memory.save_context(
            {},
            {
                self.memory.add_memory_key: f"{self.name} observed "
                f"{observation} and reacted by {result}",
                self.memory.now_key: now,
            },
        )
        if "REACT:" in result:
            reaction = self._clean_response(result.split("REACT:")[-1])
            return False, f"{self.name} {reaction}"
        if "SAY:" in result:
if "SAY:" in result: said_value = self._clean_response(result.split("SAY:")[-1]) return True, f"{self.name} said {said_value}" else: return False, result [docs] def generate_dialogue_response( self, observation: str, now: Optional[datetime] = None ) -> Tuple[bool, str]: """React to a given observation.""" call_to_action_template = ( "What would {agent_name} say? To end the conversation, write:" ' GOODBYE: "what to say". Otherwise to continue the conversation,' ' write: SAY: "what to say next"\n\n' ) full_result = self._generate_reaction( observation, call_to_action_template, now=now ) result = full_result.strip().split("\n")[0] if "GOODBYE:" in result: farewell = self._clean_response(result.split("GOODBYE:")[-1]) self.memory.save_context( {}, { self.memory.add_memory_key: f"{self.name} observed " f"{observation} and said {farewell}", self.memory.now_key: now, }, ) return False, f"{self.name} said {farewell}" if "SAY:" in result: response_text = self._clean_response(result.split("SAY:")[-1]) self.memory.save_context( {}, { self.memory.add_memory_key: f"{self.name} observed " f"{observation} and said {response_text}", self.memory.now_key: now, }, ) return True, f"{self.name} said {response_text}"
        else:
            return False, result

    ######################################################
    # Agent stateful summary methods.                    #
    # Each dialog or response prompt includes a header   #
    # summarizing the agent's self-description. This is  #
    # updated periodically through probing its memories. #
    ######################################################
    def _compute_agent_summary(self) -> str:
        """Ask the LLM to summarize the agent's core characteristics."""
        prompt = PromptTemplate.from_template(
            "How would you summarize {name}'s core characteristics given the"
            + " following statements:\n"
            + "{relevant_memories}"
            + "Do not embellish."
            + "\n\nSummary: "
        )
        # The agent seeks to think about their core characteristics.
        return (
            self.chain(prompt)
            .run(name=self.name, queries=[f"{self.name}'s core characteristics"])
            .strip()
        )

    def get_summary(
        self, force_refresh: bool = False, now: Optional[datetime] = None
    ) -> str:
        """Return a descriptive summary of the agent."""
        current_time = datetime.now() if now is None else now
        since_refresh = (current_time - self.last_refreshed).seconds
        if (
            not self.summary
            or since_refresh >= self.summary_refresh_seconds
            or force_refresh
        ):
            self.summary = self._compute_agent_summary()
            self.last_refreshed = current_time
        age = self.age if self.age is not None else "N/A"
        return (
            f"Name: {self.name} (age: {age})"
            + f"\nInnate traits: {self.traits}"
+ f"\nInnate traits: {self.traits}" + f"\n{self.summary}" ) [docs] def get_full_header( self, force_refresh: bool = False, now: Optional[datetime] = None ) -> str: """Return a full header of the agent's status, summary, and current time.""" now = datetime.now() if now is None else now summary = self.get_summary(force_refresh=force_refresh, now=now) current_time_str = now.strftime("%B %d, %Y, %I:%M %p") return ( f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}" ) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 11, 2023.
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html
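A usage sketch tying the agent to the memory object from the earlier example; the name, traits, and observation are placeholders.

from langchain.chat_models import ChatOpenAI
from langchain.experimental.generative_agents.generative_agent import GenerativeAgent

tommie = GenerativeAgent(
    name="Tommie",
    age=25,
    traits="anxious, likes design, talkative",
    status="looking for a job",
    llm=ChatOpenAI(),
    memory=memory,  # the GenerativeAgentMemory built in the sketch above
)
# Returns (did_react, rendered_reaction); the SAY:/REACT: prefix is parsed out.
did_react, reaction = tommie.generate_reaction("Tommie sees a help-wanted sign")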
Source code for langchain.retrievers.remote_retriever

from typing import List, Optional

import aiohttp
import requests
from pydantic import BaseModel

from langchain.schema import BaseRetriever, Document


class RemoteLangChainRetriever(BaseRetriever, BaseModel):
    url: str
    headers: Optional[dict] = None
    input_key: str = "message"
    response_key: str = "response"
    page_content_key: str = "page_content"
    metadata_key: str = "metadata"

    def get_relevant_documents(self, query: str) -> List[Document]:
        response = requests.post(
            self.url, json={self.input_key: query}, headers=self.headers
        )
        result = response.json()
        return [
            Document(
                page_content=r[self.page_content_key], metadata=r[self.metadata_key]
            )
            for r in result[self.response_key]
        ]

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        async with aiohttp.ClientSession() as session:
            async with session.request(
                "POST", self.url, headers=self.headers, json={self.input_key: query}
            ) as response:
                result = await response.json()
                return [
                    Document(
                        page_content=r[self.page_content_key],
                        metadata=r[self.metadata_key],
                    )
                    for r in result[self.response_key]
                ]
https://python.langchain.com/en/latest/_modules/langchain/retrievers/remote_retriever.html
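A quick sketch of the request/response contract; the endpoint URL is a placeholder for any service that speaks the shape shown above.

retriever = RemoteLangChainRetriever(url="http://localhost:8080/retrieve")  # placeholder
# POSTs {"message": "..."}; expects {"response": [{"page_content": ..., "metadata": ...}]}
docs = retriever.get_relevant_documents("tell me about LangChain")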
Source code for langchain.retrievers.vespa_retriever

"""Wrapper for retrieving documents from Vespa."""
from __future__ import annotations

import json
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Sequence, Union

from langchain.schema import BaseRetriever, Document

if TYPE_CHECKING:
    from vespa.application import Vespa


class VespaRetriever(BaseRetriever):
    def __init__(
        self,
        app: Vespa,
        body: Dict,
        content_field: str,
        metadata_fields: Optional[Sequence[str]] = None,
    ):
        self._application = app
        self._query_body = body
        self._content_field = content_field
        self._metadata_fields = metadata_fields or ()

    def _query(self, body: Dict) -> List[Document]:
        response = self._application.query(body)
        if not str(response.status_code).startswith("2"):
            raise RuntimeError(
                "Could not retrieve data from Vespa. Error code: {}".format(
                    response.status_code
                )
            )
        root = response.json["root"]
        if "errors" in root:
            raise RuntimeError(json.dumps(root["errors"]))
        docs = []
        for child in response.hits:
            page_content = child["fields"].pop(self._content_field, "")
            if self._metadata_fields == "*":
                metadata = child["fields"]
            else:
                metadata = {mf: child["fields"].get(mf) for mf in self._metadata_fields}
            metadata["id"] = child["id"]
            docs.append(Document(page_content=page_content, metadata=metadata))
        return docs
    def get_relevant_documents(self, query: str) -> List[Document]:
        body = self._query_body.copy()
        body["query"] = query
        return self._query(body)

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError

    def get_relevant_documents_with_filter(
        self, query: str, *, _filter: Optional[str] = None
    ) -> List[Document]:
        body = self._query_body.copy()
        _filter = f" and {_filter}" if _filter else ""
        body["yql"] = body["yql"] + _filter
        body["query"] = query
        return self._query(body)

    @classmethod
    def from_params(
        cls,
        url: str,
        content_field: str,
        *,
        k: Optional[int] = None,
        metadata_fields: Union[Sequence[str], Literal["*"]] = (),
        sources: Union[Sequence[str], Literal["*"], None] = None,
        _filter: Optional[str] = None,
        yql: Optional[str] = None,
        **kwargs: Any,
    ) -> VespaRetriever:
        """Instantiate retriever from params.

        Args:
            url (str): Vespa app URL.
            content_field (str): Field in results to return as Document page_content.
            k (Optional[int]): Number of Documents to return. Defaults to None.
            metadata_fields (Sequence[str] or "*"): Fields in results to include in
                document metadata. Defaults to empty tuple ().
            sources (Sequence[str] or "*" or None): Sources to retrieve from.
                Defaults to None.
            _filter (Optional[str]): Document filter condition expressed in YQL.
                Defaults to None.
            yql (Optional[str]): Full YQL query to be used. Should not be specified
                if _filter or sources are specified. Defaults to None.
            kwargs (Any): Keyword arguments added to query body.
        """
        try:
            from vespa.application import Vespa
        except ImportError:
            raise ImportError(
                "pyvespa is not installed, please install with `pip install pyvespa`"
            )
        app = Vespa(url)
        body = kwargs.copy()
        if yql and (sources or _filter):
            raise ValueError(
                "yql should only be specified if both sources and _filter are not "
                "specified."
            )
        else:
            if metadata_fields == "*":
                _fields = "*"
                body["summary"] = "short"
            else:
                _fields = ", ".join([content_field] + list(metadata_fields or []))
            _sources = ", ".join(sources) if isinstance(sources, Sequence) else "*"
            _filter = f" and {_filter}" if _filter else ""
            yql = f"select {_fields} from sources {_sources} where userQuery(){_filter}"
        body["yql"] = yql
        if k:
            body["hits"] = k
        return cls(app, body, content_field, metadata_fields=metadata_fields)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/vespa_retriever.html
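A from_params sketch; the URL and field names are illustrative and must match your own Vespa application's schema.

retriever = VespaRetriever.from_params(
    "https://doc-search.vespa.oath.cloud",  # illustrative Vespa app URL
    content_field="content",
    k=4,
    metadata_fields=["path", "title"],  # hypothetical schema fields
)
docs = retriever.get_relevant_documents("what is vespa?")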
Source code for langchain.retrievers.pupmed

from typing import List

from langchain.schema import BaseRetriever, Document
from langchain.utilities.pupmed import PubMedAPIWrapper


class PubMedRetriever(BaseRetriever, PubMedAPIWrapper):
    """
    It is effectively a wrapper for PubMedAPIWrapper.
    It wraps load_docs() to get_relevant_documents().
    It uses all PubMedAPIWrapper arguments without any change.
    """

    def get_relevant_documents(self, query: str) -> List[Document]:
        return self.load_docs(query=query)

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/pupmed.html
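Usage is a one-liner, assuming network access to PubMed; any PubMedAPIWrapper arguments pass straight through the constructor.

retriever = PubMedRetriever()
docs = retriever.get_relevant_documents("chatgpt")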
Source code for langchain.retrievers.tfidf

"""TF-IDF Retriever.

Largely based on
https://github.com/asvskartheek/Text-Retrieval/blob/master/TF-IDF%20Search%20Engine%20(SKLEARN).ipynb
"""
from __future__ import annotations

from typing import Any, Dict, Iterable, List, Optional

from pydantic import BaseModel

from langchain.schema import BaseRetriever, Document


class TFIDFRetriever(BaseRetriever, BaseModel):
    vectorizer: Any
    docs: List[Document]
    tfidf_array: Any
    k: int = 4

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @classmethod
    def from_texts(
        cls,
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict]] = None,
        tfidf_params: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> TFIDFRetriever:
        try:
            from sklearn.feature_extraction.text import TfidfVectorizer
        except ImportError:
            raise ImportError(
                "Could not import scikit-learn, please install with `pip install "
                "scikit-learn`."
            )
        tfidf_params = tfidf_params or {}
        vectorizer = TfidfVectorizer(**tfidf_params)
        tfidf_array = vectorizer.fit_transform(texts)
        metadatas = metadatas or ({} for _ in texts)
        docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
        return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array, **kwargs)

    @classmethod
    def from_documents(
        cls,
        documents: Iterable[Document],
        *,
        tfidf_params: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> TFIDFRetriever:
        texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
        return cls.from_texts(
            texts=texts, tfidf_params=tfidf_params, metadatas=metadatas, **kwargs
        )

    def get_relevant_documents(self, query: str) -> List[Document]:
        from sklearn.metrics.pairwise import cosine_similarity

        query_vec = self.vectorizer.transform(
            [query]
        )  # Ip -- (n_docs, x), Op -- (n_docs, n_feats)
        results = cosine_similarity(self.tfidf_array, query_vec).reshape(
            (-1,)
        )  # Op -- (n_docs, 1) -- cosine similarity with each doc
        return_docs = [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
        return return_docs

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/tfidf.html
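A self-contained sketch (only scikit-learn is required); tfidf_params is forwarded verbatim to TfidfVectorizer.

retriever = TFIDFRetriever.from_texts(
    ["foo", "bar", "world", "hello", "foo bar"],
    tfidf_params={"ngram_range": (1, 2)},  # any TfidfVectorizer kwargs
)
docs = retriever.get_relevant_documents("foo")  # top k=4 by cosine similarity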
Source code for langchain.retrievers.elastic_search_bm25

"""Wrapper around Elasticsearch vector database."""
from __future__ import annotations

import uuid
from typing import Any, Iterable, List

from langchain.docstore.document import Document
from langchain.schema import BaseRetriever


class ElasticSearchBM25Retriever(BaseRetriever):
    """Wrapper around Elasticsearch using BM25 as a retrieval method.

    To connect to an Elasticsearch instance that requires login credentials,
    including Elastic Cloud, use the Elasticsearch URL format
    https://username:password@es_host:9243. For example, to connect to Elastic
    Cloud, create the Elasticsearch URL with the required authentication details
    and pass it to the ElasticVectorSearch constructor as the named parameter
    elasticsearch_url.

    You can obtain your Elastic Cloud URL and login credentials by logging in to
    the Elastic Cloud console at https://cloud.elastic.co, selecting your
    deployment, and navigating to the "Deployments" page.

    To obtain your Elastic Cloud password for the default "elastic" user:

    1. Log in to the Elastic Cloud console at https://cloud.elastic.co
    2. Go to "Security" > "Users"
    3. Locate the "elastic" user and click "Edit"
    4. Click "Reset password"
    5. Follow the prompts to reset the password

    The format for Elastic Cloud URLs is
    https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
    """

    def __init__(self, client: Any, index_name: str):
        self.client = client
        self.index_name = index_name
    @classmethod
    def create(
        cls, elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75
    ) -> ElasticSearchBM25Retriever:
        from elasticsearch import Elasticsearch

        # Create an Elasticsearch client instance
        es = Elasticsearch(elasticsearch_url)

        # Define the index settings and mappings
        settings = {
            "analysis": {"analyzer": {"default": {"type": "standard"}}},
            "similarity": {
                "custom_bm25": {
                    "type": "BM25",
                    "k1": k1,
                    "b": b,
                }
            },
        }
        mappings = {
            "properties": {
                "content": {
                    "type": "text",
                    "similarity": "custom_bm25",  # Use the custom BM25 similarity
                }
            }
        }

        # Create the index with the specified settings and mappings
        es.indices.create(index=index_name, mappings=mappings, settings=settings)
        return cls(es, index_name)

    def add_texts(
        self,
        texts: Iterable[str],
        refresh_indices: bool = True,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the retriever.

        Args:
            texts: Iterable of strings to add to the retriever.
            refresh_indices: bool to refresh ElasticSearch indices

        Returns:
            List of ids from adding the texts into the retriever.
        """
        try:
            from elasticsearch.helpers import bulk
        except ImportError:
            raise ValueError(
                "Could not import elasticsearch python package. "
                "Please install it with `pip install elasticsearch`."
            )
        requests = []
        ids = []
        for i, text in enumerate(texts):
            _id = str(uuid.uuid4())
            request = {
                "_op_type": "index",
                "_index": self.index_name,
                "content": text,
                "_id": _id,
            }
            ids.append(_id)
            requests.append(request)
        bulk(self.client, requests)
        if refresh_indices:
            self.client.indices.refresh(index=self.index_name)
        return ids

    def get_relevant_documents(self, query: str) -> List[Document]:
        query_dict = {"query": {"match": {"content": query}}}
        res = self.client.search(index=self.index_name, body=query_dict)
        docs = []
        for r in res["hits"]["hits"]:
            docs.append(Document(page_content=r["_source"]["content"]))
        return docs

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/elastic_search_bm25.html
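A sketch against a local node; the URL and index name are placeholders, and k1/b are the standard BM25 free parameters.

retriever = ElasticSearchBM25Retriever.create(
    "http://localhost:9200", "langchain-index", k1=2.0, b=0.75  # placeholders
)
retriever.add_texts(["foo", "foo bar", "hello world"])
docs = retriever.get_relevant_documents("foo")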
Source code for langchain.retrievers.wikipedia

from typing import List

from langchain.schema import BaseRetriever, Document
from langchain.utilities.wikipedia import WikipediaAPIWrapper


class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper):
    """
    It is effectively a wrapper for WikipediaAPIWrapper.
    It wraps load() to get_relevant_documents().
    It uses all WikipediaAPIWrapper arguments without any change.
    """

    def get_relevant_documents(self, query: str) -> List[Document]:
        return self.load(query=query)

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/wikipedia.html
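As with the PubMed wrapper, usage is direct; constructor arguments are whatever WikipediaAPIWrapper accepts.

retriever = WikipediaRetriever()  # WikipediaAPIWrapper kwargs pass through
docs = retriever.get_relevant_documents("LangChain")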
Source code for langchain.retrievers.databerry

from typing import List, Optional

import aiohttp
import requests

from langchain.schema import BaseRetriever, Document


class DataberryRetriever(BaseRetriever):
    datastore_url: str
    top_k: Optional[int]
    api_key: Optional[str]

    def __init__(
        self,
        datastore_url: str,
        top_k: Optional[int] = None,
        api_key: Optional[str] = None,
    ):
        self.datastore_url = datastore_url
        self.api_key = api_key
        self.top_k = top_k

    def get_relevant_documents(self, query: str) -> List[Document]:
        response = requests.post(
            self.datastore_url,
            json={
                "query": query,
                **({"topK": self.top_k} if self.top_k is not None else {}),
            },
            headers={
                "Content-Type": "application/json",
                **(
                    {"Authorization": f"Bearer {self.api_key}"}
                    if self.api_key is not None
                    else {}
                ),
            },
        )
        data = response.json()
        return [
            Document(
                page_content=r["text"],
                metadata={"source": r["source"], "score": r["score"]},
            )
            for r in data["results"]
        ]

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        async with aiohttp.ClientSession() as session:
            async with session.request(
                "POST",
                self.datastore_url,
                json={
                    "query": query,
                    **({"topK": self.top_k} if self.top_k is not None else {}),
                },
                headers={
                    "Content-Type": "application/json",
                    **(
                        {"Authorization": f"Bearer {self.api_key}"}
                        if self.api_key is not None
                        else {}
                    ),
                },
            ) as response:
                data = await response.json()
                return [
                    Document(
                        page_content=r["text"],
                        metadata={"source": r["source"], "score": r["score"]},
                    )
                    for r in data["results"]
                ]
https://python.langchain.com/en/latest/_modules/langchain/retrievers/databerry.html
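A sketch with placeholder credentials; top_k is sent as topK in the request body, and the API key (if provided) as a Bearer token.

retriever = DataberryRetriever(
    datastore_url="https://api.databerry.ai/query/<datastore-id>",  # placeholder URL
    api_key="<api-key>",  # optional
    top_k=3,
)
docs = retriever.get_relevant_documents("what is databerry?")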
Source code for langchain.retrievers.pinecone_hybrid_search

"""Taken from: https://docs.pinecone.io/docs/hybrid-search"""
import hashlib
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document


def hash_text(text: str) -> str:
    return str(hashlib.sha256(text.encode("utf-8")).hexdigest())


def create_index(
    contexts: List[str],
    index: Any,
    embeddings: Embeddings,
    sparse_encoder: Any,
    ids: Optional[List[str]] = None,
    metadatas: Optional[List[dict]] = None,
) -> None:
    batch_size = 32
    _iterator = range(0, len(contexts), batch_size)
    try:
        from tqdm.auto import tqdm

        _iterator = tqdm(_iterator)
    except ImportError:
        pass

    if ids is None:
        # create unique ids using hash of the text
        ids = [hash_text(context) for context in contexts]

    for i in _iterator:
        # find end of batch
        i_end = min(i + batch_size, len(contexts))
        # extract batch
        context_batch = contexts[i:i_end]
        batch_ids = ids[i:i_end]
        metadata_batch = (
            metadatas[i:i_end] if metadatas else [{} for _ in context_batch]
        )
        # add context passages as metadata
        meta = [
            {"context": context, **metadata}
            for context, metadata in zip(context_batch, metadata_batch)
        ]
        # create dense vectors
        dense_embeds = embeddings.embed_documents(context_batch)
        # create sparse vectors
        sparse_embeds = sparse_encoder.encode_documents(context_batch)
        for s in sparse_embeds:
            s["values"] = [float(s1) for s1 in s["values"]]
        vectors = []
        # loop through the data and create dictionaries for upserts
        for doc_id, sparse, dense, metadata in zip(
            batch_ids, sparse_embeds, dense_embeds, meta
        ):
            vectors.append(
                {
                    "id": doc_id,
                    "sparse_values": sparse,
                    "values": dense,
                    "metadata": metadata,
                }
            )
        # upload the documents to the new hybrid index
        index.upsert(vectors)


class PineconeHybridSearchRetriever(BaseRetriever, BaseModel):
    embeddings: Embeddings
    sparse_encoder: Any
    index: Any
    top_k: int = 4
    alpha: float = 0.5

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def add_texts(
        self,
        texts: List[str],
        ids: Optional[List[str]] = None,
        metadatas: Optional[List[dict]] = None,
    ) -> None:
        create_index(
            texts,
            self.index,
            self.embeddings,
            self.sparse_encoder,
            ids=ids,
            metadatas=metadatas,
        )

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        try:
"""Validate that api key and python package exists in environment.""" try: from pinecone_text.hybrid import hybrid_convex_scale # noqa:F401 from pinecone_text.sparse.base_sparse_encoder import ( BaseSparseEncoder, # noqa:F401 ) except ImportError: raise ValueError( "Could not import pinecone_text python package. " "Please install it with `pip install pinecone_text`." ) return values [docs] def get_relevant_documents(self, query: str) -> List[Document]: from pinecone_text.hybrid import hybrid_convex_scale sparse_vec = self.sparse_encoder.encode_queries(query) # convert the question into a dense vector dense_vec = self.embeddings.embed_query(query) # scale alpha with hybrid_scale dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha) sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]] # query pinecone with the query parameters result = self.index.query( vector=dense_vec, sparse_vector=sparse_vec, top_k=self.top_k, include_metadata=True, ) final_result = [] for res in result["matches"]: context = res["metadata"].pop("context") final_result.append( Document(page_content=context, metadata=res["metadata"]) ) # return search results as json return final_result [docs] async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 11, 2023.
https://python.langchain.com/en/latest/_modules/langchain/retrievers/pinecone_hybrid_search.html
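A construction sketch, assuming a Pinecone index created with the dotproduct metric and pinecone-text's BM25Encoder as the sparse encoder; the keys and index name are placeholders.

import pinecone
from pinecone_text.sparse import BM25Encoder

from langchain.embeddings import OpenAIEmbeddings

pinecone.init(api_key="<PINECONE_API_KEY>", environment="<ENV>")  # placeholders
index = pinecone.Index("langchain-hybrid-index")  # must use the dotproduct metric

retriever = PineconeHybridSearchRetriever(
    embeddings=OpenAIEmbeddings(),
    sparse_encoder=BM25Encoder().default(),  # pretrained MS MARCO parameters
    index=index,
    alpha=0.5,  # 1.0 = pure dense search, 0.0 = pure sparse search
)
retriever.add_texts(["foo", "bar", "hello world"])
docs = retriever.get_relevant_documents("foo")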
Source code for langchain.retrievers.time_weighted_retriever

"""Retriever that combines embedding similarity with recency in retrieving values."""
import datetime
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple

from pydantic import BaseModel, Field

from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore


def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
    """Get the hours passed between two datetime objects."""
    return (time - ref_time).total_seconds() / 3600


class TimeWeightedVectorStoreRetriever(BaseRetriever, BaseModel):
    """Retriever combining embedding similarity with recency."""

    vectorstore: VectorStore
    """The vectorstore to store documents and determine salience."""

    search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
    """Keyword arguments to pass to the vectorstore similarity search."""

    # TODO: abstract as a queue
    memory_stream: List[Document] = Field(default_factory=list)
    """The memory_stream of documents to search through."""

    decay_rate: float = Field(default=0.01)
    """The exponential decay factor used as (1.0 - decay_rate)**(hrs_passed)."""

    k: int = 4
    """The maximum number of documents to retrieve in a given call."""

    other_score_keys: List[str] = []
    """Other keys in the metadata to factor into the score, e.g. 'importance'."""

    default_salience: Optional[float] = None
    """The salience to assign memories not retrieved from the vector store.

    None assigns no salience to documents not fetched from the vector store.
    """
""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def _get_combined_score( self, document: Document, vector_relevance: Optional[float], current_time: datetime.datetime, ) -> float: """Return the combined score for a document.""" hours_passed = _get_hours_passed( current_time, document.metadata["last_accessed_at"], ) score = (1.0 - self.decay_rate) ** hours_passed for key in self.other_score_keys: if key in document.metadata: score += document.metadata[key] if vector_relevance is not None: score += vector_relevance return score [docs] def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]: """Return documents that are salient to the query.""" docs_and_scores: List[Tuple[Document, float]] docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores( query, **self.search_kwargs ) results = {} for fetched_doc, relevance in docs_and_scores: if "buffer_idx" in fetched_doc.metadata: buffer_idx = fetched_doc.metadata["buffer_idx"] doc = self.memory_stream[buffer_idx] results[buffer_idx] = (doc, relevance) return results [docs] def get_relevant_documents(self, query: str) -> List[Document]: """Return documents that are relevant to the query.""" current_time = datetime.datetime.now() docs_and_scores = { doc.metadata["buffer_idx"]: (doc, self.default_salience) for doc in self.memory_stream[-self.k :]
        }
        # If a doc is considered salient, update the salience score
        docs_and_scores.update(self.get_salient_docs(query))
        rescored_docs = [
            (doc, self._get_combined_score(doc, relevance, current_time))
            for doc, relevance in docs_and_scores.values()
        ]
        rescored_docs.sort(key=lambda x: x[1], reverse=True)
        result = []
        # Ensure frequently accessed memories aren't forgotten
        for doc, _ in rescored_docs[: self.k]:
            # TODO: Update vector store doc once `update` method is exposed.
            buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
            buffered_doc.metadata["last_accessed_at"] = current_time
            result.append(buffered_doc)
        return result

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        """Return documents that are relevant to the query."""
        raise NotImplementedError

    def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
        """Add documents to vectorstore."""
        current_time = kwargs.get("current_time")
        if current_time is None:
            current_time = datetime.datetime.now()
        # Avoid mutating input documents
        dup_docs = [deepcopy(d) for d in documents]
        for i, doc in enumerate(dup_docs):
            if "last_accessed_at" not in doc.metadata:
                doc.metadata["last_accessed_at"] = current_time
            if "created_at" not in doc.metadata:
                doc.metadata["created_at"] = current_time
            doc.metadata["buffer_idx"] = len(self.memory_stream) + i
doc.metadata["buffer_idx"] = len(self.memory_stream) + i self.memory_stream.extend(dup_docs) return self.vectorstore.add_documents(dup_docs, **kwargs) [docs] async def aadd_documents( self, documents: List[Document], **kwargs: Any ) -> List[str]: """Add documents to vectorstore.""" current_time = kwargs.get("current_time") if current_time is None: current_time = datetime.datetime.now() # Avoid mutating input documents dup_docs = [deepcopy(d) for d in documents] for i, doc in enumerate(dup_docs): if "last_accessed_at" not in doc.metadata: doc.metadata["last_accessed_at"] = current_time if "created_at" not in doc.metadata: doc.metadata["created_at"] = current_time doc.metadata["buffer_idx"] = len(self.memory_stream) + i self.memory_stream.extend(dup_docs) return await self.vectorstore.aadd_documents(dup_docs, **kwargs) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 11, 2023.
https://python.langchain.com/en/latest/_modules/langchain/retrievers/time_weighted_retriever.html
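The combined score is semantic_similarity + (1.0 - decay_rate) ** hours_passed plus any other_score_keys, so with the default decay_rate of 0.01 a day-old memory retains 0.99 ** 24, roughly 0.79, of its recency credit. A sketch, reusing the FAISS vectorstore built in the memory example earlier in this document:

from datetime import datetime, timedelta

from langchain.schema import Document

retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, decay_rate=0.01, k=1  # vectorstore as built earlier
)
yesterday = datetime.now() - timedelta(days=1)
retriever.add_documents(
    [Document(page_content="hello world", metadata={"last_accessed_at": yesterday})]
)
retriever.add_documents([Document(page_content="hello foo")])
# "hello foo" is likely returned first: its recency term is ~1.0, while the
# day-old "hello world" has decayed to roughly 0.79.
docs = retriever.get_relevant_documents("hello world")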
Source code for langchain.retrievers.weaviate_hybrid_search

"""Wrapper around weaviate vector database."""
from __future__ import annotations

from typing import Any, Dict, List, Optional
from uuid import uuid4

from pydantic import Extra

from langchain.docstore.document import Document
from langchain.schema import BaseRetriever


class WeaviateHybridSearchRetriever(BaseRetriever):
    def __init__(
        self,
        client: Any,
        index_name: str,
        text_key: str,
        alpha: float = 0.5,
        k: int = 4,
        attributes: Optional[List[str]] = None,
        create_schema_if_missing: bool = True,
    ):
        try:
            import weaviate
        except ImportError:
            raise ImportError(
                "Could not import weaviate python package. "
                "Please install it with `pip install weaviate-client`."
            )
        if not isinstance(client, weaviate.Client):
            raise ValueError(
                f"client should be an instance of weaviate.Client, got {type(client)}"
            )
        self._client = client
        self.k = k
        self.alpha = alpha
        self._index_name = index_name
        self._text_key = text_key
        self._query_attrs = [self._text_key]
        if attributes is not None:
            self._query_attrs.extend(attributes)
        if create_schema_if_missing:
            self._create_schema_if_missing()

    def _create_schema_if_missing(self) -> None:
        class_obj = {
            "class": self._index_name,
            "properties": [{"name": self._text_key, "dataType": ["text"]}],
"properties": [{"name": self._text_key, "dataType": ["text"]}], "vectorizer": "text2vec-openai", } if not self._client.schema.exists(self._index_name): self._client.schema.create_class(class_obj) [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True # added text_key [docs] def add_documents(self, docs: List[Document], **kwargs: Any) -> List[str]: """Upload documents to Weaviate.""" from weaviate.util import get_valid_uuid with self._client.batch as batch: ids = [] for i, doc in enumerate(docs): metadata = doc.metadata or {} data_properties = {self._text_key: doc.page_content, **metadata} # If the UUID of one of the objects already exists # then the existing objectwill be replaced by the new object. if "uuids" in kwargs: _id = kwargs["uuids"][i] else: _id = get_valid_uuid(uuid4()) batch.add_data_object(data_properties, self._index_name, _id) ids.append(_id) return ids [docs] def get_relevant_documents( self, query: str, where_filter: Optional[Dict[str, object]] = None ) -> List[Document]: """Look up similar documents in Weaviate.""" query_obj = self._client.query.get(self._index_name, self._query_attrs) if where_filter: query_obj = query_obj.with_where(where_filter)
        result = query_obj.with_hybrid(query, alpha=self.alpha).with_limit(self.k).do()
        if "errors" in result:
            raise ValueError(f"Error during query: {result['errors']}")
        docs = []
        for res in result["data"]["Get"][self._index_name]:
            text = res.pop(self._text_key)
            docs.append(Document(page_content=text, metadata=res))
        return docs

    async def aget_relevant_documents(
        self, query: str, where_filter: Optional[Dict[str, object]] = None
    ) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/weaviate_hybrid_search.html
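A sketch against a local Weaviate instance; the URL and index name are placeholders, and the index is created automatically when missing.

import weaviate

from langchain.docstore.document import Document

client = weaviate.Client(url="http://localhost:8080")  # placeholder URL
retriever = WeaviateHybridSearchRetriever(
    client=client,
    index_name="LangChain",
    text_key="text",
    alpha=0.5,  # 1.0 = pure vector search, 0.0 = pure keyword (BM25) search
)
retriever.add_documents([Document(page_content="foo", metadata={"source": "demo"})])
docs = retriever.get_relevant_documents("foo")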
Source code for langchain.retrievers.zep

from __future__ import annotations

from typing import TYPE_CHECKING, Dict, List, Optional

from langchain.schema import BaseRetriever, Document

if TYPE_CHECKING:
    from zep_python import MemorySearchResult


class ZepRetriever(BaseRetriever):
    """A Retriever implementation for the Zep long-term memory store.

    Search your user's long-term chat history with Zep.

    Note: You will need to provide the user's `session_id` to use this retriever.

    More on Zep:
    Zep provides long-term conversation storage for LLM apps. The server stores,
    summarizes, embeds, indexes, and enriches conversational AI chat histories,
    and exposes them via simple, low-latency APIs.

    For server installation instructions, see:
    https://getzep.github.io/deployment/quickstart/
    """

    def __init__(
        self,
        session_id: str,
        url: str,
        top_k: Optional[int] = None,
    ):
        try:
            from zep_python import ZepClient
        except ImportError:
            raise ValueError(
                "Could not import zep-python package. "
                "Please install it with `pip install zep-python`."
            )
        self.zep_client = ZepClient(base_url=url)
        self.session_id = session_id
        self.top_k = top_k

    def _search_result_to_doc(
        self, results: List[MemorySearchResult]
    ) -> List[Document]:
        return [
            Document(
                page_content=r.message.pop("content"),
                metadata={"score": r.dist, **r.message},
            )
            for r in results
            if r.message
        ]

    def get_relevant_documents(
        self, query: str, metadata: Optional[Dict] = None
    ) -> List[Document]:
        from zep_python import MemorySearchPayload

        payload: MemorySearchPayload = MemorySearchPayload(
            text=query, metadata=metadata
        )
        results: List[MemorySearchResult] = self.zep_client.search_memory(
            self.session_id, payload, limit=self.top_k
        )
        return self._search_result_to_doc(results)

    async def aget_relevant_documents(
        self, query: str, metadata: Optional[Dict] = None
    ) -> List[Document]:
        from zep_python import MemorySearchPayload

        payload: MemorySearchPayload = MemorySearchPayload(
            text=query, metadata=metadata
        )
        results: List[MemorySearchResult] = await self.zep_client.asearch_memory(
            self.session_id, payload, limit=self.top_k
        )
        return self._search_result_to_doc(results)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/zep.html
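A sketch; the session id identifies whose chat history to search, and the URL points at a running Zep server (the local address here is an assumption).

zep_retriever = ZepRetriever(
    session_id="<session-id>",    # placeholder
    url="http://localhost:8000",  # assumed local Zep server address
    top_k=5,
)
docs = zep_retriever.get_relevant_documents("travel plans")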
Source code for langchain.retrievers.knn

"""KNN Retriever.

Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb
"""
from __future__ import annotations

import concurrent.futures
from typing import Any, List, Optional

import numpy as np
from pydantic import BaseModel

from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document


def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return np.array(list(executor.map(embeddings.embed_query, contexts)))


class KNNRetriever(BaseRetriever, BaseModel):
    embeddings: Embeddings
    index: Any
    texts: List[str]
    k: int = 4
    relevancy_threshold: Optional[float] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @classmethod
    def from_texts(
        cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
    ) -> KNNRetriever:
        index = create_index(texts, embeddings)
        return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)

    def get_relevant_documents(self, query: str) -> List[Document]:
        query_embeds = np.array(self.embeddings.embed_query(query))
        # calc L2 norm
        index_embeds = self.index / np.sqrt((self.index**2).sum(1, keepdims=True))
        query_embeds = query_embeds / np.sqrt((query_embeds**2).sum())

        similarities = index_embeds.dot(query_embeds)
        sorted_ix = np.argsort(-similarities)

        denominator = np.max(similarities) - np.min(similarities) + 1e-6
        normalized_similarities = (similarities - np.min(similarities)) / denominator

        top_k_results = [
            Document(page_content=self.texts[row])
            for row in sorted_ix[0 : self.k]
            if (
                self.relevancy_threshold is None
                or normalized_similarities[row] >= self.relevancy_threshold
            )
        ]
        return top_k_results

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/knn.html
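A sketch using OpenAI embeddings; any Embeddings implementation works.

from langchain.embeddings import OpenAIEmbeddings

retriever = KNNRetriever.from_texts(
    ["foo", "bar", "world", "hello", "foo bar"], OpenAIEmbeddings()
)
docs = retriever.get_relevant_documents("foo")  # cosine similarity over unit vectors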
Source code for langchain.retrievers.svm

"""SVM Retriever.

Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb
"""
from __future__ import annotations

import concurrent.futures
from typing import Any, List, Optional

import numpy as np
from pydantic import BaseModel

from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document


def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return np.array(list(executor.map(embeddings.embed_query, contexts)))


class SVMRetriever(BaseRetriever, BaseModel):
    embeddings: Embeddings
    index: Any
    texts: List[str]
    k: int = 4
    relevancy_threshold: Optional[float] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @classmethod
    def from_texts(
        cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
    ) -> SVMRetriever:
        index = create_index(texts, embeddings)
        return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)

    def get_relevant_documents(self, query: str) -> List[Document]:
        from sklearn import svm

        query_embeds = np.array(self.embeddings.embed_query(query))
        x = np.concatenate([query_embeds[None, ...], self.index])
        y = np.zeros(x.shape[0])
        y[0] = 1
        clf = svm.LinearSVC(
            class_weight="balanced", verbose=False, max_iter=10000, tol=1e-6, C=0.1
        )
        clf.fit(x, y)

        similarities = clf.decision_function(x)
        sorted_ix = np.argsort(-similarities)

        # svm.LinearSVC in scikit-learn is non-deterministic.
        # If a text is the same as a query, there is no guarantee
        # the query will be in the first index.
        # This performs a simple swap; it works because anything
        # left of the 0 should be equivalent.
        zero_index = np.where(sorted_ix == 0)[0][0]
        if zero_index != 0:
            sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[0]

        denominator = np.max(similarities) - np.min(similarities) + 1e-6
        normalized_similarities = (similarities - np.min(similarities)) / denominator

        top_k_results = []
        for row in sorted_ix[1 : self.k + 1]:
            if (
                self.relevancy_threshold is None
                or normalized_similarities[row] >= self.relevancy_threshold
            ):
                top_k_results.append(Document(page_content=self.texts[row - 1]))
        return top_k_results

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/svm.html
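Usage mirrors KNNRetriever, but the ranking differs: a linear SVM is fit per query with the query embedding as the lone positive example, and the indexed texts are ranked by their signed distance to the resulting hyperplane.

from langchain.embeddings import OpenAIEmbeddings

retriever = SVMRetriever.from_texts(
    ["foo", "bar", "world", "hello", "foo bar"], OpenAIEmbeddings()
)
docs = retriever.get_relevant_documents("foo")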
Source code for langchain.retrievers.metal

from typing import Any, List, Optional

from langchain.schema import BaseRetriever, Document


class MetalRetriever(BaseRetriever):
    def __init__(self, client: Any, params: Optional[dict] = None):
        from metal_sdk.metal import Metal

        if not isinstance(client, Metal):
            raise ValueError(
                "Got unexpected client, should be of type metal_sdk.metal.Metal. "
                f"Instead, got {type(client)}"
            )
        self.client: Metal = client
        self.params = params or {}

    def get_relevant_documents(self, query: str) -> List[Document]:
        results = self.client.search({"text": query}, **self.params)
        final_results = []
        for r in results["data"]:
            metadata = {k: v for k, v in r.items() if k != "text"}
            final_results.append(Document(page_content=r["text"], metadata=metadata))
        return final_results

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/metal.html
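A sketch with placeholder Metal credentials; params is forwarded as keyword arguments to the SDK's search call.

from metal_sdk.metal import Metal

metal = Metal("<API_KEY>", "<CLIENT_ID>", "<INDEX_ID>")  # placeholders
retriever = MetalRetriever(metal, params={"limit": 2})
docs = retriever.get_relevant_documents("what is a NFT?")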
Source code for langchain.retrievers.azure_cognitive_search

"""Retriever wrapper for Azure Cognitive Search."""
from __future__ import annotations

import json
from typing import Dict, List, Optional

import aiohttp
import requests
from pydantic import BaseModel, Extra, root_validator

from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env


class AzureCognitiveSearchRetriever(BaseRetriever, BaseModel):
    """Wrapper around Azure Cognitive Search."""

    service_name: str = ""
    """Name of Azure Cognitive Search service"""
    index_name: str = ""
    """Name of Index inside Azure Cognitive Search service"""
    api_key: str = ""
    """API Key. Both Admin and Query keys work, but for reading data it's
    recommended to use a Query key."""
    api_version: str = "2020-06-30"
    """API version"""
    aiosession: Optional[aiohttp.ClientSession] = None
    """ClientSession, in case we want to reuse connection for better performance."""
    content_key: str = "content"
    """Key in a retrieved result to set as the Document page_content."""

    class Config:
        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that service name, index name and api key exist in environment."""
        values["service_name"] = get_from_dict_or_env(
            values, "service_name", "AZURE_COGNITIVE_SEARCH_SERVICE_NAME"
        )
        values["index_name"] = get_from_dict_or_env(
            values, "index_name", "AZURE_COGNITIVE_SEARCH_INDEX_NAME"
        )
https://python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html
d532b8aed272-1
) values["api_key"] = get_from_dict_or_env( values, "api_key", "AZURE_COGNITIVE_SEARCH_API_KEY" ) return values def _build_search_url(self, query: str) -> str: base_url = f"https://{self.service_name}.search.windows.net/" endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}" return base_url + endpoint_path + f"&search={query}" @property def _headers(self) -> Dict[str, str]: return { "Content-Type": "application/json", "api-key": self.api_key, } def _search(self, query: str) -> List[dict]: search_url = self._build_search_url(query) response = requests.get(search_url, headers=self._headers) if response.status_code != 200: raise Exception(f"Error in search request: {response}") return json.loads(response.text)["value"] async def _asearch(self, query: str) -> List[dict]: search_url = self._build_search_url(query) if not self.aiosession: async with aiohttp.ClientSession() as session: async with session.get(search_url, headers=self._headers) as response: response_json = await response.json() else: async with self.aiosession.get( search_url, headers=self._headers ) as response: response_json = await response.json() return response_json["value"] [docs] def get_relevant_documents(self, query: str) -> List[Document]: search_results = self._search(query) return [
https://python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html
d532b8aed272-2
search_results = self._search(query) return [ Document(page_content=result.pop(self.content_key), metadata=result) for result in search_results ] [docs] async def aget_relevant_documents(self, query: str) -> List[Document]: search_results = await self._asearch(query) return [ Document(page_content=result.pop(self.content_key), metadata=result) for result in search_results ]
https://python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html
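A usage sketch (service, index, and key are placeholders; they can also be supplied via the environment variables validated above):

from langchain.retrievers import AzureCognitiveSearchRetriever

retriever = AzureCognitiveSearchRetriever(
    service_name="<service-name>",
    index_name="<index-name>",
    api_key="<query-key>",
    content_key="content",
)
docs = retriever.get_relevant_documents("what is langchain?")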
4ea7338bfc21-0
Source code for langchain.retrievers.chatgpt_plugin_retriever from __future__ import annotations from typing import List, Optional import aiohttp import requests from pydantic import BaseModel from langchain.schema import BaseRetriever, Document [docs]class ChatGPTPluginRetriever(BaseRetriever, BaseModel): url: str bearer_token: str top_k: int = 3 filter: Optional[dict] = None aiosession: Optional[aiohttp.ClientSession] = None class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] def get_relevant_documents(self, query: str) -> List[Document]: url, json, headers = self._create_request(query) response = requests.post(url, json=json, headers=headers) results = response.json()["results"][0]["results"] docs = [] for d in results: content = d.pop("text") docs.append(Document(page_content=content, metadata=d)) return docs [docs] async def aget_relevant_documents(self, query: str) -> List[Document]: url, json, headers = self._create_request(query) if not self.aiosession: async with aiohttp.ClientSession() as session: async with session.post(url, headers=headers, json=json) as response: res = await response.json() else: async with self.aiosession.post( url, headers=headers, json=json ) as response: res = await response.json() results = res["results"][0]["results"] docs = [] for d in results: content = d.pop("text")
https://python.langchain.com/en/latest/_modules/langchain/retrievers/chatgpt_plugin_retriever.html
4ea7338bfc21-1
for d in results: content = d.pop("text") docs.append(Document(page_content=content, metadata=d)) return docs def _create_request(self, query: str) -> tuple[str, dict, dict]: url = f"{self.url}/query" json = { "queries": [ { "query": query, "filter": self.filter, "top_k": self.top_k, } ] } headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.bearer_token}", } return url, json, headers
https://python.langchain.com/en/latest/_modules/langchain/retrievers/chatgpt_plugin_retriever.html
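A usage sketch (assumes a chatgpt-retrieval-plugin server is running at the given URL; the bearer token is a placeholder):

from langchain.retrievers import ChatGPTPluginRetriever

retriever = ChatGPTPluginRetriever(url="http://0.0.0.0:8000", bearer_token="foo")
docs = retriever.get_relevant_documents("alice's phone number")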
745e96522f02-0
Source code for langchain.retrievers.merger_retriever from typing import List from langchain.schema import BaseRetriever, Document [docs]class MergerRetriever(BaseRetriever): """ This class merges the results of multiple retrievers. Args: retrievers: A list of retrievers to merge. """ def __init__( self, retrievers: List[BaseRetriever], ): """ Initialize the MergerRetriever class. Args: retrievers: A list of retrievers to merge. """ self.retrievers = retrievers [docs] def get_relevant_documents(self, query: str) -> List[Document]: """ Get the relevant documents for a given query. Args: query: The query to search for. Returns: A list of relevant documents. """ # Merge the results of the retrievers. merged_documents = self.merge_documents(query) return merged_documents [docs] async def aget_relevant_documents(self, query: str) -> List[Document]: """ Asynchronously get the relevant documents for a given query. Args: query: The query to search for. Returns: A list of relevant documents. """ # Merge the results of the retrievers. merged_documents = await self.amerge_documents(query) return merged_documents [docs] def merge_documents(self, query: str) -> List[Document]: """ Merge the results of the retrievers. Args: query: The query to search for. Returns: A list of merged documents. """
https://python.langchain.com/en/latest/_modules/langchain/retrievers/merger_retriever.html
745e96522f02-1
Returns: A list of merged documents. """ # Get the results of all retrievers. retriever_docs = [ retriever.get_relevant_documents(query) for retriever in self.retrievers ] # Merge the results of the retrievers. merged_documents = [] max_docs = max(len(docs) for docs in retriever_docs) for i in range(max_docs): for retriever, doc in zip(self.retrievers, retriever_docs): if i < len(doc): merged_documents.append(doc[i]) return merged_documents [docs] async def amerge_documents(self, query: str) -> List[Document]: """ Asynchronously merge the results of the retrievers. Args: query: The query to search for. Returns: A list of merged documents. """ # Get the results of all retrievers. retriever_docs = [ await retriever.aget_relevant_documents(query) for retriever in self.retrievers ] # Merge the results of the retrievers. merged_documents = [] max_docs = max(len(docs) for docs in retriever_docs) for i in range(max_docs): for retriever, doc in zip(self.retrievers, retriever_docs): if i < len(doc): merged_documents.append(doc[i]) return merged_documents
https://python.langchain.com/en/latest/_modules/langchain/retrievers/merger_retriever.html
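A usage sketch of the round-robin merge implemented above (the two Chroma collections and the embedding object are illustrative):

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import MergerRetriever
from langchain.vectorstores import Chroma

emb = OpenAIEmbeddings()
retriever_a = Chroma(collection_name="a", embedding_function=emb).as_retriever()
retriever_b = Chroma(collection_name="b", embedding_function=emb).as_retriever()
merger = MergerRetriever(retrievers=[retriever_a, retriever_b])
docs = merger.get_relevant_documents("query")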
55e6d3c7e2be-0
Source code for langchain.retrievers.contextual_compression """Retriever that wraps a base retriever and filters the results.""" from typing import List from pydantic import BaseModel, Extra from langchain.retrievers.document_compressors.base import ( BaseDocumentCompressor, ) from langchain.schema import BaseRetriever, Document [docs]class ContextualCompressionRetriever(BaseRetriever, BaseModel): """Retriever that wraps a base retriever and compresses the results.""" base_compressor: BaseDocumentCompressor """Compressor for compressing retrieved documents.""" base_retriever: BaseRetriever """Base Retriever to use for getting relevant documents.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True [docs] def get_relevant_documents(self, query: str) -> List[Document]: """Get documents relevant for a query. Args: query: string to find relevant documents for Returns: Sequence of relevant documents """ docs = self.base_retriever.get_relevant_documents(query) compressed_docs = self.base_compressor.compress_documents(docs, query) return list(compressed_docs) [docs] async def aget_relevant_documents(self, query: str) -> List[Document]: """Get documents relevant for a query. Args: query: string to find relevant documents for Returns: List of relevant documents """ docs = await self.base_retriever.aget_relevant_documents(query) compressed_docs = await self.base_compressor.acompress_documents(docs, query) return list(compressed_docs)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/contextual_compression.html
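A usage sketch wiring a compressor into the retriever (assumes an existing vector store `vectorstore` and an OpenAI key; both are assumptions, not part of this module):

from langchain.llms import OpenAI
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor

compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor,
    base_retriever=vectorstore.as_retriever(),  # assumed pre-built store
)
docs = compression_retriever.get_relevant_documents("query")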
fc90dc88438a-0
Source code for langchain.retrievers.arxiv from typing import List from langchain.schema import BaseRetriever, Document from langchain.utilities.arxiv import ArxivAPIWrapper [docs]class ArxivRetriever(BaseRetriever, ArxivAPIWrapper): """ Effectively a wrapper around ArxivAPIWrapper: it exposes load() as get_relevant_documents() and accepts all ArxivAPIWrapper arguments unchanged. """ [docs] def get_relevant_documents(self, query: str) -> List[Document]: return self.load(query=query) [docs] async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/arxiv.html
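A usage sketch (load_max_docs is an ArxivAPIWrapper argument passed through unchanged; the query can be an arXiv id or free text):

from langchain.retrievers import ArxivRetriever

retriever = ArxivRetriever(load_max_docs=2)
docs = retriever.get_relevant_documents("1605.08386")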
faeac3fc9c0f-0
Source code for langchain.retrievers.aws_kendra_index_retriever """Retriever wrapper for AWS Kendra.""" import re from typing import Any, Dict, List from langchain.schema import BaseRetriever, Document [docs]class AwsKendraIndexRetriever(BaseRetriever): """Wrapper around AWS Kendra.""" kendraindex: str """Kendra index id""" k: int """Number of documents to query for.""" languagecode: str """Language code used for querying.""" kclient: Any """ boto3 client for Kendra. """ def __init__( self, kclient: Any, kendraindex: str, k: int = 3, languagecode: str = "en" ): self.kendraindex = kendraindex self.k = k self.languagecode = languagecode self.kclient = kclient def _clean_result(self, res_text: str) -> str: return re.sub(r"\s+", " ", res_text).replace("...", "") def _get_top_n_results(self, resp: Dict, count: int) -> Document: r = resp["ResultItems"][count] doc_title = r["DocumentTitle"]["Text"] doc_uri = r["DocumentURI"] r_type = r["Type"] if ( r["AdditionalAttributes"] and r["AdditionalAttributes"][0]["Key"] == "AnswerText" ): res_text = r["AdditionalAttributes"][0]["Value"]["TextWithHighlightsValue"][ "Text" ] else: res_text = r["DocumentExcerpt"]["Text"] doc_excerpt = self._clean_result(res_text)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/aws_kendra_index_retriever.html
faeac3fc9c0f-1
doc_excerpt = self._clean_result(res_text) combined_text = f"""Document Title: {doc_title} Document Excerpt: {doc_excerpt} """ return Document( page_content=combined_text, metadata={ "source": doc_uri, "title": doc_title, "excerpt": doc_excerpt, "type": r_type, }, ) def _kendra_query(self, kquery: str) -> List[Document]: response = self.kclient.query( IndexId=self.kendraindex, QueryText=kquery.strip(), AttributeFilter={ "AndAllFilters": [ { "EqualsTo": { "Key": "_language_code", "Value": { "StringValue": self.languagecode, }, } } ] }, ) if len(response["ResultItems"]) > self.k: r_count = self.k else: r_count = len(response["ResultItems"]) return [self._get_top_n_results(response, i) for i in range(0, r_count)] [docs] def get_relevant_documents(self, query: str) -> List[Document]: """Run search on Kendra index and get top k documents docs = get_relevant_documents('This is my query') """ return self._kendra_query(query) [docs] async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError("AwsKendraIndexRetriever does not support async")
https://python.langchain.com/en/latest/_modules/langchain/retrievers/aws_kendra_index_retriever.html
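A usage sketch (the index id is a placeholder; boto3 picks up credentials from its usual configuration chain, and the import path is as of this version):

import boto3
from langchain.retrievers import AwsKendraIndexRetriever

kclient = boto3.client("kendra", region_name="us-east-1")
retriever = AwsKendraIndexRetriever(
    kclient=kclient, kendraindex="<kendra-index-id>", k=3
)
docs = retriever.get_relevant_documents("what is langchain?")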
95f2bf39d14e-0
Source code for langchain.retrievers.document_compressors.base """Interface for retrieved document compressors.""" from abc import ABC, abstractmethod from typing import List, Sequence, Union from pydantic import BaseModel from langchain.schema import BaseDocumentTransformer, Document class BaseDocumentCompressor(BaseModel, ABC): """Base abstraction interface for document compression.""" @abstractmethod def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress retrieved documents given the query context.""" @abstractmethod async def acompress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress retrieved documents given the query context.""" [docs]class DocumentCompressorPipeline(BaseDocumentCompressor): """Document compressor that uses a pipeline of transformers.""" transformers: List[Union[BaseDocumentTransformer, BaseDocumentCompressor]] """List of document filters that are chained together and run in sequence.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Transform a list of documents.""" for _transformer in self.transformers: if isinstance(_transformer, BaseDocumentCompressor): documents = _transformer.compress_documents(documents, query) elif isinstance(_transformer, BaseDocumentTransformer): documents = _transformer.transform_documents(documents) else: raise ValueError(f"Got unexpected transformer type: {_transformer}") return documents [docs] async def acompress_documents( self, documents: Sequence[Document], query: str
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/base.html
95f2bf39d14e-1
self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress retrieved documents given the query context.""" for _transformer in self.transformers: if isinstance(_transformer, BaseDocumentCompressor): documents = await _transformer.acompress_documents(documents, query) elif isinstance(_transformer, BaseDocumentTransformer): documents = await _transformer.atransform_documents(documents) else: raise ValueError(f"Got unexpected transformer type: {_transformer}") return documents
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/base.html
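A sketch of a compressor pipeline (mirrors the chaining logic above; the splitter settings and similarity threshold are illustrative, and an OpenAI key is assumed):

from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import (
    DocumentCompressorPipeline,
    EmbeddingsFilter,
)
from langchain.text_splitter import CharacterTextSplitter

emb = OpenAIEmbeddings()
pipeline = DocumentCompressorPipeline(
    transformers=[
        CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". "),
        EmbeddingsRedundantFilter(embeddings=emb),  # drop near-duplicate chunks
        EmbeddingsFilter(embeddings=emb, similarity_threshold=0.76),
    ]
)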
51edcea0bcee-0
Source code for langchain.retrievers.document_compressors.cohere_rerank from __future__ import annotations from typing import TYPE_CHECKING, Dict, Sequence from pydantic import Extra, root_validator from langchain.retrievers.document_compressors.base import BaseDocumentCompressor from langchain.schema import Document from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: from cohere import Client else: # We do this to avoid pydantic annotation issues when actually instantiating, # while keeping the import optional try: from cohere import Client except ImportError: pass [docs]class CohereRerank(BaseDocumentCompressor): client: Client top_n: int = 3 model: str = "rerank-english-v2.0" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that the api key and python package exist in the environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." ) return values [docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: if len(documents) == 0: # to avoid empty api call return []
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/cohere_rerank.html
51edcea0bcee-1
return [] doc_list = list(documents) _docs = [d.page_content for d in doc_list] results = self.client.rerank( model=self.model, query=query, documents=_docs, top_n=self.top_n ) final_results = [] for r in results: doc = doc_list[r.index] doc.metadata["relevance_score"] = r.relevance_score final_results.append(doc) return final_results [docs] async def acompress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/cohere_rerank.html
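A usage sketch (assumes COHERE_API_KEY is set and `docs` is an existing list of Documents):

from langchain.retrievers.document_compressors import CohereRerank

reranker = CohereRerank(top_n=3)
reranked = reranker.compress_documents(docs, "What did the author say about X?")
# each returned Document carries a "relevance_score" entry in its metadata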
de3863e2f03c-0
Source code for langchain.retrievers.document_compressors.embeddings_filter """Document compressor that uses embeddings to drop documents unrelated to the query.""" from typing import Callable, Dict, Optional, Sequence import numpy as np from pydantic import root_validator from langchain.document_transformers import ( _get_embeddings_from_stateful_docs, get_stateful_documents, ) from langchain.embeddings.base import Embeddings from langchain.math_utils import cosine_similarity from langchain.retrievers.document_compressors.base import ( BaseDocumentCompressor, ) from langchain.schema import Document [docs]class EmbeddingsFilter(BaseDocumentCompressor): embeddings: Embeddings """Embeddings to use for embedding document contents and queries.""" similarity_fn: Callable = cosine_similarity """Similarity function for comparing documents. Function expected to take as input two matrices (List[List[float]]) and return a matrix of scores where higher values indicate greater similarity.""" k: Optional[int] = 20 """The number of relevant documents to return. Can be set to None, in which case `similarity_threshold` must be specified. Defaults to 20.""" similarity_threshold: Optional[float] """Threshold for determining when a document is similar enough to the query to be returned. Defaults to None, must be specified if `k` is set to None.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator() def validate_params(cls, values: Dict) -> Dict: """Validate similarity parameters.""" if values["k"] is None and values["similarity_threshold"] is None: raise ValueError("Must specify one of `k` or `similarity_threshold`.") return values
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/embeddings_filter.html
de3863e2f03c-1
return values [docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Filter documents based on similarity of their embeddings to the query.""" stateful_documents = get_stateful_documents(documents) embedded_documents = _get_embeddings_from_stateful_docs( self.embeddings, stateful_documents ) embedded_query = self.embeddings.embed_query(query) similarity = self.similarity_fn([embedded_query], embedded_documents)[0] included_idxs = np.arange(len(embedded_documents)) if self.k is not None: included_idxs = np.argsort(similarity)[::-1][: self.k] if self.similarity_threshold is not None: similar_enough = np.where( similarity[included_idxs] > self.similarity_threshold ) included_idxs = included_idxs[similar_enough] return [stateful_documents[i] for i in included_idxs] [docs] async def acompress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Filter down documents.""" raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/embeddings_filter.html
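A usage sketch (the documents and threshold are illustrative; assumes an OpenAI key):

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain.schema import Document

docs = [
    Document(page_content="The sky is blue."),
    Document(page_content="Corporate tax rules changed in 2020."),
]
embeddings_filter = EmbeddingsFilter(
    embeddings=OpenAIEmbeddings(), similarity_threshold=0.76
)
kept = embeddings_filter.compress_documents(docs, "what color is the sky?")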
ba936cb49a3e-0
Source code for langchain.retrievers.document_compressors.chain_filter """Filter that uses an LLM to drop documents that aren't relevant to the query.""" from typing import Any, Callable, Dict, Optional, Sequence from langchain import BasePromptTemplate, LLMChain, PromptTemplate from langchain.base_language import BaseLanguageModel from langchain.output_parsers.boolean import BooleanOutputParser from langchain.retrievers.document_compressors.base import BaseDocumentCompressor from langchain.retrievers.document_compressors.chain_filter_prompt import ( prompt_template, ) from langchain.schema import Document def _get_default_chain_prompt() -> PromptTemplate: return PromptTemplate( template=prompt_template, input_variables=["question", "context"], output_parser=BooleanOutputParser(), ) def default_get_input(query: str, doc: Document) -> Dict[str, Any]: """Return the compression chain input.""" return {"question": query, "context": doc.page_content} [docs]class LLMChainFilter(BaseDocumentCompressor): """Filter that drops documents that aren't relevant to the query.""" llm_chain: LLMChain """LLM wrapper to use for filtering documents. The chain prompt is expected to have a BooleanOutputParser.""" get_input: Callable[[str, Document], dict] = default_get_input """Callable for constructing the chain input from the query and a Document.""" [docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Filter down documents based on their relevance to the query.""" filtered_docs = [] for doc in documents: _input = self.get_input(query, doc) include_doc = self.llm_chain.predict_and_parse(**_input)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_filter.html
ba936cb49a3e-1
include_doc = self.llm_chain.predict_and_parse(**_input) if include_doc: filtered_docs.append(doc) return filtered_docs [docs] async def acompress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Filter down documents.""" raise NotImplementedError [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[BasePromptTemplate] = None, **kwargs: Any ) -> "LLMChainFilter": _prompt = prompt if prompt is not None else _get_default_chain_prompt() llm_chain = LLMChain(llm=llm, prompt=_prompt) return cls(llm_chain=llm_chain, **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_filter.html
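A usage sketch (assumes `docs` is a sequence of Documents and an OpenAI key is set):

from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainFilter

llm_filter = LLMChainFilter.from_llm(OpenAI(temperature=0))
kept = llm_filter.compress_documents(docs, "What did the author study?")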
8ce6b3c39fe3-0
Source code for langchain.retrievers.document_compressors.chain_extract """DocumentFilter that uses an LLM chain to extract the relevant parts of documents.""" from __future__ import annotations import asyncio from typing import Any, Callable, Dict, Optional, Sequence from langchain import LLMChain, PromptTemplate from langchain.base_language import BaseLanguageModel from langchain.retrievers.document_compressors.base import BaseDocumentCompressor from langchain.retrievers.document_compressors.chain_extract_prompt import ( prompt_template, ) from langchain.schema import BaseOutputParser, Document def default_get_input(query: str, doc: Document) -> Dict[str, Any]: """Return the compression chain input.""" return {"question": query, "context": doc.page_content} class NoOutputParser(BaseOutputParser[str]): """Parse outputs that could return a null string of some sort.""" no_output_str: str = "NO_OUTPUT" def parse(self, text: str) -> str: cleaned_text = text.strip() if cleaned_text == self.no_output_str: return "" return cleaned_text def _get_default_chain_prompt() -> PromptTemplate: output_parser = NoOutputParser() template = prompt_template.format(no_output_str=output_parser.no_output_str) return PromptTemplate( template=template, input_variables=["question", "context"], output_parser=output_parser, ) [docs]class LLMChainExtractor(BaseDocumentCompressor): llm_chain: LLMChain """LLM wrapper to use for compressing documents.""" get_input: Callable[[str, Document], dict] = default_get_input """Callable for constructing the chain input from the query and a Document.""" [docs] def compress_documents(
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_extract.html
8ce6b3c39fe3-1
[docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress page content of raw documents.""" compressed_docs = [] for doc in documents: _input = self.get_input(query, doc) output = self.llm_chain.predict_and_parse(**_input) if len(output) == 0: continue compressed_docs.append(Document(page_content=output, metadata=doc.metadata)) return compressed_docs [docs] async def acompress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress page content of raw documents asynchronously.""" outputs = await asyncio.gather( *[ self.llm_chain.apredict_and_parse(**self.get_input(query, doc)) for doc in documents ] ) compressed_docs = [] for i, doc in enumerate(documents): if len(outputs[i]) == 0: continue compressed_docs.append( Document(page_content=outputs[i], metadata=doc.metadata) ) return compressed_docs [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, get_input: Optional[Callable[[str, Document], str]] = None, llm_chain_kwargs: Optional[dict] = None, ) -> LLMChainExtractor: """Initialize from LLM.""" _prompt = prompt if prompt is not None else _get_default_chain_prompt() _get_input = get_input if get_input is not None else default_get_input
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_extract.html
8ce6b3c39fe3-2
_get_input = get_input if get_input is not None else default_get_input llm_chain = LLMChain(llm=llm, prompt=_prompt, **(llm_chain_kwargs or {})) return cls(llm_chain=llm_chain, get_input=_get_input)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_extract.html
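A usage sketch (same assumptions as the filter example above):

from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainExtractor

extractor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
compressed = extractor.compress_documents(docs, "What did the author study?")
# each surviving Document keeps its metadata but only the extracted text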
f06de8e32e77-0
Source code for langchain.retrievers.self_query.base """Retriever that generates and executes structured queries over its own data source.""" from typing import Any, Dict, List, Optional, Type, cast from pydantic import BaseModel, Field, root_validator from langchain import LLMChain from langchain.base_language import BaseLanguageModel from langchain.chains.query_constructor.base import load_query_constructor_chain from langchain.chains.query_constructor.ir import StructuredQuery, Visitor from langchain.chains.query_constructor.schema import AttributeInfo from langchain.retrievers.self_query.chroma import ChromaTranslator from langchain.retrievers.self_query.pinecone import PineconeTranslator from langchain.retrievers.self_query.qdrant import QdrantTranslator from langchain.retrievers.self_query.weaviate import WeaviateTranslator from langchain.schema import BaseRetriever, Document from langchain.vectorstores import Chroma, Pinecone, Qdrant, VectorStore, Weaviate def _get_builtin_translator(vectorstore: VectorStore) -> Visitor: """Get the translator class corresponding to the vector store class.""" vectorstore_cls = vectorstore.__class__ BUILTIN_TRANSLATORS: Dict[Type[VectorStore], Type[Visitor]] = { Pinecone: PineconeTranslator, Chroma: ChromaTranslator, Weaviate: WeaviateTranslator, Qdrant: QdrantTranslator, } if vectorstore_cls not in BUILTIN_TRANSLATORS: raise ValueError( f"Self query retriever with Vector Store type {vectorstore_cls}" f" not supported." ) if isinstance(vectorstore, Qdrant): return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/base.html
f06de8e32e77-1
return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key) return BUILTIN_TRANSLATORS[vectorstore_cls]() [docs]class SelfQueryRetriever(BaseRetriever, BaseModel): """Retriever that wraps around a vector store and uses an LLM to generate the vector store queries.""" vectorstore: VectorStore """The underlying vector store from which documents will be retrieved.""" llm_chain: LLMChain """The LLMChain for generating the vector store queries.""" search_type: str = "similarity" """The search type to perform on the vector store.""" search_kwargs: dict = Field(default_factory=dict) """Keyword arguments to pass in to the vector store search.""" structured_query_translator: Visitor """Translator for turning internal query language into vectorstore search params.""" verbose: bool = False class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator(pre=True) def validate_translator(cls, values: Dict) -> Dict: """Validate translator.""" if "structured_query_translator" not in values: values["structured_query_translator"] = _get_builtin_translator( values["vectorstore"] ) return values [docs] def get_relevant_documents(self, query: str) -> List[Document]: """Get documents relevant for a query. Args: query: string to find relevant documents for Returns: List of relevant documents """ inputs = self.llm_chain.prep_inputs({"query": query}) structured_query = cast( StructuredQuery, self.llm_chain.predict_and_parse(callbacks=None, **inputs) ) if self.verbose:
https://python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/base.html
f06de8e32e77-2
) if self.verbose: print(structured_query) new_query, new_kwargs = self.structured_query_translator.visit_structured_query( structured_query ) if structured_query.limit is not None: new_kwargs["k"] = structured_query.limit search_kwargs = {**self.search_kwargs, **new_kwargs} docs = self.vectorstore.search(new_query, self.search_type, **search_kwargs) return docs [docs] async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, vectorstore: VectorStore, document_contents: str, metadata_field_info: List[AttributeInfo], structured_query_translator: Optional[Visitor] = None, chain_kwargs: Optional[Dict] = None, enable_limit: bool = False, **kwargs: Any, ) -> "SelfQueryRetriever": if structured_query_translator is None: structured_query_translator = _get_builtin_translator(vectorstore) chain_kwargs = chain_kwargs or {} if "allowed_comparators" not in chain_kwargs: chain_kwargs[ "allowed_comparators" ] = structured_query_translator.allowed_comparators if "allowed_operators" not in chain_kwargs: chain_kwargs[ "allowed_operators" ] = structured_query_translator.allowed_operators llm_chain = load_query_constructor_chain( llm, document_contents, metadata_field_info, enable_limit=enable_limit, **chain_kwargs, ) return cls(
https://python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/base.html
f06de8e32e77-3
**chain_kwargs, ) return cls( llm_chain=llm_chain, vectorstore=vectorstore, structured_query_translator=structured_query_translator, **kwargs, )
https://python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/base.html
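A usage sketch (assumes an existing Chroma/Pinecone/Qdrant/Weaviate `vectorstore` over movie summaries; the field names are illustrative):

from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.llms import OpenAI
from langchain.retrievers.self_query.base import SelfQueryRetriever

metadata_field_info = [
    AttributeInfo(name="genre", description="Movie genre", type="string"),
    AttributeInfo(name="year", description="Release year", type="integer"),
]
retriever = SelfQueryRetriever.from_llm(
    OpenAI(temperature=0),
    vectorstore,  # assumed pre-built vector store
    "Brief summary of a movie",
    metadata_field_info,
)
docs = retriever.get_relevant_documents("movies about dinosaurs after 1990")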
63674000c00a-0
Source code for langchain.vectorstores.redis """Wrapper around Redis vector database.""" from __future__ import annotations import json import logging import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Literal, Mapping, Optional, Tuple, Type, ) import numpy as np from pydantic import BaseModel, root_validator from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env from langchain.vectorstores.base import VectorStore, VectorStoreRetriever logger = logging.getLogger(__name__) if TYPE_CHECKING: from redis.client import Redis as RedisType from redis.commands.search.query import Query # required modules REDIS_REQUIRED_MODULES = [ {"name": "search", "ver": 20400}, {"name": "searchlight", "ver": 20400}, ] # distance metrics REDIS_DISTANCE_METRICS = Literal["COSINE", "IP", "L2"] def _check_redis_module_exist(client: RedisType, required_modules: List[dict]) -> None: """Check if the correct Redis modules are installed.""" installed_modules = client.module_list() installed_modules = { module[b"name"].decode("utf-8"): module for module in installed_modules } for module in required_modules: if module["name"] in installed_modules and int( installed_modules[module["name"]][b"ver"] ) >= int(module["ver"]): return # otherwise raise error error_message = ( "Redis cannot be used as a vector database without RediSearch >=2.4. "
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-1
"Redis cannot be used as a vector database without RediSearch >=2.4" "Please head to https://redis.io/docs/stack/search/quick_start/" "to know more about installing the RediSearch module within Redis Stack." ) logging.error(error_message) raise ValueError(error_message) def _check_index_exists(client: RedisType, index_name: str) -> bool: """Check if Redis index exists.""" try: client.ft(index_name).info() except: # noqa: E722 logger.info("Index does not exist") return False logger.info("Index already exists") return True def _redis_key(prefix: str) -> str: """Redis key schema for a given prefix.""" return f"{prefix}:{uuid.uuid4().hex}" def _redis_prefix(index_name: str) -> str: """Redis key prefix for a given index.""" return f"doc:{index_name}" def _default_relevance_score(val: float) -> float: return 1 - val [docs]class Redis(VectorStore): """Wrapper around Redis vector database. To use, you should have the ``redis`` python package installed. Example: .. code-block:: python from langchain.vectorstores import Redis from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = Redis( redis_url="redis://username:password@localhost:6379" index_name="my-index", embedding_function=embeddings.embed_query, ) """ def __init__( self, redis_url: str, index_name: str, embedding_function: Callable,
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-2
index_name: str, embedding_function: Callable, content_key: str = "content", metadata_key: str = "metadata", vector_key: str = "content_vector", relevance_score_fn: Optional[ Callable[[float], float] ] = _default_relevance_score, **kwargs: Any, ): """Initialize with necessary components.""" try: import redis except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis>=4.1.0`." ) self.embedding_function = embedding_function self.index_name = index_name try: # connect to redis from url redis_client = redis.from_url(redis_url, **kwargs) # check if redis has redisearch module installed _check_redis_module_exist(redis_client, REDIS_REQUIRED_MODULES) except ValueError as e: raise ValueError(f"Redis failed to connect: {e}") self.client = redis_client self.content_key = content_key self.metadata_key = metadata_key self.vector_key = vector_key self.relevance_score_fn = relevance_score_fn def _create_index( self, dim: int = 1536, distance_metric: REDIS_DISTANCE_METRICS = "COSINE" ) -> None: try: from redis.commands.search.field import TextField, VectorField from redis.commands.search.indexDefinition import IndexDefinition, IndexType except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) # Check if index exists if not _check_index_exists(self.client, self.index_name):
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-3
if not _check_index_exists(self.client, self.index_name): # Define schema schema = ( TextField(name=self.content_key), TextField(name=self.metadata_key), VectorField( self.vector_key, "FLAT", { "TYPE": "FLOAT32", "DIM": dim, "DISTANCE_METRIC": distance_metric, }, ), ) prefix = _redis_prefix(self.index_name) # Create Redis Index self.client.ft(self.index_name).create_index( fields=schema, definition=IndexDefinition(prefix=[prefix], index_type=IndexType.HASH), ) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, embeddings: Optional[List[List[float]]] = None, keys: Optional[List[str]] = None, batch_size: int = 1000, **kwargs: Any, ) -> List[str]: """Add more texts to the vectorstore. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. Defaults to None. embeddings (Optional[List[List[float]]], optional): Optional pre-generated embeddings. Defaults to None. keys (Optional[List[str]], optional): Optional key values to use as ids. Defaults to None. batch_size (int, optional): Batch size to use for writes. Defaults to 1000. Returns: List[str]: List of ids added to the vectorstore """ ids = [] prefix = _redis_prefix(self.index_name)
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-4
""" ids = [] prefix = _redis_prefix(self.index_name) # Write data to redis pipeline = self.client.pipeline(transaction=False) for i, text in enumerate(texts): # Use provided values by default or fallback key = keys[i] if keys else _redis_key(prefix) metadata = metadatas[i] if metadatas else {} embedding = embeddings[i] if embeddings else self.embedding_function(text) pipeline.hset( key, mapping={ self.content_key: text, self.vector_key: np.array(embedding, dtype=np.float32).tobytes(), self.metadata_key: json.dumps(metadata), }, ) ids.append(key) # Write batch if i % batch_size == 0: pipeline.execute() # Cleanup final batch pipeline.execute() return ids [docs] def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """ Returns the most similar indexed documents to the query text. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. Returns: List[Document]: A list of documents that are most similar to the query text. """ docs_and_scores = self.similarity_search_with_score(query, k=k) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search_limit_score( self, query: str, k: int = 4, score_threshold: float = 0.2, **kwargs: Any ) -> List[Document]: """
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-5
) -> List[Document]: """ Returns the most similar indexed documents to the query text within the score_threshold range. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. score_threshold (float): The minimum matching score required for a document to be considered a match. Defaults to 0.2. Because the similarity calculation algorithm is based on cosine similarity, the smaller the angle, the higher the similarity. Returns: List[Document]: A list of documents that are most similar to the query text, including the match score for each document. Note: If there are no documents that satisfy the score_threshold value, an empty list is returned. """ docs_and_scores = self.similarity_search_with_score(query, k=k) return [doc for doc, score in docs_and_scores if score < score_threshold] def _prepare_query(self, k: int) -> Query: try: from redis.commands.search.query import Query except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) # Prepare the Query hybrid_fields = "*" base_query = ( f"{hybrid_fields}=>[KNN {k} @{self.vector_key} $vector AS vector_score]" ) return_fields = [self.metadata_key, self.content_key, "vector_score"] return ( Query(base_query) .return_fields(*return_fields) .sort_by("vector_score") .paging(0, k) .dialect(2) )
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-6
.paging(0, k) .dialect(2) ) [docs] def similarity_search_with_score( self, query: str, k: int = 4 ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query and score for each """ # Creates embedding vector from user query embedding = self.embedding_function(query) # Creates Redis query redis_query = self._prepare_query(k) params_dict: Mapping[str, str] = { "vector": np.array(embedding) # type: ignore .astype(dtype=np.float32) .tobytes() } # Perform vector search results = self.client.ft(self.index_name).search(redis_query, params_dict) # Prepare document results docs = [ ( Document( page_content=result.content, metadata=json.loads(result.metadata) ), float(result.vector_score), ) for result in results.docs ] return docs def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores, normalized on a scale from 0 to 1. 0 is dissimilar, 1 is most similar. """ if self.relevance_score_fn is None: raise ValueError( "relevance_score_fn must be provided to"
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-7
raise ValueError( "relevance_score_fn must be provided to" " Redis constructor to normalize scores" ) docs_and_scores = self.similarity_search_with_score(query, k=k) return [(doc, self.relevance_score_fn(score)) for doc, score in docs_and_scores] [docs] @classmethod def from_texts_return_keys( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, index_name: Optional[str] = None, content_key: str = "content", metadata_key: str = "metadata", vector_key: str = "content_vector", distance_metric: REDIS_DISTANCE_METRICS = "COSINE", **kwargs: Any, ) -> Tuple[Redis, List[str]]: """Create a Redis vectorstore from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in Redis. 3. Adds the documents to the newly created Redis index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Redis from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() redisearch = Redis.from_texts( texts, embeddings, redis_url="redis://username:password@localhost:6379" ) """ redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL") if "redis_url" in kwargs: kwargs.pop("redis_url") # Name of the search index if not given
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-8
kwargs.pop("redis_url") # Name of the search index if not given if not index_name: index_name = uuid.uuid4().hex # Create instance instance = cls( redis_url, index_name, embedding.embed_query, content_key=content_key, metadata_key=metadata_key, vector_key=vector_key, **kwargs, ) # Create embeddings over documents embeddings = embedding.embed_documents(texts) # Create the search index instance._create_index(dim=len(embeddings[0]), distance_metric=distance_metric) # Add data to Redis keys = instance.add_texts(texts, metadatas, embeddings) return instance, keys [docs] @classmethod def from_texts( cls: Type[Redis], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, index_name: Optional[str] = None, content_key: str = "content", metadata_key: str = "metadata", vector_key: str = "content_vector", **kwargs: Any, ) -> Redis: """Create a Redis vectorstore from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in Redis. 3. Adds the documents to the newly created Redis index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Redis from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() redisearch = RediSearch.from_texts(
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-9
embeddings = OpenAIEmbeddings() redisearch = Redis.from_texts( texts, embeddings, redis_url="redis://username:password@localhost:6379" ) """ instance, _ = cls.from_texts_return_keys( texts, embedding, metadatas=metadatas, index_name=index_name, content_key=content_key, metadata_key=metadata_key, vector_key=vector_key, **kwargs, ) return instance [docs] @staticmethod def drop_index( index_name: str, delete_documents: bool, **kwargs: Any, ) -> bool: """ Drop a Redis search index. Args: index_name (str): Name of the index to drop. delete_documents (bool): Whether to drop the associated documents. Returns: bool: Whether or not the drop was successful. """ redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL") try: import redis except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) try: # We need to first remove redis_url from kwargs, # otherwise passing it to Redis will result in an error. if "redis_url" in kwargs: kwargs.pop("redis_url") client = redis.from_url(url=redis_url, **kwargs) except ValueError as e: raise ValueError(f"Your redis connection error: {e}") # Check if index exists try: client.ft(index_name).dropindex(delete_documents)
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-10
try: client.ft(index_name).dropindex(delete_documents) logger.info("Drop index") return True except: # noqa: E722 # Index does not exist return False [docs] @classmethod def from_existing_index( cls, embedding: Embeddings, index_name: str, content_key: str = "content", metadata_key: str = "metadata", vector_key: str = "content_vector", **kwargs: Any, ) -> Redis: """Connect to an existing Redis index.""" redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL") try: import redis except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) try: # We need to first remove redis_url from kwargs, # otherwise passing it to Redis will result in an error. if "redis_url" in kwargs: kwargs.pop("redis_url") client = redis.from_url(url=redis_url, **kwargs) # check if redis has redisearch module installed _check_redis_module_exist(client, REDIS_REQUIRED_MODULES) # ensure that the index already exists assert _check_index_exists( client, index_name ), f"Index {index_name} does not exist" except Exception as e: raise ValueError(f"Redis failed to connect: {e}") return cls( redis_url, index_name, embedding.embed_query, content_key=content_key, metadata_key=metadata_key, vector_key=vector_key, **kwargs, )
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-11
vector_key=vector_key, **kwargs, ) [docs] def as_retriever(self, **kwargs: Any) -> RedisVectorStoreRetriever: return RedisVectorStoreRetriever(vectorstore=self, **kwargs) class RedisVectorStoreRetriever(VectorStoreRetriever, BaseModel): vectorstore: Redis search_type: str = "similarity" k: int = 4 score_threshold: float = 0.4 class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator() def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" if "search_type" in values: search_type = values["search_type"] if search_type not in ("similarity", "similarity_limit"): raise ValueError(f"search_type of {search_type} not allowed.") return values def get_relevant_documents(self, query: str) -> List[Document]: if self.search_type == "similarity": docs = self.vectorstore.similarity_search(query, k=self.k) elif self.search_type == "similarity_limit": docs = self.vectorstore.similarity_search_limit_score( query, k=self.k, score_threshold=self.score_threshold ) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError("RedisVectorStoreRetriever does not support async") def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Add documents to vectorstore."""
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
63674000c00a-12
"""Add documents to vectorstore.""" return self.vectorstore.add_documents(documents, **kwargs) async def aadd_documents( self, documents: List[Document], **kwargs: Any ) -> List[str]: """Add documents to vectorstore.""" return await self.vectorstore.aadd_documents(documents, **kwargs) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 11, 2023.
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
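A usage sketch tying the pieces together (assumes a Redis Stack instance with RediSearch at the given URL and an OpenAI key; the index name is illustrative):

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Redis

rds = Redis.from_texts(
    ["foo", "bar", "baz"],
    OpenAIEmbeddings(),
    redis_url="redis://localhost:6379",
    index_name="example",
)
# "similarity_limit" applies the score_threshold cut-off implemented above.
retriever = rds.as_retriever(search_type="similarity_limit", score_threshold=0.2)
docs = retriever.get_relevant_documents("foo")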
c421bd8e3941-0
Source code for langchain.vectorstores.clickhouse """Wrapper around open source ClickHouse VectorSearch capability.""" from __future__ import annotations import json import logging from hashlib import sha1 from threading import Thread from typing import Any, Dict, Iterable, List, Optional, Tuple, Union from pydantic import BaseSettings from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore logger = logging.getLogger() def has_mul_sub_str(s: str, *args: Any) -> bool: for a in args: if a not in s: return False return True [docs]class ClickhouseSettings(BaseSettings): """ClickHouse Client Configuration Attributes: clickhouse_host (str) : Hostname to connect to the ClickHouse backend. Defaults to 'localhost'. clickhouse_port (int) : Port to connect with HTTP. Defaults to 8123. username (str) : Username to login. Defaults to None. password (str) : Password to login. Defaults to None. index_type (str): index type string. index_param (list): index build parameter. index_query_params(dict): index query parameters. database (str) : Database name to find the table. Defaults to 'default'. table (str) : Table name to operate on. Defaults to 'langchain'. metric (str) : Metric to compute distance; supported values are ('angular', 'euclidean', 'manhattan', 'hamming', 'dot'). Defaults to 'angular'. https://github.com/spotify/annoy/blob/main/src/annoymodule.cc#L149-L169
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/clickhouse.html
c421bd8e3941-1
column_map (Dict) : Column type map to project column name onto langchain semantics. Must have keys: `text`, `id`, `vector`, and must match the number of columns. For example: .. code-block:: python { 'id': 'text_id', 'uuid': 'global_unique_id', 'embedding': 'text_embedding', 'document': 'text_plain', 'metadata': 'metadata_dictionary_in_json', } Defaults to identity map. """ host: str = "localhost" port: int = 8123 username: Optional[str] = None password: Optional[str] = None index_type: str = "annoy" # Annoy supports L2Distance and cosineDistance. index_param: Optional[Union[List, Dict]] = [100, "'L2Distance'"] index_query_params: Dict[str, str] = {} column_map: Dict[str, str] = { "id": "id", "uuid": "uuid", "document": "document", "embedding": "embedding", "metadata": "metadata", } database: str = "default" table: str = "langchain" metric: str = "angular" def __getitem__(self, item: str) -> Any: return getattr(self, item) class Config: env_file = ".env" env_prefix = "clickhouse_" env_file_encoding = "utf-8" [docs]class Clickhouse(VectorStore): """Wrapper around ClickHouse vector database You need the `clickhouse-connect` python package and a valid account to connect to ClickHouse.
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/clickhouse.html
c421bd8e3941-2
to connect to ClickHouse. ClickHouse can not only search with simple vector indexes; it also supports complex queries with multiple conditions, constraints and even sub-queries. For more information, please visit [ClickHouse official site](https://clickhouse.com/clickhouse) """ def __init__( self, embedding: Embeddings, config: Optional[ClickhouseSettings] = None, **kwargs: Any, ) -> None: """ClickHouse wrapper for LangChain embedding_function (Embeddings): config (ClickHouseSettings): Configuration for the ClickHouse client Other keyword arguments are passed to [clickhouse-connect](https://docs.clickhouse.com/) """ try: from clickhouse_connect import get_client except ImportError: raise ValueError( "Could not import clickhouse connect python package. " "Please install it with `pip install clickhouse-connect`." ) try: from tqdm import tqdm self.pgbar = tqdm except ImportError: # Fall back in case tqdm is not installed self.pgbar = lambda x, **kwargs: x super().__init__() if config is not None: self.config = config else: self.config = ClickhouseSettings() assert self.config assert self.config.host and self.config.port assert ( self.config.column_map and self.config.database and self.config.table and self.config.metric ) for k in ["id", "embedding", "document", "metadata", "uuid"]: assert k in self.config.column_map assert self.config.metric in [ "angular", "euclidean", "manhattan",
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/clickhouse.html
c421bd8e3941-3
"angular", "euclidean", "manhattan", "hamming", "dot", ] # initialize the schema dim = len(embedding.embed_query("test")) index_params = ( ( ",".join([f"'{k}={v}'" for k, v in self.config.index_param.items()]) if self.config.index_param else "" ) if isinstance(self.config.index_param, Dict) else ",".join([str(p) for p in self.config.index_param]) if isinstance(self.config.index_param, List) else self.config.index_param ) self.schema = f"""\ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( {self.config.column_map['id']} Nullable(String), {self.config.column_map['document']} Nullable(String), {self.config.column_map['embedding']} Array(Float32), {self.config.column_map['metadata']} JSON, {self.config.column_map['uuid']} UUID DEFAULT generateUUIDv4(), CONSTRAINT cons_vec_len CHECK length({self.config.column_map['embedding']}) = {dim}, INDEX vec_idx {self.config.column_map['embedding']} TYPE \ {self.config.index_type}({index_params}) GRANULARITY 1000 ) ENGINE = MergeTree ORDER BY uuid SETTINGS index_granularity = 8192\ """ self.dim = dim self.BS = "\\" self.must_escape = ("\\", "'") self.embedding_function = embedding self.dist_order = "ASC" # Only support ConsingDistance and L2Distance # Create a connection to clickhouse self.client = get_client( host=self.config.host, port=self.config.port,
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/clickhouse.html
c421bd8e3941-4
host=self.config.host, port=self.config.port, username=self.config.username, password=self.config.password, **kwargs, ) # Enable JSON type self.client.command("SET allow_experimental_object_type=1") # Enable Annoy index self.client.command("SET allow_experimental_annoy_index=1") self.client.command(self.schema) [docs] def escape_str(self, value: str) -> str: return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value) def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> str: ks = ",".join(column_names) _data = [] for n in transac: n = ",".join([f"'{self.escape_str(str(_n))}'" for _n in n]) _data.append(f"({n})") i_str = f""" INSERT INTO TABLE {self.config.database}.{self.config.table}({ks}) VALUES {','.join(_data)} """ return i_str def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None: _insert_query = self._build_insert_sql(transac, column_names) self.client.command(_insert_query) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 32, ids: Optional[Iterable[str]] = None, **kwargs: Any, ) -> List[str]: """Insert more texts through the embeddings and add to the VectorStore. Args:
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/clickhouse.html
c421bd8e3941-5
"""Insert more texts through the embeddings and add to the VectorStore. Args: texts: Iterable of strings to add to the VectorStore. ids: Optional list of ids to associate with the texts. batch_size: Batch size of insertion metadata: Optional column data to be inserted Returns: List of ids from adding the texts into the VectorStore. """ # Embed and create the documents ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts] colmap_ = self.config.column_map transac = [] column_names = { colmap_["id"]: ids, colmap_["document"]: texts, colmap_["embedding"]: self.embedding_function.embed_documents(list(texts)), } metadatas = metadatas or [{} for _ in texts] column_names[colmap_["metadata"]] = map(json.dumps, metadatas) assert len(set(colmap_) - set(column_names)) >= 0 keys, values = zip(*column_names.items()) try: t = None for v in self.pgbar( zip(*values), desc="Inserting data...", total=len(metadatas) ): assert ( len(v[keys.index(self.config.column_map["embedding"])]) == self.dim ) transac.append(v) if len(transac) == batch_size: if t: t.join() t = Thread(target=self._insert, args=[transac, keys]) t.start() transac = [] if len(transac) > 0: if t: t.join() self._insert(transac, keys)
            return [i for i in ids]
        except Exception as e:
            logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
            return []

[docs] @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[Dict[Any, Any]]] = None,
        config: Optional[ClickhouseSettings] = None,
        text_ids: Optional[Iterable[str]] = None,
        batch_size: int = 32,
        **kwargs: Any,
    ) -> Clickhouse:
        """Create ClickHouse wrapper with existing texts

        Args:
            texts (List[str]): List or tuple of strings to be added
            embedding (Embeddings): Function to extract text embedding
            config (ClickhouseSettings, Optional): ClickHouse configuration
            text_ids (Optional[Iterable], optional): IDs for the texts.
                Defaults to None.
            batch_size (int, optional): Batch size when transmitting data to
                ClickHouse. Defaults to 32.
            metadatas (List[dict], optional): metadata attached to the texts.
                Defaults to None.
            Other keyword arguments will pass into
                [clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api)

        Returns:
            ClickHouse Index
        """
        ctx = cls(embedding, config, **kwargs)
        ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas)
        return ctx
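As a usage sketch, `from_texts` handles table creation and insertion in one call. The snippet assumes a reachable ClickHouse server with the experimental JSON and Annoy settings available, an `OPENAI_API_KEY` in the environment, and a LangChain version that exports these names; the table name is arbitrary.

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Clickhouse, ClickhouseSettings

# host/port fall back to the settings defaults (localhost:8123).
settings = ClickhouseSettings(table="langchain_demo")
docsearch = Clickhouse.from_texts(
    ["ClickHouse is a column-oriented DBMS.", "LangChain wraps vector stores."],
    OpenAIEmbeddings(),
    metadatas=[{"source": "notes"}, {"source": "docs"}],
    config=settings,
)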
    def __repr__(self) -> str:
        """Text representation for ClickHouse Vector Store, prints backends, username
            and schemas. Easy to use with `str(ClickHouse())`

        Returns:
            repr: string to show connection info and data schema
        """
        _repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ "
        _repr += f"{self.config.host}:{self.config.port}\033[0m\n\n"
        _repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n"
        _repr += "-" * 51 + "\n"
        for r in self.client.query(
            f"DESC {self.config.database}.{self.config.table}"
        ).named_results():
            _repr += (
                f"|\033[94m{r['name']:24s}\033[0m|\033[96m{r['type']:24s}\033[0m|\n"
            )
        _repr += "-" * 51 + "\n"
        return _repr

    def _build_query_sql(
        self, q_emb: List[float], topk: int, where_str: Optional[str] = None
    ) -> str:
        q_emb_str = ",".join(map(str, q_emb))
        if where_str:
            where_str = f"PREWHERE {where_str}"
        else:
            where_str = ""

        settings_str = ""
        if self.config.index_query_params:
            # ClickHouse expects a single SETTINGS clause with comma-separated pairs.
            settings_str = "SETTINGS " + ",".join(
                f"{k}={v}" for k, v in self.config.index_query_params.items()
            )
q_str = f""" SELECT {self.config.column_map['document']}, {self.config.column_map['metadata']}, dist FROM {self.config.database}.{self.config.table} {where_str} ORDER BY L2Distance({self.config.column_map['embedding']}, [{q_emb_str}]) AS dist {self.dist_order} LIMIT {topk} {' '.join(settings_strs)} """ return q_str [docs] def similarity_search( self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Document]: """Perform a similarity search with ClickHouse Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of Documents """ return self.similarity_search_by_vector( self.embedding_function.embed_query(query), k, where_str, **kwargs ) [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search with ClickHouse by vectors Args: query (str): query string
[docs] def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        where_str: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Perform a similarity search with ClickHouse by vectors

        Args:
            embedding (List[float]): query embedding vector
            k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            where_str (Optional[str], optional): where condition string.
                Defaults to None.

            NOTE: Please do not let end users fill this out, and always be aware
                  of SQL injection. When dealing with metadatas, remember to
                  use `{self.metadata_column}.attribute` instead of `attribute`
                  alone. The default name for it is `metadata`.

        Returns:
            List[Document]: List of Documents
        """
        q_str = self._build_query_sql(embedding, k, where_str)
        try:
            return [
                Document(
                    page_content=r[self.config.column_map["document"]],
                    metadata=r[self.config.column_map["metadata"]],
                )
                for r in self.client.query(q_str).named_results()
            ]
        except Exception as e:
            logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
            return []

[docs] def similarity_search_with_relevance_scores(
        self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Perform a similarity search with ClickHouse

        Args:
            query (str): query string
            k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            where_str (Optional[str], optional): where condition string.
                Defaults to None.
            NOTE: Please do not let end users fill this out, and always be aware
                  of SQL injection. When dealing with metadatas, remember to
                  use `{self.metadata_column}.attribute` instead of `attribute`
                  alone. The default name for it is `metadata`.

        Returns:
            List[Tuple[Document, float]]: List of (Document, distance) pairs
        """
        q_str = self._build_query_sql(
            self.embedding_function.embed_query(query), k, where_str
        )
        try:
            return [
                (
                    Document(
                        page_content=r[self.config.column_map["document"]],
                        metadata=r[self.config.column_map["metadata"]],
                    ),
                    r["dist"],
                )
                for r in self.client.query(q_str).named_results()
            ]
        except Exception as e:
            logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
            return []

[docs] def drop(self) -> None:
        """
        Helper function: Drop data
        """
        self.client.command(
            f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}"
        )

    @property
    def metadata_column(self) -> str:
        return self.config.column_map["metadata"]
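Despite the method name, the second element of each returned tuple is the raw L2 distance computed in SQL, so smaller values mean closer matches. A short usage sketch, continuing with the `docsearch` store from the sketches above:

docs_and_dists = docsearch.similarity_search_with_relevance_scores(
    "column-oriented databases", k=2
)
for doc, dist in docs_and_dists:
    # `dist` is the L2 distance from the query vector; lower is more similar.
    print(round(dist, 4), doc.page_content)

docsearch.drop()  # drops the backing table; irreversible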
Source code for langchain.vectorstores.mongodb_atlas from __future__ import annotations import logging from typing import ( TYPE_CHECKING, Any, Dict, Generator, Iterable, List, Optional, Tuple, TypeVar, Union, ) from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: from pymongo.collection import Collection MongoDBDocumentType = TypeVar("MongoDBDocumentType", bound=Dict[str, Any]) logger = logging.getLogger(__name__) DEFAULT_INSERT_BATCH_SIZE = 100 [docs]class MongoDBAtlasVectorSearch(VectorStore): """Wrapper around MongoDB Atlas Vector Search. To use, you should have both: - the ``pymongo`` python package installed - a connection string associated with a MongoDB Atlas Cluster having deployed an Atlas Search index Example: .. code-block:: python from langchain.vectorstores import MongoDBAtlasVectorSearch from langchain.embeddings.openai import OpenAIEmbeddings from pymongo import MongoClient mongo_client = MongoClient("<YOUR-CONNECTION-STRING>") collection = mongo_client["<db_name>"]["<collection_name>"] embeddings = OpenAIEmbeddings() vectorstore = MongoDBAtlasVectorSearch(collection, embeddings) """ def __init__( self, collection: Collection[MongoDBDocumentType], embedding: Embeddings, *, index_name: str = "default", text_key: str = "text", embedding_key: str = "embedding", ): """ Args: collection: MongoDB collection to add the texts to.
""" Args: collection: MongoDB collection to add the texts to. embedding: Text embedding model to use. text_key: MongoDB field that will contain the text for each document. embedding_key: MongoDB field that will contain the embedding for each document. """ self._collection = collection self._embedding = embedding self._index_name = index_name self._text_key = text_key self._embedding_key = embedding_key [docs] @classmethod def from_connection_string( cls, connection_string: str, namespace: str, embedding: Embeddings, **kwargs: Any, ) -> MongoDBAtlasVectorSearch: try: from pymongo import MongoClient except ImportError: raise ImportError( "Could not import pymongo, please install it with " "`pip install pymongo`." ) client: MongoClient = MongoClient(connection_string) db_name, collection_name = namespace.split(".") collection = client[db_name][collection_name] return cls(collection, embedding, **kwargs) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[str, Any]]] = None, **kwargs: Any, ) -> List: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. Returns: List of ids from adding the texts into the vectorstore. """ batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE)
""" batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE) _metadatas: Union[List, Generator] = metadatas or ({} for _ in texts) texts_batch = [] metadatas_batch = [] result_ids = [] for i, (text, metadata) in enumerate(zip(texts, _metadatas)): texts_batch.append(text) metadatas_batch.append(metadata) if (i + 1) % batch_size == 0: result_ids.extend(self._insert_texts(texts_batch, metadatas_batch)) texts_batch = [] metadatas_batch = [] if texts_batch: result_ids.extend(self._insert_texts(texts_batch, metadatas_batch)) return result_ids def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]]) -> List: if not texts: return [] # Embed and create the documents embeddings = self._embedding.embed_documents(texts) to_insert = [ {self._text_key: t, self._embedding_key: embedding, **m} for t, m, embedding in zip(texts, metadatas, embeddings) ] # insert the documents in MongoDB Atlas insert_result = self._collection.insert_many(to_insert) return insert_result.inserted_ids [docs] def similarity_search_with_score( self, query: str, *, k: int = 4, pre_filter: Optional[dict] = None, post_filter_pipeline: Optional[List[Dict]] = None, ) -> List[Tuple[Document, float]]: """Return MongoDB documents most similar to query, along with scores.
"""Return MongoDB documents most similar to query, along with scores. Use the knnBeta Operator available in MongoDB Atlas Search This feature is in early access and available only for evaluation purposes, to validate functionality, and to gather feedback from a small closed group of early access users. It is not recommended for production deployments as we may introduce breaking changes. For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta Args: query: Text to look up documents similar to. k: Optional Number of Documents to return. Defaults to 4. pre_filter: Optional Dictionary of argument(s) to prefilter on document fields. post_filter_pipeline: Optional Pipeline of MongoDB aggregation stages following the knnBeta search. Returns: List of Documents most similar to the query and score for each """ knn_beta = { "vector": self._embedding.embed_query(query), "path": self._embedding_key, "k": k, } if pre_filter: knn_beta["filter"] = pre_filter pipeline = [ { "$search": { "index": self._index_name, "knnBeta": knn_beta, } }, {"$project": {"score": {"$meta": "searchScore"}, self._embedding_key: 0}}, ] if post_filter_pipeline is not None: pipeline.extend(post_filter_pipeline) cursor = self._collection.aggregate(pipeline) docs = [] for res in cursor: text = res.pop(self._text_key) score = res.pop("score") docs.append((Document(page_content=text, metadata=res), score)) return docs
[docs] def similarity_search(
        self,
        query: str,
        k: int = 4,
        pre_filter: Optional[dict] = None,
        post_filter_pipeline: Optional[List[Dict]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return MongoDB documents most similar to query.

        Use the knnBeta Operator available in MongoDB Atlas Search.
        This feature is in early access and available only for evaluation
        purposes, to validate functionality, and to gather feedback from a small
        closed group of early access users. It is not recommended for production
        deployments as we may introduce breaking changes.
        For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta

        Args:
            query: Text to look up documents similar to.
            k: Optional Number of Documents to return. Defaults to 4.
            pre_filter: Optional Dictionary of argument(s) to prefilter on document
                fields.
            post_filter_pipeline: Optional Pipeline of MongoDB aggregation stages
                following the knnBeta search.

        Returns:
            List of Documents most similar to the query
        """
        docs_and_scores = self.similarity_search_with_score(
            query,
            k=k,
            pre_filter=pre_filter,
            post_filter_pipeline=post_filter_pipeline,
        )
        return [doc for doc, _ in docs_and_scores]
[docs] @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        collection: Optional[Collection[MongoDBDocumentType]] = None,
        **kwargs: Any,
    ) -> MongoDBAtlasVectorSearch:
        """Construct MongoDBAtlasVectorSearch wrapper from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Adds the documents to a provided MongoDB Atlas Vector Search index
                (Lucene)

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from pymongo import MongoClient

                from langchain.vectorstores import MongoDBAtlasVectorSearch
                from langchain.embeddings import OpenAIEmbeddings

                mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
                collection = mongo_client["<db_name>"]["<collection_name>"]
                embeddings = OpenAIEmbeddings()
                vectorstore = MongoDBAtlasVectorSearch.from_texts(
                    texts,
                    embeddings,
                    metadatas=metadatas,
                    collection=collection
                )
        """
        if collection is None:
            raise ValueError("Must provide 'collection' named parameter.")
        vecstore = cls(collection, embedding, **kwargs)
        vecstore.add_texts(texts, metadatas=metadatas)
        return vecstore