Dataset columns:
date_collected: stringclasses (1 value)
repo_name: stringlengths (6–116)
file_name: stringlengths (2–220)
file_contents: stringlengths (13–357k)
prompts: sequence
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~together.py
"""Wrapper around Together AI's Completion API.""" import logging from typing import Any, Dict, List, Optional from aiohttp import ClientSession from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.pydantic_v1 import Extra, SecretStr, root_validator from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env from langchain_community.utilities.requests import Requests logger = logging.getLogger(__name__) class Together(LLM): """LLM models from `Together`. To use, you'll need an API key which you can find here: https://api.together.xyz/settings/api-keys. This can be passed in as init param ``together_api_key`` or set as environment variable ``TOGETHER_API_KEY``. Together AI API reference: https://docs.together.ai/reference/inference """ base_url: str = "https://api.together.xyz/inference" """Base inference API URL.""" together_api_key: SecretStr """Together AI API key. Get it here: https://api.together.xyz/settings/api-keys""" model: str """Model name. Available models listed here: https://docs.together.ai/docs/inference-models """ temperature: Optional[float] = None """Model temperature.""" top_p: Optional[float] = None """Used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. A value of 1 will always yield the same output. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value greater than 1 introduces more randomness in the output. """ top_k: Optional[int] = None """Used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. """ max_tokens: Optional[int] = None """The maximum number of tokens to generate.""" repetition_penalty: Optional[float] = None """A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. """ logprobs: Optional[int] = None """An integer that specifies how many top token log probabilities are included in the response for each token generation step. """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" values["together_api_key"] = convert_to_secret_str( get_from_dict_or_env(values, "together_api_key", "TOGETHER_API_KEY") ) return values @property def _llm_type(self) -> str: """Return type of model.""" return "together" def _format_output(self, output: dict) -> str: return output["output"]["choices"][0]["text"] @staticmethod def get_user_agent() -> str: from langchain_community import __version__ return f"langchain/{__version__}" @property def default_params(self) -> Dict[str, Any]: return { "model": self.model, "temperature": self.temperature, "top_p": self.top_p, "top_k": self.top_k, "max_tokens": self.max_tokens, "repetition_penalty": self.repetition_penalty, } def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Together's text generation endpoint. 
Args: prompt: The prompt to pass into the model. Returns: The string generated by the model.. """ headers = { "Authorization": f"Bearer {self.together_api_key.get_secret_value()}", "Content-Type": "application/json", } stop_to_use = stop[0] if stop and len(stop) == 1 else stop payload: Dict[str, Any] = { **self.default_params, "prompt": prompt, "stop": stop_to_use, **kwargs, } # filter None values to not pass them to the http payload payload = {k: v for k, v in payload.items() if v is not None} request = Requests(headers=headers) response = request.post(url=self.base_url, data=payload) if response.status_code >= 500: raise Exception(f"Together Server: Error {response.status_code}") elif response.status_code >= 400: raise ValueError(f"Together received an invalid payload: {response.text}") elif response.status_code != 200: raise Exception( f"Together returned an unexpected response with status " f"{response.status_code}: {response.text}" ) data = response.json() if data.get("status") != "finished": err_msg = data.get("error", "Undefined Error") raise Exception(err_msg) output = self._format_output(data) return output async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call Together model to get predictions based on the prompt. Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. """ headers = { "Authorization": f"Bearer {self.together_api_key.get_secret_value()}", "Content-Type": "application/json", } stop_to_use = stop[0] if stop and len(stop) == 1 else stop payload: Dict[str, Any] = { **self.default_params, "prompt": prompt, "stop": stop_to_use, **kwargs, } # filter None values to not pass them to the http payload payload = {k: v for k, v in payload.items() if v is not None} async with ClientSession() as session: async with session.post( self.base_url, json=payload, headers=headers ) as response: if response.status >= 500: raise Exception(f"Together Server: Error {response.status}") elif response.status >= 400: raise ValueError( f"Together received an invalid payload: {response.text}" ) elif response.status != 200: raise Exception( f"Together returned an unexpected response with status " f"{response.status}: {response.text}" ) response_json = await response.json() if response_json.get("status") != "finished": err_msg = response_json.get("error", "Undefined Error") raise Exception(err_msg) output = self._format_output(response_json) return output
[]
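A minimal usage sketch for the wrapper above, assuming the standard installed import path (langchain_community.llms.together rather than the repo-relative libs.core paths) and an illustrative model name; the key is read from TOGETHER_API_KEY by validate_environment:

```python
import os

from langchain_community.llms.together import Together

os.environ["TOGETHER_API_KEY"] = "..."  # placeholder key, picked up by validate_environment

llm = Together(
    model="togethercomputer/RedPajama-INCITE-7B-Base",  # illustrative; see docs.together.ai for the model list
    temperature=0.7,
    max_tokens=128,
)
print(llm.invoke("Name three colors commonly seen in a sunset:"))
```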
2024-01-10
mth93/langchain
libs~community~langchain_community~agent_toolkits~zapier~toolkit.py
"""[DEPRECATED] Zapier Toolkit.""" from typing import List from libs.core.langchain_core._api import warn_deprecated from langchain_community.agent_toolkits.base import BaseToolkit from langchain_community.tools import BaseTool from langchain_community.tools.zapier.tool import ZapierNLARunAction from langchain_community.utilities.zapier import ZapierNLAWrapper class ZapierToolkit(BaseToolkit): """Zapier Toolkit.""" tools: List[BaseTool] = [] @classmethod def from_zapier_nla_wrapper( cls, zapier_nla_wrapper: ZapierNLAWrapper ) -> "ZapierToolkit": """Create a toolkit from a ZapierNLAWrapper.""" actions = zapier_nla_wrapper.list() tools = [ ZapierNLARunAction( action_id=action["id"], zapier_description=action["description"], params_schema=action["params"], api_wrapper=zapier_nla_wrapper, ) for action in actions ] return cls(tools=tools) @classmethod async def async_from_zapier_nla_wrapper( cls, zapier_nla_wrapper: ZapierNLAWrapper ) -> "ZapierToolkit": """Create a toolkit from a ZapierNLAWrapper.""" actions = await zapier_nla_wrapper.alist() tools = [ ZapierNLARunAction( action_id=action["id"], zapier_description=action["description"], params_schema=action["params"], api_wrapper=zapier_nla_wrapper, ) for action in actions ] return cls(tools=tools) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" warn_deprecated( since="0.0.319", message=( "This tool will be deprecated on 2023-11-17. See " "https://nla.zapier.com/sunset/ for details" ), ) return self.tools
[]
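The toolkit is deprecated (get_tools itself warns about the NLA sunset), but the construction pattern is: wrap the Zapier NLA API, then derive one ZapierNLARunAction per exposed action. A hedged sketch, assuming ZAPIER_NLA_API_KEY is set in the environment:

```python
from langchain_community.agent_toolkits.zapier.toolkit import ZapierToolkit
from langchain_community.utilities.zapier import ZapierNLAWrapper

# ZapierNLAWrapper reads ZAPIER_NLA_API_KEY from the environment
toolkit = ZapierToolkit.from_zapier_nla_wrapper(ZapierNLAWrapper())
for tool in toolkit.get_tools():  # emits the deprecation warning shown above
    print(tool.name, "-", tool.description)
```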
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~question_answering~stuff_prompt.py
# flake8: noqa from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model from libs.core.langchain_core.prompts import PromptTemplate from libs.core.langchain_core.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. {context} Question: {question} Helpful Answer:""" PROMPT = PromptTemplate( template=prompt_template, input_variables=["context", "question"] ) system_template = """Use the following pieces of context to answer the user's question. If you don't know the answer, just say that you don't know, don't try to make up an answer. ---------------- {context}""" messages = [ SystemMessagePromptTemplate.from_template(system_template), HumanMessagePromptTemplate.from_template("{question}"), ] CHAT_PROMPT = ChatPromptTemplate.from_messages(messages) PROMPT_SELECTOR = ConditionalPromptSelector( default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)] )
[ "Use the following pieces of context to answer the user's question. \nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n----------------\n{context}", "question", "t know the answer, just say that you don", "context", "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:", "{question}" ]
2024-01-10
mth93/langchain
libs~community~tests~unit_tests~vectorstores~test_utils.py
"""Test vector store utility functions.""" import numpy as np from libs.core.langchain_core.documents import Document from langchain_community.vectorstores.utils import ( filter_complex_metadata, maximal_marginal_relevance, ) def test_maximal_marginal_relevance_lambda_zero() -> None: query_embedding = np.random.random(size=5) embedding_list = [query_embedding, query_embedding, np.zeros(5)] expected = [0, 2] actual = maximal_marginal_relevance( query_embedding, embedding_list, lambda_mult=0, k=2 ) assert expected == actual def test_maximal_marginal_relevance_lambda_one() -> None: query_embedding = np.random.random(size=5) embedding_list = [query_embedding, query_embedding, np.zeros(5)] expected = [0, 1] actual = maximal_marginal_relevance( query_embedding, embedding_list, lambda_mult=1, k=2 ) assert expected == actual def test_maximal_marginal_relevance() -> None: query_embedding = np.array([1, 0]) # Vectors that are 30, 45 and 75 degrees from query vector (cosine similarity of # 0.87, 0.71, 0.26) and the latter two are 15 and 60 degree from the first # (cosine similarity 0.97 and 0.71). So for 3rd vector be chosen, must be case that # 0.71lambda - 0.97(1 - lambda) < 0.26lambda - 0.71(1-lambda) # -> lambda ~< .26 / .71 embedding_list = [[3**0.5, 1], [1, 1], [1, 2 + (3**0.5)]] expected = [0, 2] actual = maximal_marginal_relevance( query_embedding, embedding_list, lambda_mult=(25 / 71), k=2 ) assert expected == actual expected = [0, 1] actual = maximal_marginal_relevance( query_embedding, embedding_list, lambda_mult=(27 / 71), k=2 ) assert expected == actual def test_maximal_marginal_relevance_query_dim() -> None: query_embedding = np.random.random(size=5) query_embedding_2d = query_embedding.reshape((1, 5)) embedding_list = np.random.random(size=(4, 5)).tolist() first = maximal_marginal_relevance(query_embedding, embedding_list) second = maximal_marginal_relevance(query_embedding_2d, embedding_list) assert first == second def test_filter_list_metadata() -> None: documents = [ Document( page_content="", metadata={ "key1": "this is a string!", "key2": ["a", "list", "of", "strings"], }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": {"foo"}, }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": {"foo": "bar"}, }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": True, }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": 1, }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": 1.0, }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": "foo", }, ), ] updated_documents = filter_complex_metadata(documents) filtered_metadata = [doc.metadata for doc in updated_documents] assert filtered_metadata == [ {"key1": "this is a string!"}, {"key1": "this is another string!"}, {"key1": "this is another string!"}, {"key1": "this is another string!", "key2": True}, {"key1": "this is another string!", "key2": 1}, {"key1": "this is another string!", "key2": 1.0}, {"key1": "this is another string!", "key2": "foo"}, ]
[]
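The tests pin down lambda_mult: 0 optimizes purely for diversity, 1 purely for similarity to the query. A standalone repro of the geometric case above, assuming the same public helper in langchain_community.vectorstores.utils:

```python
import numpy as np

from langchain_community.vectorstores.utils import maximal_marginal_relevance

query = np.array([1.0, 0.0])
# Vectors 30, 45 and 75 degrees from the query, as in the test above
embeddings = [[3**0.5, 1], [1, 1], [1, 2 + 3**0.5]]

print(maximal_marginal_relevance(query, embeddings, lambda_mult=25 / 71, k=2))  # [0, 2]
print(maximal_marginal_relevance(query, embeddings, lambda_mult=27 / 71, k=2))  # [0, 1]
```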
2024-01-10
mth93/langchain
libs~langchain~langchain~evaluation~agents~trajectory_eval_chain.py
"""A chain for evaluating ReAct style agents. This chain is used to evaluate ReAct style agents by reasoning about the sequence of actions taken and their outcomes. It uses a language model chain (LLMChain) to generate the reasoning and scores. """ import re from typing import ( Any, Dict, List, Optional, Sequence, Tuple, TypedDict, Union, cast, ) from libs.core.langchain_core.agents import AgentAction from libs.core.langchain_core.exceptions import OutputParserException from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.language_models.chat_models import BaseChatModel from libs.core.langchain_core.output_parsers import BaseOutputParser from libs.core.langchain_core.pydantic_v1 import Extra, Field from libs.core.langchain_core.tools import BaseTool from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, Callbacks, ) from langchain.chains.llm import LLMChain from langchain.evaluation.agents.trajectory_eval_prompt import ( EVAL_CHAT_PROMPT, TOOL_FREE_EVAL_CHAT_PROMPT, ) from langchain.evaluation.schema import AgentTrajectoryEvaluator, LLMEvalChain class TrajectoryEval(TypedDict): """A named tuple containing the score and reasoning for a trajectory.""" score: float """The score for the trajectory, normalized from 0 to 1.""" reasoning: str """The reasoning for the score.""" class TrajectoryOutputParser(BaseOutputParser): """Trajectory output parser.""" @property def _type(self) -> str: return "agent_trajectory" def parse(self, text: str) -> TrajectoryEval: """Parse the output text and extract the score and reasoning. Args: text (str): The output text to parse. Returns: TrajectoryEval: A named tuple containing the normalized score and reasoning. Raises: OutputParserException: If the score is not found in the output text or if the LLM's score is not a digit in the range 1-5. """ if "Score:" not in text: raise OutputParserException( f"Could not find score in model eval output: {text}" ) reasoning, score_str = text.split("Score: ", maxsplit=1) reasoning, score_str = reasoning.strip(), score_str.strip() # Use regex to extract the score. # This will get the number in the string, even if it is a float or more than 10. # E.g. "Score: 1" will return 1, "Score: 3.5" will return 3.5, and # "Score: 10" will return 10. # The score should be an integer digit in the range 1-5. _score = re.search(r"(\d+(\.\d+)?)", score_str) # If the score is not found or is a float, raise an exception. if _score is None or "." in _score.group(1): raise OutputParserException( f"Score is not an integer digit in the range 1-5: {text}" ) score = int(_score.group(1)) # If the score is not in the range 1-5, raise an exception. if not 1 <= score <= 5: raise OutputParserException( f"Score is not a digit in the range 1-5: {text}" ) normalized_score = (score - 1) / 4 return TrajectoryEval(score=normalized_score, reasoning=reasoning) class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain): """A chain for evaluating ReAct style agents. This chain is used to evaluate ReAct style agents by reasoning about the sequence of actions taken and their outcomes. Example: .. code-block:: python from langchain.agents import AgentType, initialize_agent from langchain.chat_models import ChatOpenAI from langchain.evaluation import TrajectoryEvalChain from langchain.tools import tool @tool def geography_answers(country: str, question: str) -> str: \"\"\"Very helpful answers to geography questions.\"\"\" return f"{country}? 
IDK - We may never know {question}." llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) agent = initialize_agent( tools=[geography_answers], llm=llm, agent=AgentType.OPENAI_FUNCTIONS, return_intermediate_steps=True, ) question = "How many dwell in the largest minor region in Argentina?" response = agent(question) eval_chain = TrajectoryEvalChain.from_llm( llm=llm, agent_tools=[geography_answers], return_reasoning=True ) result = eval_chain.evaluate_agent_trajectory( input=question, agent_trajectory=response["intermediate_steps"], prediction=response["output"], reference="Paris", ) print(result["score"]) # 0 """ # noqa: E501 agent_tools: Optional[List[BaseTool]] = None """A list of tools available to the agent.""" eval_chain: LLMChain """The language model chain used for evaluation.""" output_parser: TrajectoryOutputParser = Field( default_factory=TrajectoryOutputParser ) """The output parser used to parse the output.""" return_reasoning: bool = False # :meta private: """DEPRECATED. Reasoning always returned.""" class Config: """Configuration for the QAEvalChain.""" extra = Extra.ignore @property def requires_reference(self) -> bool: """Whether this evaluator requires a reference label.""" return False @property def _tools_description(self) -> str: """Get the description of the agent tools. Returns: str: The description of the agent tools. """ if self.agent_tools is None: return "" return "\n\n".join( [ f"""Tool {i}: {tool.name} Description: {tool.description}""" for i, tool in enumerate(self.agent_tools, 1) ] ) @staticmethod def get_agent_trajectory( steps: Union[str, Sequence[Tuple[AgentAction, str]]], ) -> str: """Get the agent trajectory as a formatted string. Args: steps (Union[str, List[Tuple[AgentAction, str]]]): The agent trajectory. Returns: str: The formatted agent trajectory. """ if isinstance(steps, str): return steps return "\n\n".join( [ f"""Step {i}: Tool used: {action.tool} Tool input: {action.tool_input} Tool output: {output}""" for i, (action, output) in enumerate(steps, 1) ] ) @staticmethod def _format_reference(reference: Optional[str]) -> str: """Format the reference text. Args: reference (str): The reference text. Returns: str: The formatted reference text. """ if not reference: return "" return f""" The following is the expected answer. Use this to measure correctness: [GROUND_TRUTH] {reference} [END_GROUND_TRUTH] """ @classmethod def from_llm( cls, llm: BaseLanguageModel, agent_tools: Optional[Sequence[BaseTool]] = None, output_parser: Optional[TrajectoryOutputParser] = None, **kwargs: Any, ) -> "TrajectoryEvalChain": """Create a TrajectoryEvalChain object from a language model chain. Args: llm (BaseChatModel): The language model chain. agent_tools (Optional[Sequence[BaseTool]]): A list of tools available to the agent. output_parser (Optional[TrajectoryOutputParser]): The output parser used to parse the chain output into a score. Returns: TrajectoryEvalChain: The TrajectoryEvalChain object. """ if not isinstance(llm, BaseChatModel): raise NotImplementedError( "Only chat models supported by the current trajectory eval" ) if agent_tools: prompt = EVAL_CHAT_PROMPT else: prompt = TOOL_FREE_EVAL_CHAT_PROMPT eval_chain = LLMChain(llm=llm, prompt=prompt) return cls( agent_tools=agent_tools, eval_chain=eval_chain, output_parser=output_parser or TrajectoryOutputParser(), **kwargs, ) @property def input_keys(self) -> List[str]: """Get the input keys for the chain. Returns: List[str]: The input keys. 
""" return ["question", "agent_trajectory", "answer", "reference"] @property def output_keys(self) -> List[str]: """Get the output keys for the chain. Returns: List[str]: The output keys. """ return ["score", "reasoning"] def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]: """Validate and prep inputs.""" if "reference" not in inputs: inputs["reference"] = self._format_reference(inputs.get("reference")) return super().prep_inputs(inputs) def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the chain and generate the output. Args: inputs (Dict[str, str]): The input values for the chain. run_manager (Optional[CallbackManagerForChainRun]): The callback manager for the chain run. Returns: Dict[str, Any]: The output values of the chain. """ chain_input = {**inputs} if self.agent_tools: chain_input["tool_descriptions"] = self._tools_description _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() raw_output = self.eval_chain.run( chain_input, callbacks=_run_manager.get_child() ) return cast(dict, self.output_parser.parse(raw_output)) async def _acall( self, inputs: Dict[str, str], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the chain and generate the output. Args: inputs (Dict[str, str]): The input values for the chain. run_manager (Optional[CallbackManagerForChainRun]): The callback manager for the chain run. Returns: Dict[str, Any]: The output values of the chain. """ chain_input = {**inputs} if self.agent_tools: chain_input["tool_descriptions"] = self._tools_description _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() raw_output = await self.eval_chain.arun( chain_input, callbacks=_run_manager.get_child() ) return cast(dict, self.output_parser.parse(raw_output)) def _evaluate_agent_trajectory( self, *, prediction: str, input: str, agent_trajectory: Sequence[Tuple[AgentAction, str]], reference: Optional[str] = None, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Evaluate a trajectory. Args: prediction (str): The final predicted response. input (str): The input to the agent. agent_trajectory (List[Tuple[AgentAction, str]]): The intermediate steps forming the agent trajectory. reference (Optional[str]): The reference answer. callbacks (Callbacks): Callbacks to use for this chain run. Returns: dict: The evaluation result, which includes the score and optionally the reasoning for reaching that. """ inputs = { "question": input, "agent_trajectory": self.get_agent_trajectory(agent_trajectory), "answer": prediction, "reference": reference, } return self.__call__( inputs=inputs, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info, return_only_outputs=True, ) async def _aevaluate_agent_trajectory( self, *, prediction: str, input: str, agent_trajectory: Sequence[Tuple[AgentAction, str]], reference: Optional[str] = None, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Asynchronously evaluate a trajectory. Args: prediction (str): The final predicted response. input (str): The input to the agent. agent_trajectory (List[Tuple[AgentAction, str]]): The intermediate steps forming the agent trajectory. 
reference (Optional[str]): The reference answer. callbacks (Callbacks): Callbacks to use for this chain run. Returns: dict: The evaluation result, which includes the score and optionally the reasoning for reaching that. """ inputs = { "question": input, "agent_trajectory": self.get_agent_trajectory(agent_trajectory), "answer": prediction, "reference": reference, } return await self.acall( inputs=inputs, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info, return_only_outputs=True, )
[]
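The scoring contract is easiest to see with the parser alone: the model must emit an integer Score between 1 and 5, which is normalized to [0, 1] as (score - 1) / 4. A minimal sketch:

```python
from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryOutputParser

parser = TrajectoryOutputParser()
result = parser.parse(
    "The agent picked a sensible tool and the final answer matches the ground truth.\n"
    "Score: 4"
)
print(result["score"])      # 0.75, i.e. (4 - 1) / 4
print(result["reasoning"])  # everything before "Score:"
```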
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~neo4j_vector.py
from __future__ import annotations import enum import logging import os import uuid from typing import ( Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, ) from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.utils import get_from_env from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import DistanceStrategy DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE DISTANCE_MAPPING = { DistanceStrategy.EUCLIDEAN_DISTANCE: "euclidean", DistanceStrategy.COSINE: "cosine", } class SearchType(str, enum.Enum): """Enumerator of the Distance strategies.""" VECTOR = "vector" HYBRID = "hybrid" DEFAULT_SEARCH_TYPE = SearchType.VECTOR def _get_search_index_query(search_type: SearchType) -> str: type_to_query_map = { SearchType.VECTOR: ( "CALL db.index.vector.queryNodes($index, $k, $embedding) YIELD node, score " ), SearchType.HYBRID: ( "CALL { " "CALL db.index.vector.queryNodes($index, $k, $embedding) " "YIELD node, score " "WITH collect({node:node, score:score}) AS nodes, max(score) AS max " "UNWIND nodes AS n " # We use 0 as min "RETURN n.node AS node, (n.score / max) AS score UNION " "CALL db.index.fulltext.queryNodes($keyword_index, $query, {limit: $k}) " "YIELD node, score " "WITH collect({node:node, score:score}) AS nodes, max(score) AS max " "UNWIND nodes AS n " # We use 0 as min "RETURN n.node AS node, (n.score / max) AS score " "} " # dedup "WITH node, max(score) AS score ORDER BY score DESC LIMIT $k " ), } return type_to_query_map[search_type] def check_if_not_null(props: List[str], values: List[Any]) -> None: """Check if the values are not None or empty string""" for prop, value in zip(props, values): if not value: raise ValueError(f"Parameter `{prop}` must not be None or empty string") def sort_by_index_name( lst: List[Dict[str, Any]], index_name: str ) -> List[Dict[str, Any]]: """Sort first element to match the index_name if exists""" return sorted(lst, key=lambda x: x.get("index_name") != index_name) def remove_lucene_chars(text: str) -> str: """Remove Lucene special characters""" special_chars = [ "+", "-", "&", "|", "!", "(", ")", "{", "}", "[", "]", "^", '"', "~", "*", "?", ":", "\\", ] for char in special_chars: if char in text: text = text.replace(char, " ") return text.strip() class Neo4jVector(VectorStore): """`Neo4j` vector index. To use, you should have the ``neo4j`` python package installed. Args: url: Neo4j connection url username: Neo4j username. password: Neo4j password database: Optionally provide Neo4j database Defaults to "neo4j" embedding: Any embedding function implementing `langchain.embeddings.base.Embeddings` interface. distance_strategy: The distance strategy to use. (default: COSINE) pre_delete_collection: If True, will delete existing data if it exists. (default: False). Useful for testing. Example: .. 
code-block:: python from langchain_community.vectorstores.neo4j_vector import Neo4jVector from langchain_community.embeddings.openai import OpenAIEmbeddings url="bolt://localhost:7687" username="neo4j" password="pleaseletmein" embeddings = OpenAIEmbeddings() vectorestore = Neo4jVector.from_documents( embedding=embeddings, documents=docs, url=url username=username, password=password, ) """ def __init__( self, embedding: Embeddings, *, search_type: SearchType = SearchType.VECTOR, username: Optional[str] = None, password: Optional[str] = None, url: Optional[str] = None, keyword_index_name: Optional[str] = "keyword", database: str = "neo4j", index_name: str = "vector", node_label: str = "Chunk", embedding_node_property: str = "embedding", text_node_property: str = "text", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, logger: Optional[logging.Logger] = None, pre_delete_collection: bool = False, retrieval_query: str = "", relevance_score_fn: Optional[Callable[[float], float]] = None, ) -> None: try: import neo4j except ImportError: raise ImportError( "Could not import neo4j python package. " "Please install it with `pip install neo4j`." ) # Allow only cosine and euclidean distance strategies if distance_strategy not in [ DistanceStrategy.EUCLIDEAN_DISTANCE, DistanceStrategy.COSINE, ]: raise ValueError( "distance_strategy must be either 'EUCLIDEAN_DISTANCE' or 'COSINE'" ) # Handle if the credentials are environment variables # Support URL for backwards compatibility url = os.environ.get("NEO4J_URL", url) url = get_from_env("url", "NEO4J_URI", url) username = get_from_env("username", "NEO4J_USERNAME", username) password = get_from_env("password", "NEO4J_PASSWORD", password) database = get_from_env("database", "NEO4J_DATABASE", database) self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password)) self._database = database self.schema = "" # Verify connection try: self._driver.verify_connectivity() except neo4j.exceptions.ServiceUnavailable: raise ValueError( "Could not connect to Neo4j database. " "Please ensure that the url is correct" ) except neo4j.exceptions.AuthError: raise ValueError( "Could not connect to Neo4j database. 
" "Please ensure that the username and password are correct" ) # Verify if the version support vector index self.verify_version() # Verify that required values are not null check_if_not_null( [ "index_name", "node_label", "embedding_node_property", "text_node_property", ], [index_name, node_label, embedding_node_property, text_node_property], ) self.embedding = embedding self._distance_strategy = distance_strategy self.index_name = index_name self.keyword_index_name = keyword_index_name self.node_label = node_label self.embedding_node_property = embedding_node_property self.text_node_property = text_node_property self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self.retrieval_query = retrieval_query self.search_type = search_type # Calculate embedding dimension self.embedding_dimension = len(embedding.embed_query("foo")) # Delete existing data if flagged if pre_delete_collection: from neo4j.exceptions import DatabaseError self.query( f"MATCH (n:`{self.node_label}`) " "CALL { WITH n DETACH DELETE n } " "IN TRANSACTIONS OF 10000 ROWS;" ) # Delete index try: self.query(f"DROP INDEX {self.index_name}") except DatabaseError: # Index didn't exist yet pass def query( self, query: str, *, params: Optional[dict] = None ) -> List[Dict[str, Any]]: """ This method sends a Cypher query to the connected Neo4j database and returns the results as a list of dictionaries. Args: query (str): The Cypher query to execute. params (dict, optional): Dictionary of query parameters. Defaults to {}. Returns: List[Dict[str, Any]]: List of dictionaries containing the query results. """ from neo4j.exceptions import CypherSyntaxError params = params or {} with self._driver.session(database=self._database) as session: try: data = session.run(query, params) return [r.data() for r in data] except CypherSyntaxError as e: raise ValueError(f"Cypher Statement is not valid\n{e}") def verify_version(self) -> None: """ Check if the connected Neo4j database version supports vector indexing. Queries the Neo4j database to retrieve its version and compares it against a target version (5.11.0) that is known to support vector indexing. Raises a ValueError if the connected Neo4j version is not supported. """ version = self.query("CALL dbms.components()")[0]["versions"][0] if "aura" in version: version_tuple = tuple(map(int, version.split("-")[0].split("."))) + (0,) else: version_tuple = tuple(map(int, version.split("."))) target_version = (5, 11, 0) if version_tuple < target_version: raise ValueError( "Version index is only supported in Neo4j version 5.11 or greater" ) def retrieve_existing_index(self) -> Optional[int]: """ Check if the vector index exists in the Neo4j database and returns its embedding dimension. This method queries the Neo4j database for existing indexes and attempts to retrieve the dimension of the vector index with the specified name. If the index exists, its dimension is returned. If the index doesn't exist, `None` is returned. Returns: int or None: The embedding dimension of the existing index if found. 
""" index_information = self.query( "SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options " "WHERE type = 'VECTOR' AND (name = $index_name " "OR (labelsOrTypes[0] = $node_label AND " "properties[0] = $embedding_node_property)) " "RETURN name, labelsOrTypes, properties, options ", params={ "index_name": self.index_name, "node_label": self.node_label, "embedding_node_property": self.embedding_node_property, }, ) # sort by index_name index_information = sort_by_index_name(index_information, self.index_name) try: self.index_name = index_information[0]["name"] self.node_label = index_information[0]["labelsOrTypes"][0] self.embedding_node_property = index_information[0]["properties"][0] embedding_dimension = index_information[0]["options"]["indexConfig"][ "vector.dimensions" ] return embedding_dimension except IndexError: return None def retrieve_existing_fts_index( self, text_node_properties: List[str] = [] ) -> Optional[str]: """ Check if the fulltext index exists in the Neo4j database This method queries the Neo4j database for existing fts indexes with the specified name. Returns: (Tuple): keyword index information """ index_information = self.query( "SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options " "WHERE type = 'FULLTEXT' AND (name = $keyword_index_name " "OR (labelsOrTypes = [$node_label] AND " "properties = $text_node_property)) " "RETURN name, labelsOrTypes, properties, options ", params={ "keyword_index_name": self.keyword_index_name, "node_label": self.node_label, "text_node_property": text_node_properties or [self.text_node_property], }, ) # sort by index_name index_information = sort_by_index_name(index_information, self.index_name) try: self.keyword_index_name = index_information[0]["name"] self.text_node_property = index_information[0]["properties"][0] node_label = index_information[0]["labelsOrTypes"][0] return node_label except IndexError: return None def create_new_index(self) -> None: """ This method constructs a Cypher query and executes it to create a new vector index in Neo4j. """ index_query = ( "CALL db.index.vector.createNodeIndex(" "$index_name," "$node_label," "$embedding_node_property," "toInteger($embedding_dimension)," "$similarity_metric )" ) parameters = { "index_name": self.index_name, "node_label": self.node_label, "embedding_node_property": self.embedding_node_property, "embedding_dimension": self.embedding_dimension, "similarity_metric": DISTANCE_MAPPING[self._distance_strategy], } self.query(index_query, params=parameters) def create_new_keyword_index(self, text_node_properties: List[str] = []) -> None: """ This method constructs a Cypher query and executes it to create a new full text index in Neo4j. 
""" node_props = text_node_properties or [self.text_node_property] fts_index_query = ( f"CREATE FULLTEXT INDEX {self.keyword_index_name} " f"FOR (n:`{self.node_label}`) ON EACH " f"[{', '.join(['n.`' + el + '`' for el in node_props])}]" ) self.query(fts_index_query) @property def embeddings(self) -> Embeddings: return self.embedding @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, create_id_index: bool = True, search_type: SearchType = SearchType.VECTOR, **kwargs: Any, ) -> Neo4jVector: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] store = cls( embedding=embedding, search_type=search_type, **kwargs, ) # Check if the vector index already exists embedding_dimension = store.retrieve_existing_index() # If the vector index doesn't exist yet if not embedding_dimension: store.create_new_index() # If the index already exists, check if embedding dimensions match elif not store.embedding_dimension == embedding_dimension: raise ValueError( f"Index with name {store.index_name} already exists." "The provided embedding function and vector index " "dimensions do not match.\n" f"Embedding function dimension: {store.embedding_dimension}\n" f"Vector index dimension: {embedding_dimension}" ) if search_type == SearchType.HYBRID: fts_node_label = store.retrieve_existing_fts_index() # If the FTS index doesn't exist yet if not fts_node_label: store.create_new_keyword_index() else: # Validate that FTS and Vector index use the same information if not fts_node_label == store.node_label: raise ValueError( "Vector and keyword index don't index the same node label" ) # Create unique constraint for faster import if create_id_index: store.query( "CREATE CONSTRAINT IF NOT EXISTS " f"FOR (n:`{store.node_label}`) REQUIRE n.id IS UNIQUE;" ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. kwargs: vectorstore specific parameters """ if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] import_query = ( "UNWIND $data AS row " "CALL { WITH row " f"MERGE (c:`{self.node_label}` {{id: row.id}}) " "WITH c, row " f"CALL db.create.setVectorProperty(c, " f"'{self.embedding_node_property}', row.embedding) " "YIELD node " f"SET c.`{self.text_node_property}` = row.text " "SET c += row.metadata } IN TRANSACTIONS OF 1000 ROWS" ) parameters = { "data": [ {"text": text, "metadata": metadata, "embedding": embedding, "id": id} for text, metadata, embedding, id in zip( texts, metadatas, embeddings, ids ) ] } self.query(import_query, params=parameters) return ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. 
kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding.embed_documents(list(texts)) return self.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) def similarity_search( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Document]: """Run similarity search with Neo4jVector. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. Returns: List of Documents most similar to the query. """ embedding = self.embedding.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, query=query, ) def similarity_search_with_score( self, query: str, k: int = 4 ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, query=query ) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """ Perform a similarity search in the Neo4j database using a given vector and return the top k similar documents with their scores. This method uses a Cypher query to find the top k documents that are most similar to a given embedding. The similarity is measured using a vector index in the Neo4j database. The results are returned as a list of tuples, each containing a Document object and its similarity score. Args: embedding (List[float]): The embedding vector to compare against. k (int, optional): The number of top similar documents to retrieve. Returns: List[Tuple[Document, float]]: A list of tuples, each containing a Document object and its similarity score. """ default_retrieval = ( f"RETURN node.`{self.text_node_property}` AS text, score, " f"node {{.*, `{self.text_node_property}`: Null, " f"`{self.embedding_node_property}`: Null, id: Null }} AS metadata" ) retrieval_query = ( self.retrieval_query if self.retrieval_query else default_retrieval ) read_query = _get_search_index_query(self.search_type) + retrieval_query parameters = { "index": self.index_name, "k": k, "embedding": embedding, "keyword_index": self.keyword_index_name, "query": remove_lucene_chars(kwargs["query"]), } results = self.query(read_query, params=parameters) docs = [ ( Document( page_content=result["text"], metadata={ k: v for k, v in result["metadata"].items() if v is not None }, ), result["score"], ) for result in results ] return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, **kwargs ) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls: Type[Neo4jVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Neo4jVector: """ Return Neo4jVector initialized from texts and embeddings. 
Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, distance_strategy=distance_strategy, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> Neo4jVector: """Construct Neo4jVector wrapper from raw documents and pre- generated embeddings. Return Neo4jVector initialized from documents and embeddings. Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters. Example: .. code-block:: python from langchain_community.vectorstores.neo4j_vector import Neo4jVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) vectorstore = Neo4jVector.from_embeddings( text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_existing_index( cls: Type[Neo4jVector], embedding: Embeddings, index_name: str, search_type: SearchType = DEFAULT_SEARCH_TYPE, keyword_index_name: Optional[str] = None, **kwargs: Any, ) -> Neo4jVector: """ Get instance of an existing Neo4j vector index. This method will return the instance of the store without inserting any new embeddings. Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters along with the `index_name` definition. """ if search_type == SearchType.HYBRID and not keyword_index_name: raise ValueError( "keyword_index name has to be specified " "when using hybrid search option" ) store = cls( embedding=embedding, index_name=index_name, keyword_index_name=keyword_index_name, search_type=search_type, **kwargs, ) embedding_dimension = store.retrieve_existing_index() if not embedding_dimension: raise ValueError( "The specified vector index name does not exist. " "Make sure to check if you spelled it correctly" ) # Check if embedding function and vector index dimensions match if not store.embedding_dimension == embedding_dimension: raise ValueError( "The provided embedding function and vector index " "dimensions do not match.\n" f"Embedding function dimension: {store.embedding_dimension}\n" f"Vector index dimension: {embedding_dimension}" ) if search_type == SearchType.HYBRID: fts_node_label = store.retrieve_existing_fts_index() # If the FTS index doesn't exist yet if not fts_node_label: raise ValueError( "The specified keyword index name does not exist. 
" "Make sure to check if you spelled it correctly" ) else: # Validate that FTS and Vector index use the same information if not fts_node_label == store.node_label: raise ValueError( "Vector and keyword index don't index the same node label" ) return store @classmethod def from_documents( cls: Type[Neo4jVector], documents: List[Document], embedding: Embeddings, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Neo4jVector: """ Return Neo4jVector initialized from documents and embeddings. Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters. """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] return cls.from_texts( texts=texts, embedding=embedding, distance_strategy=distance_strategy, metadatas=metadatas, ids=ids, **kwargs, ) @classmethod def from_existing_graph( cls: Type[Neo4jVector], embedding: Embeddings, node_label: str, embedding_node_property: str, text_node_properties: List[str], *, keyword_index_name: Optional[str] = "keyword", index_name: str = "vector", search_type: SearchType = DEFAULT_SEARCH_TYPE, retrieval_query: str = "", **kwargs: Any, ) -> Neo4jVector: """ Initialize and return a Neo4jVector instance from an existing graph. This method initializes a Neo4jVector instance using the provided parameters and the existing graph. It validates the existence of the indices and creates new ones if they don't exist. Returns: Neo4jVector: An instance of Neo4jVector initialized with the provided parameters and existing graph. Example: >>> neo4j_vector = Neo4jVector.from_existing_graph( ... embedding=my_embedding, ... node_label="Document", ... embedding_node_property="embedding", ... text_node_properties=["title", "content"] ... ) Note: Neo4j credentials are required in the form of `url`, `username`, and `password`, and optional `database` parameters passed as additional keyword arguments. """ # Validate the list is not empty if not text_node_properties: raise ValueError( "Parameter `text_node_properties` must not be an empty list" ) # Prefer retrieval query from params, otherwise construct it if not retrieval_query: retrieval_query = ( f"RETURN reduce(str='', k IN {text_node_properties} |" " str + '\\n' + k + ': ' + coalesce(node[k], '')) AS text, " "node {.*, `" + embedding_node_property + "`: Null, id: Null, " + ", ".join([f"`{prop}`: Null" for prop in text_node_properties]) + "} AS metadata, score" ) store = cls( embedding=embedding, index_name=index_name, keyword_index_name=keyword_index_name, search_type=search_type, retrieval_query=retrieval_query, node_label=node_label, embedding_node_property=embedding_node_property, **kwargs, ) # Check if the vector index already exists embedding_dimension = store.retrieve_existing_index() # If the vector index doesn't exist yet if not embedding_dimension: store.create_new_index() # If the index already exists, check if embedding dimensions match elif not store.embedding_dimension == embedding_dimension: raise ValueError( f"Index with name {store.index_name} already exists." 
"The provided embedding function and vector index " "dimensions do not match.\n" f"Embedding function dimension: {store.embedding_dimension}\n" f"Vector index dimension: {embedding_dimension}" ) # FTS index for Hybrid search if search_type == SearchType.HYBRID: fts_node_label = store.retrieve_existing_fts_index(text_node_properties) # If the FTS index doesn't exist yet if not fts_node_label: store.create_new_keyword_index(text_node_properties) else: # Validate that FTS and Vector index use the same information if not fts_node_label == store.node_label: raise ValueError( "Vector and keyword index don't index the same node label" ) # Populate embeddings while True: fetch_query = ( f"MATCH (n:`{node_label}`) " f"WHERE n.{embedding_node_property} IS null " "AND any(k in $props WHERE n[k] IS NOT null) " f"RETURN elementId(n) AS id, reduce(str=''," "k IN $props | str + '\\n' + k + ':' + coalesce(n[k], '')) AS text " "LIMIT 1000" ) data = store.query(fetch_query, params={"props": text_node_properties}) text_embeddings = embedding.embed_documents([el["text"] for el in data]) params = { "data": [ {"id": el["id"], "embedding": embedding} for el, embedding in zip(data, text_embeddings) ] } store.query( "UNWIND $data AS row " f"MATCH (n:`{node_label}`) " "WHERE elementId(n) = row.id " f"CALL db.create.setVectorProperty(n, " f"'{embedding_node_property}', row.embedding) " "YIELD node RETURN count(*)", params=params, ) # If embedding calculation should be stopped if len(data) < 1000: break return store def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn # Default strategy is to rely on distance strategy provided # in vectorstore constructor if self._distance_strategy == DistanceStrategy.COSINE: return lambda x: x elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return lambda x: x else: raise ValueError( "No supported normalization function" f" for distance_strategy of {self._distance_strategy}." "Consider providing relevance_score_fn to PGVector constructor." )
[]
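A construction sketch mirroring the docstring example above; the credentials are placeholders for a local Neo4j 5.11+ instance, and OpenAIEmbeddings assumes OPENAI_API_KEY is set:

```python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores.neo4j_vector import Neo4jVector

# Placeholder connection details; from_texts creates the vector index on first use
store = Neo4jVector.from_texts(
    ["Neo4j exposes native vector indexes from version 5.11 onward."],
    OpenAIEmbeddings(),
    url="bolt://localhost:7687",
    username="neo4j",
    password="pleaseletmein",
)
docs_and_scores = store.similarity_search_with_score("vector index support", k=1)
```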
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~duckdb_loader.py
from typing import Dict, List, Optional, cast from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class DuckDBLoader(BaseLoader): """Load from `DuckDB`. Each document represents one row of the result. The `page_content_columns` are written into the `page_content` of the document. The `metadata_columns` are written into the `metadata` of the document. By default, all columns are written into the `page_content` and none into the `metadata`. """ def __init__( self, query: str, database: str = ":memory:", read_only: bool = False, config: Optional[Dict[str, str]] = None, page_content_columns: Optional[List[str]] = None, metadata_columns: Optional[List[str]] = None, ): """ Args: query: The query to execute. database: The database to connect to. Defaults to ":memory:". read_only: Whether to open the database in read-only mode. Defaults to False. config: A dictionary of configuration options to pass to the database. Optional. page_content_columns: The columns to write into the `page_content` of the document. Optional. metadata_columns: The columns to write into the `metadata` of the document. Optional. """ self.query = query self.database = database self.read_only = read_only self.config = config or {} self.page_content_columns = page_content_columns self.metadata_columns = metadata_columns def load(self) -> List[Document]: try: import duckdb except ImportError: raise ImportError( "Could not import duckdb python package. " "Please install it with `pip install duckdb`." ) docs = [] with duckdb.connect( database=self.database, read_only=self.read_only, config=self.config ) as con: query_result = con.execute(self.query) results = query_result.fetchall() description = cast(list, query_result.description) field_names = [c[0] for c in description] if self.page_content_columns is None: page_content_columns = field_names else: page_content_columns = self.page_content_columns if self.metadata_columns is None: metadata_columns = [] else: metadata_columns = self.metadata_columns for result in results: page_content = "\n".join( f"{column}: {result[field_names.index(column)]}" for column in page_content_columns ) metadata = { column: result[field_names.index(column)] for column in metadata_columns } doc = Document(page_content=page_content, metadata=metadata) docs.append(doc) return docs
[]
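A usage sketch of the loader; the CSV path and column names are hypothetical:

```python
from langchain_community.document_loaders.duckdb_loader import DuckDBLoader

loader = DuckDBLoader(
    query="SELECT id, name FROM read_csv_auto('users.csv')",  # hypothetical CSV file
    page_content_columns=["name"],   # rendered as "name: <value>" in page_content
    metadata_columns=["id"],
)
docs = loader.load()
print(docs[0].page_content, docs[0].metadata)
```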
2024-01-10
mth93/langchain
libs~community~langchain_community~agent_toolkits~nla~toolkit.py
from __future__ import annotations from typing import Any, List, Optional, Sequence from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.pydantic_v1 import Field from libs.core.langchain_core.tools import BaseTool from langchain_community.agent_toolkits.base import BaseToolkit from langchain_community.agent_toolkits.nla.tool import NLATool from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec from langchain_community.tools.plugin import AIPlugin from langchain_community.utilities.requests import Requests class NLAToolkit(BaseToolkit): """Natural Language API Toolkit. *Security Note*: This toolkit creates tools that enable making calls to an Open API compliant API. The tools created by this toolkit may be able to make GET, POST, PATCH, PUT, DELETE requests to any of the exposed endpoints on the API. Control access to who can use this toolkit. See https://python.langchain.com/docs/security for more information. """ nla_tools: Sequence[NLATool] = Field(...) """List of API Endpoint Tools.""" def get_tools(self) -> List[BaseTool]: """Get the tools for all the API operations.""" return list(self.nla_tools) @staticmethod def _get_http_operation_tools( llm: BaseLanguageModel, spec: OpenAPISpec, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> List[NLATool]: """Get the tools for all the API operations.""" if not spec.paths: return [] http_operation_tools = [] for path in spec.paths: for method in spec.get_methods_for_path(path): endpoint_tool = NLATool.from_llm_and_method( llm=llm, path=path, method=method, spec=spec, requests=requests, verbose=verbose, **kwargs, ) http_operation_tools.append(endpoint_tool) return http_operation_tools @classmethod def from_llm_and_spec( cls, llm: BaseLanguageModel, spec: OpenAPISpec, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit by creating tools for each operation.""" http_operation_tools = cls._get_http_operation_tools( llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs ) return cls(nla_tools=http_operation_tools) @classmethod def from_llm_and_url( cls, llm: BaseLanguageModel, open_api_url: str, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit from an OpenAPI Spec URL""" spec = OpenAPISpec.from_url(open_api_url) return cls.from_llm_and_spec( llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs ) @classmethod def from_llm_and_ai_plugin( cls, llm: BaseLanguageModel, ai_plugin: AIPlugin, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit from an OpenAPI Spec URL""" spec = OpenAPISpec.from_url(ai_plugin.api.url) # TODO: Merge optional Auth information with the `requests` argument return cls.from_llm_and_spec( llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs, ) @classmethod def from_llm_and_ai_plugin_url( cls, llm: BaseLanguageModel, ai_plugin_url: str, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit from an OpenAPI Spec URL""" plugin = AIPlugin.from_url(ai_plugin_url) return cls.from_llm_and_ai_plugin( llm=llm, ai_plugin=plugin, requests=requests, verbose=verbose, **kwargs )
[]
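Typical usage points the toolkit at an OpenAPI document and passes the resulting tools to an agent. A hedged sketch; the spec URL and the OpenAI completion model are stand-ins for whatever API and LLM you actually use:

```python
from langchain_community.llms import OpenAI
from langchain_community.agent_toolkits.nla.toolkit import NLAToolkit

llm = OpenAI(temperature=0)  # requires OPENAI_API_KEY; any BaseLanguageModel works
# Example spec URL; substitute the OpenAPI document of the API you want to wrap
toolkit = NLAToolkit.from_llm_and_url(llm, "https://api.speak.com/openapi.yaml")
print([tool.name for tool in toolkit.get_tools()])
```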
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~output_parsers~test_yaml_parser.py
"""Test yamlOutputParser""" from enum import Enum from typing import Optional from libs.core.langchain_core.exceptions import OutputParserException from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from langchain.output_parsers.yaml import YamlOutputParser class Actions(Enum): SEARCH = "Search" CREATE = "Create" UPDATE = "Update" DELETE = "Delete" class TestModel(BaseModel): action: Actions = Field(description="Action to be performed") action_input: str = Field(description="Input to be used in the action") additional_fields: Optional[str] = Field( description="Additional fields", default=None ) for_new_lines: str = Field(description="To be used to test newlines") # Prevent pytest from trying to run tests on TestModel TestModel.__test__ = False # type: ignore[attr-defined] DEF_RESULT = """```yaml --- action: Update action_input: The yamlOutputParser class is powerful additional_fields: null for_new_lines: | not_escape_newline: escape_newline: ```""" # action 'update' with a lowercase 'u' to test schema validation failure. DEF_RESULT_FAIL = """```yaml action: update action_input: The yamlOutputParser class is powerful additional_fields: null ```""" DEF_EXPECTED_RESULT = TestModel( action=Actions.UPDATE, action_input="The yamlOutputParser class is powerful", additional_fields=None, for_new_lines="not_escape_newline:\n escape_newline: \n", ) def test_yaml_output_parser() -> None: """Test yamlOutputParser.""" yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser( pydantic_object=TestModel ) result = yaml_parser.parse(DEF_RESULT) print("parse_result:", result) assert DEF_EXPECTED_RESULT == result def test_yaml_output_parser_fail() -> None: """Test YamlOutputParser where completion result fails schema validation.""" yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser( pydantic_object=TestModel ) try: yaml_parser.parse(DEF_RESULT_FAIL) except OutputParserException as e: print("parse_result:", e) assert "Failed to parse TestModel from completion" in str(e) else: assert False, "Expected OutputParserException"
[]
2024-01-10
mth93/langchain
libs~core~langchain_core~tracers~context.py
from __future__ import annotations from contextlib import contextmanager from contextvars import ContextVar from typing import ( TYPE_CHECKING, Any, Generator, List, Optional, Tuple, Type, Union, cast, ) from uuid import UUID from langsmith import utils as ls_utils from langsmith.run_helpers import get_run_tree_context from libs.core.langchain_core.tracers.langchain import LangChainTracer from libs.core.langchain_core.tracers.run_collector import RunCollectorCallbackHandler from libs.core.langchain_core.utils.env import env_var_is_set if TYPE_CHECKING: from langsmith import Client as LangSmithClient from libs.core.langchain_core.callbacks.base import BaseCallbackHandler, Callbacks from libs.core.langchain_core.callbacks.manager import AsyncCallbackManager, CallbackManager tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( # noqa: E501 "tracing_callback_v2", default=None ) run_collector_var: ContextVar[Optional[RunCollectorCallbackHandler]] = ContextVar( # noqa: E501 "run_collector", default=None ) @contextmanager def tracing_v2_enabled( project_name: Optional[str] = None, *, example_id: Optional[Union[str, UUID]] = None, tags: Optional[List[str]] = None, client: Optional[LangSmithClient] = None, ) -> Generator[LangChainTracer, None, None]: """Instruct LangChain to log all runs in context to LangSmith. Args: project_name (str, optional): The name of the project. Defaults to "default". example_id (str or UUID, optional): The ID of the example. Defaults to None. tags (List[str], optional): The tags to add to the run. Defaults to None. Returns: None Example: >>> with tracing_v2_enabled(): ... # LangChain code will automatically be traced You can use this to fetch the LangSmith run URL: >>> with tracing_v2_enabled() as cb: ... chain.invoke("foo") ... run_url = cb.get_run_url() """ if isinstance(example_id, str): example_id = UUID(example_id) cb = LangChainTracer( example_id=example_id, project_name=project_name, tags=tags, client=client, ) try: tracing_v2_callback_var.set(cb) yield cb finally: tracing_v2_callback_var.set(None) @contextmanager def collect_runs() -> Generator[RunCollectorCallbackHandler, None, None]: """Collect all run traces in context. Returns: run_collector.RunCollectorCallbackHandler: The run collector callback handler. Example: >>> with collect_runs() as runs_cb: chain.invoke("foo") run_id = runs_cb.traced_runs[0].id """ cb = RunCollectorCallbackHandler() run_collector_var.set(cb) yield cb run_collector_var.set(None) def _get_trace_callbacks( project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, callback_manager: Optional[Union[CallbackManager, AsyncCallbackManager]] = None, ) -> Callbacks: if _tracing_v2_is_enabled(): project_name_ = project_name or _get_tracer_project() tracer = tracing_v2_callback_var.get() or LangChainTracer( project_name=project_name_, example_id=example_id, ) if callback_manager is None: from libs.core.langchain_core.callbacks.base import Callbacks cb = cast(Callbacks, [tracer]) else: if not any( isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): callback_manager.add_handler(tracer, True) # If it already has a LangChainTracer, we don't need to add another one. # this would likely mess up the trace hierarchy. 
cb = callback_manager else: cb = None return cb def _tracing_v2_is_enabled() -> bool: return ( env_var_is_set("LANGCHAIN_TRACING_V2") or tracing_v2_callback_var.get() is not None or get_run_tree_context() is not None or env_var_is_set("LANGCHAIN_TRACING") ) def _get_tracer_project() -> str: run_tree = get_run_tree_context() return getattr( run_tree, "session_name", getattr( # Note, if people are trying to nest @traceable functions and the # tracing_v2_enabled context manager, this will likely mess up the # tree structure. tracing_v2_callback_var.get(), "project", # Have to set this to a string even though it always will return # a string because `get_tracer_project` technically can return # None, but only when a specific argument is supplied. # Therefore, this just tricks the mypy type checker str(ls_utils.get_tracer_project()), ), ) _configure_hooks: List[ Tuple[ ContextVar[Optional[BaseCallbackHandler]], bool, Optional[Type[BaseCallbackHandler]], Optional[str], ] ] = [] def register_configure_hook( context_var: ContextVar[Optional[Any]], inheritable: bool, handle_class: Optional[Type[BaseCallbackHandler]] = None, env_var: Optional[str] = None, ) -> None: """Register a configure hook. Args: context_var (ContextVar[Optional[Any]]): The context variable. inheritable (bool): Whether the context variable is inheritable. handle_class (Optional[Type[BaseCallbackHandler]], optional): The callback handler class. Defaults to None. env_var (Optional[str], optional): The environment variable. Defaults to None. Raises: ValueError: If env_var is set, handle_class must also be set to a non-None value. """ if env_var is not None and handle_class is None: raise ValueError( "If env_var is set, handle_class must also be set to a non-None value." ) from libs.core.langchain_core.callbacks.base import BaseCallbackHandler _configure_hooks.append( ( # the typings of ContextVar do not have the generic arg set as covariant # so we have to cast it cast(ContextVar[Optional[BaseCallbackHandler]], context_var), inheritable, handle_class, env_var, ) ) register_configure_hook(run_collector_var, False)
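A short sketch of how the collect_runs context manager above can be used; the RunnableLambda chain is a stand-in for any runnable, and the collector is expected to be attached automatically through the registered configure hook. tracing_v2_enabled would additionally require LangSmith credentials, so it is omitted here.

from libs.core.langchain_core.runnables import RunnableLambda
from libs.core.langchain_core.tracers.context import collect_runs

# A trivial stand-in chain; any Runnable invoked inside the context is recorded.
chain = RunnableLambda(lambda x: x.upper())

with collect_runs() as run_collector:
    chain.invoke("hello")
    # Each traced run is available on the handler, e.g. its id and name.
    for run in run_collector.traced_runs:
        print(run.id, run.name)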
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~combine_documents~map_rerank.py
"""Combining documents by mapping a chain over them first, then reranking results.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union, cast from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, create_model, root_validator from libs.core.langchain_core.runnables.config import RunnableConfig from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.llm import LLMChain from langchain.output_parsers.regex import RegexParser class MapRerankDocumentsChain(BaseCombineDocumentsChain): """Combining documents by mapping a chain over them, then reranking results. This algorithm calls an LLMChain on each input document. The LLMChain is expected to have an OutputParser that parses the result into both an answer (`answer_key`) and a score (`rank_key`). The answer with the highest score is then returned. Example: .. code-block:: python from langchain.chains import StuffDocumentsChain, LLMChain from libs.core.langchain_core.prompts import PromptTemplate from langchain.llms import OpenAI from langchain.output_parsers.regex import RegexParser document_variable_name = "context" llm = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` # The actual prompt will need to be a lot more complex, this is just # an example. prompt_template = ( "Use the following context to tell me the chemical formula " "for water. Output both your answer and a score of how confident " "you are. Context: {content}" ) output_parser = RegexParser( regex=r"(.*?)\nScore: (.*)", output_keys=["answer", "score"], ) prompt = PromptTemplate( template=prompt_template, input_variables=["context"], output_parser=output_parser, ) llm_chain = LLMChain(llm=llm, prompt=prompt) chain = MapRerankDocumentsChain( llm_chain=llm_chain, document_variable_name=document_variable_name, rank_key="score", answer_key="answer", ) """ llm_chain: LLMChain """Chain to apply to each document individually.""" document_variable_name: str """The variable name in the llm_chain to put the documents in. If only one variable in the llm_chain, this need not be provided.""" rank_key: str """Key in output of llm_chain to rank on.""" answer_key: str """Key in output of llm_chain to return as answer.""" metadata_keys: Optional[List[str]] = None """Additional metadata from the chosen document to return.""" return_intermediate_steps: bool = False """Return intermediate steps. Intermediate steps include the results of calling llm_chain on each document.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def get_output_schema( self, config: Optional[RunnableConfig] = None ) -> Type[BaseModel]: schema: Dict[str, Any] = { self.output_key: (str, None), } if self.return_intermediate_steps: schema["intermediate_steps"] = (List[str], None) if self.metadata_keys: schema.update({key: (Any, None) for key in self.metadata_keys}) return create_model("MapRerankOutput", **schema) @property def output_keys(self) -> List[str]: """Expect input key. 
:meta private: """ _output_keys = super().output_keys if self.return_intermediate_steps: _output_keys = _output_keys + ["intermediate_steps"] if self.metadata_keys is not None: _output_keys += self.metadata_keys return _output_keys @root_validator() def validate_llm_output(cls, values: Dict) -> Dict: """Validate that the combine chain outputs a dictionary.""" output_parser = values["llm_chain"].prompt.output_parser if not isinstance(output_parser, RegexParser): raise ValueError( "Output parser of llm_chain should be a RegexParser," f" got {output_parser}" ) output_keys = output_parser.output_keys if values["rank_key"] not in output_keys: raise ValueError( f"Got {values['rank_key']} as key to rank on, but did not find " f"it in the llm_chain output keys ({output_keys})" ) if values["answer_key"] not in output_keys: raise ValueError( f"Got {values['answer_key']} as key to return, but did not find " f"it in the llm_chain output keys ({output_keys})" ) return values @root_validator(pre=True) def get_default_document_variable_name(cls, values: Dict) -> Dict: """Get default document variable name, if not provided.""" if "document_variable_name" not in values: llm_chain_variables = values["llm_chain"].prompt.input_variables if len(llm_chain_variables) == 1: values["document_variable_name"] = llm_chain_variables[0] else: raise ValueError( "document_variable_name must be provided if there are " "multiple llm_chain input_variables" ) else: llm_chain_variables = values["llm_chain"].prompt.input_variables if values["document_variable_name"] not in llm_chain_variables: raise ValueError( f"document_variable_name {values['document_variable_name']} was " f"not found in llm_chain input_variables: {llm_chain_variables}" ) return values def combine_docs( self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any ) -> Tuple[str, dict]: """Combine documents in a map rerank manner. Combine by mapping first chain over all documents, then reranking the results. Args: docs: List of documents to combine callbacks: Callbacks to be passed through **kwargs: additional parameters to be passed to LLM calls (like other input variables besides the documents) Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. """ results = self.llm_chain.apply_and_parse( # FYI - this is parallelized and so it is fast. [{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs], callbacks=callbacks, ) return self._process_results(docs, results) async def acombine_docs( self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any ) -> Tuple[str, dict]: """Combine documents in a map rerank manner. Combine by mapping first chain over all documents, then reranking the results. Args: docs: List of documents to combine callbacks: Callbacks to be passed through **kwargs: additional parameters to be passed to LLM calls (like other input variables besides the documents) Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. """ results = await self.llm_chain.aapply_and_parse( # FYI - this is parallelized and so it is fast. 
[{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs], callbacks=callbacks, ) return self._process_results(docs, results) def _process_results( self, docs: List[Document], results: Sequence[Union[str, List[str], Dict[str, str]]], ) -> Tuple[str, dict]: typed_results = cast(List[dict], results) sorted_res = sorted( zip(typed_results, docs), key=lambda x: -int(x[0][self.rank_key]) ) output, document = sorted_res[0] extra_info = {} if self.metadata_keys is not None: for key in self.metadata_keys: extra_info[key] = document.metadata[key] if self.return_intermediate_steps: extra_info["intermediate_steps"] = results return output[self.answer_key], extra_info @property def _chain_type(self) -> str: return "map_rerank_documents_chain"
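A hedged end-to-end sketch of MapRerankDocumentsChain; FakeListLLM is used so the example runs without external credentials, and the prompt, regex, and canned responses are illustrative only.

from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.prompts import PromptTemplate

from langchain.chains import LLMChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.output_parsers.regex import RegexParser
from langchain_community.llms.fake import FakeListLLM

# The parser must emit both the answer key and the rank key.
output_parser = RegexParser(regex=r"(.*?)\nScore: (.*)", output_keys=["answer", "score"])
prompt = PromptTemplate(
    template="Answer using this context: {context}",
    input_variables=["context"],
    output_parser=output_parser,
)
# One canned completion per document, each carrying an answer and a score.
llm = FakeListLLM(responses=["H2O\nScore: 90", "Unknown\nScore: 10"])
chain = MapRerankDocumentsChain(
    llm_chain=LLMChain(llm=llm, prompt=prompt),
    document_variable_name="context",
    rank_key="score",
    answer_key="answer",
)
docs = [Document(page_content="Water chemistry."), Document(page_content="Unrelated text.")]
print(chain.combine_docs(docs))  # highest-scoring answer wins: ("H2O", {})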
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~deepsparse.py
# flake8: noqa from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union from libs.core.langchain_core.pydantic_v1 import root_validator from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.llms import LLM from langchain_community.llms.utils import enforce_stop_tokens from libs.core.langchain_core.outputs import GenerationChunk class DeepSparse(LLM): """Neural Magic DeepSparse LLM interface. To use, you should have the ``deepsparse`` or ``deepsparse-nightly`` python package installed. See https://github.com/neuralmagic/deepsparse This interface let's you deploy optimized LLMs straight from the [SparseZoo](https://sparsezoo.neuralmagic.com/?useCase=text_generation) Example: .. code-block:: python from langchain_community.llms import DeepSparse llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none") """ # noqa: E501 pipeline: Any #: :meta private: model: str """The path to a model file or directory or the name of a SparseZoo model stub.""" model_config: Optional[Dict[str, Any]] = None """Keyword arguments passed to the pipeline construction. Common parameters are sequence_length, prompt_sequence_length""" generation_config: Union[None, str, Dict] = None """GenerationConfig dictionary consisting of parameters used to control sequences generated for each prompt. Common parameters are: max_length, max_new_tokens, num_return_sequences, output_scores, top_p, top_k, repetition_penalty.""" streaming: bool = False """Whether to stream the results, token by token.""" @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { "model": self.model, "model_config": self.model_config, "generation_config": self.generation_config, "streaming": self.streaming, } @property def _llm_type(self) -> str: """Return type of llm.""" return "deepsparse" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that ``deepsparse`` package is installed.""" try: from deepsparse import Pipeline except ImportError: raise ImportError( "Could not import `deepsparse` package. " "Please install it with `pip install deepsparse[llm]`" ) model_config = values["model_config"] or {} values["pipeline"] = Pipeline.create( task="text_generation", model_path=values["model"], **model_config, ) return values def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Generate text from a prompt. Args: prompt: The prompt to generate text from. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. 
code-block:: python from langchain_community.llms import DeepSparse llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none") llm("Tell me a joke.") """ if self.streaming: combined_output = "" for chunk in self._stream( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs ): combined_output += chunk.text text = combined_output else: text = ( self.pipeline(sequences=prompt, **self.generation_config) .generations[0] .text ) if stop is not None: text = enforce_stop_tokens(text, stop) return text async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Generate text from a prompt. Args: prompt: The prompt to generate text from. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain_community.llms import DeepSparse llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none") llm("Tell me a joke.") """ if self.streaming: combined_output = "" async for chunk in self._astream( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs ): combined_output += chunk.text text = combined_output else: text = ( self.pipeline(sequences=prompt, **self.generation_config) .generations[0] .text ) if stop is not None: text = enforce_stop_tokens(text, stop) return text def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like object containing a string token. Example: .. code-block:: python from langchain_community.llms import DeepSparse llm = DeepSparse( model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none", streaming=True ) for chunk in llm.stream("Tell me a joke", stop=["'","\n"]): print(chunk, end='', flush=True) """ inference = self.pipeline( sequences=prompt, streaming=True, **self.generation_config ) for token in inference: chunk = GenerationChunk(text=token.generations[0].text) yield chunk if run_manager: run_manager.on_llm_new_token(token=chunk.text) async def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: """Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like object containing a string token. Example: .. 
code-block:: python from langchain_community.llms import DeepSparse llm = DeepSparse( model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none", streaming=True ) for chunk in llm.stream("Tell me a joke", stop=["'","\n"]): print(chunk, end='', flush=True) """ inference = self.pipeline( sequences=prompt, streaming=True, **self.generation_config ) for token in inference: chunk = GenerationChunk(text=token.generations[0].text) yield chunk if run_manager: await run_manager.on_llm_new_token(token=chunk.text)
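A hedged sketch for the streaming path; it only runs where the deepsparse[llm] package is installed and the SparseZoo stub (copied from the docstring above) can be downloaded, and max_new_tokens is just an illustrative generation parameter.

from langchain_community.llms import DeepSparse

llm = DeepSparse(
    model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none",
    generation_config={"max_new_tokens": 32},
    streaming=True,
)
# stream() drives _stream() above and yields text chunks as they are generated.
for chunk in llm.stream("def fibonacci(n):"):
    print(chunk, end="", flush=True)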
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~pai_eas_endpoint.py
import json import logging from typing import Any, AsyncIterator, Dict, List, Optional, cast import requests from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.chat_models import BaseChatModel from libs.core.langchain_core.messages import ( AIMessage, AIMessageChunk, BaseMessage, ChatMessage, HumanMessage, SystemMessage, ) from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from libs.core.langchain_core.pydantic_v1 import root_validator from libs.core.langchain_core.utils import get_from_dict_or_env from langchain_community.llms.utils import enforce_stop_tokens logger = logging.getLogger(__name__) class PaiEasChatEndpoint(BaseChatModel): """Eas LLM Service chat model API. To use, must have a deployed eas chat llm service on AliCloud. One can set the environment variable ``eas_service_url`` and ``eas_service_token`` set with your eas service url and service token. Example: .. code-block:: python from langchain_community.chat_models import PaiEasChatEndpoint eas_chat_endpoint = PaiEasChatEndpoint( eas_service_url="your_service_url", eas_service_token="your_service_token" ) """ """PAI-EAS Service URL""" eas_service_url: str """PAI-EAS Service TOKEN""" eas_service_token: str """PAI-EAS Service Infer Params""" max_new_tokens: Optional[int] = 512 temperature: Optional[float] = 0.8 top_p: Optional[float] = 0.1 top_k: Optional[int] = 10 do_sample: Optional[bool] = False use_cache: Optional[bool] = True stop_sequences: Optional[List[str]] = None """Enable stream chat mode.""" streaming: bool = False """Key/value arguments to pass to the model. Reserved for future use""" model_kwargs: Optional[dict] = None version: Optional[str] = "2.0" timeout: Optional[int] = 5000 @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["eas_service_url"] = get_from_dict_or_env( values, "eas_service_url", "EAS_SERVICE_URL" ) values["eas_service_token"] = get_from_dict_or_env( values, "eas_service_token", "EAS_SERVICE_TOKEN" ) return values @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { "eas_service_url": self.eas_service_url, "eas_service_token": self.eas_service_token, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "pai_eas_chat_endpoint" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_new_tokens": self.max_new_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "stop_sequences": [], "do_sample": self.do_sample, "use_cache": self.use_cache, } def _invocation_params( self, stop_sequences: Optional[List[str]], **kwargs: Any ) -> dict: params = self._default_params if self.model_kwargs: params.update(self.model_kwargs) if self.stop_sequences is not None and stop_sequences is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop_sequences is not None: params["stop"] = self.stop_sequences else: params["stop"] = stop_sequences return {**params, **kwargs} def format_request_payload( self, messages: List[BaseMessage], **model_kwargs: Any ) -> dict: prompt: Dict[str, Any] = {} user_content: List[str] = [] assistant_content: List[str] = [] for message in messages: """Converts message to 
a dict according to role""" content = cast(str, message.content) if isinstance(message, HumanMessage): user_content = user_content + [content] elif isinstance(message, AIMessage): assistant_content = assistant_content + [content] elif isinstance(message, SystemMessage): prompt["system_prompt"] = content elif isinstance(message, ChatMessage) and message.role in [ "user", "assistant", "system", ]: if message.role == "system": prompt["system_prompt"] = content elif message.role == "user": user_content = user_content + [content] elif message.role == "assistant": assistant_content = assistant_content + [content] else: supported = ",".join([role for role in ["user", "assistant", "system"]]) raise ValueError( f"""Received unsupported role. Supported roles for the LLaMa Foundation Model: {supported}""" ) prompt["prompt"] = user_content[len(user_content) - 1] history = [ history_item for _, history_item in enumerate(zip(user_content[:-1], assistant_content)) ] prompt["history"] = history return {**prompt, **model_kwargs} def _format_response_payload( self, output: bytes, stop_sequences: Optional[List[str]] ) -> str: """Formats response""" try: text = json.loads(output)["response"] if stop_sequences: text = enforce_stop_tokens(text, stop_sequences) return text except Exception as e: if isinstance(e, json.decoder.JSONDecodeError): return output.decode("utf-8") raise e def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs) message = AIMessage(content=output_str) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) def _call( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: params = self._invocation_params(stop, **kwargs) request_payload = self.format_request_payload(messages, **params) response_payload = self._call_eas(request_payload) generated_text = self._format_response_payload(response_payload, params["stop"]) if run_manager: run_manager.on_llm_new_token(generated_text) return generated_text def _call_eas(self, query_body: dict) -> Any: """Generate text from the eas service.""" headers = { "Content-Type": "application/json", "Accept": "application/json", "Authorization": f"{self.eas_service_token}", } # make request response = requests.post( self.eas_service_url, headers=headers, json=query_body, timeout=self.timeout ) if response.status_code != 200: raise Exception( f"Request failed with status code {response.status_code}" f" and message {response.text}" ) return response.text def _call_eas_stream(self, query_body: dict) -> Any: """Generate text from the eas service.""" headers = { "Content-Type": "application/json", "Accept": "application/json", "Authorization": f"{self.eas_service_token}", } # make request response = requests.post( self.eas_service_url, headers=headers, json=query_body, timeout=self.timeout ) if response.status_code != 200: raise Exception( f"Request failed with status code {response.status_code}" f" and message {response.text}" ) return response def _convert_chunk_to_message_message( self, chunk: str, ) -> AIMessageChunk: data = json.loads(chunk.encode("utf-8")) return AIMessageChunk(content=data.get("response", "")) async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: 
Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: params = self._invocation_params(stop, **kwargs) request_payload = self.format_request_payload(messages, **params) request_payload["use_stream_chat"] = True response = self._call_eas_stream(request_payload) for chunk in response.iter_lines( chunk_size=8192, decode_unicode=False, delimiter=b"\0" ): if chunk: content = self._convert_chunk_to_message_message(chunk) # identify stop sequence in generated text, if any stop_seq_found: Optional[str] = None for stop_seq in params["stop"]: if stop_seq in content.content: stop_seq_found = stop_seq # identify text to yield text: Optional[str] = None if stop_seq_found: content.content = content.content[ : content.content.index(stop_seq_found) ] # yield text, if any if text: if run_manager: await run_manager.on_llm_new_token(cast(str, content.content)) yield ChatGenerationChunk(message=content) # break if stop sequence found if stop_seq_found: break
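A hedged sketch for the chat model above; the service URL and token are placeholders for a real PAI-EAS deployment, so the call only succeeds against a live endpoint.

from libs.core.langchain_core.messages import HumanMessage

from langchain_community.chat_models import PaiEasChatEndpoint

chat = PaiEasChatEndpoint(
    eas_service_url="https://your-eas-endpoint.example.com",  # placeholder
    eas_service_token="your_service_token",  # placeholder
)
# invoke() routes through _generate/_call and returns an AIMessage.
result = chat.invoke([HumanMessage(content="Say hello in one sentence.")])
print(result.content)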
[ "{}" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~hippo.py
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore if TYPE_CHECKING: from transwarp_hippo_api.hippo_client import HippoClient # Default connection DEFAULT_HIPPO_CONNECTION = { "host": "localhost", "port": "7788", "username": "admin", "password": "admin", } logger = logging.getLogger(__name__) class Hippo(VectorStore): """`Hippo` vector store. You need to install `hippo-api` and run Hippo. Please visit our official website for how to run a Hippo instance: https://www.transwarp.cn/starwarp Args: embedding_function (Embeddings): Function used to embed the text. table_name (str): Which Hippo table to use. Defaults to "test". database_name (str): Which Hippo database to use. Defaults to "default". number_of_shards (int): The number of shards for the Hippo table.Defaults to 1. number_of_replicas (int): The number of replicas for the Hippo table.Defaults to 1. connection_args (Optional[dict[str, any]]): The connection args used for this class comes in the form of a dict. index_params (Optional[dict]): Which index params to use. Defaults to IVF_FLAT. drop_old (Optional[bool]): Whether to drop the current collection. Defaults to False. primary_field (str): Name of the primary key field. Defaults to "pk". text_field (str): Name of the text field. Defaults to "text". vector_field (str): Name of the vector field. Defaults to "vector". The connection args used for this class comes in the form of a dict, here are a few of the options: host (str): The host of Hippo instance. Default at "localhost". port (str/int): The port of Hippo instance. Default at 7788. user (str): Use which user to connect to Hippo instance. If user and password are provided, we will add related header in every RPC call. password (str): Required when user is provided. The password corresponding to the user. Example: .. code-block:: python from langchain_community.vectorstores import Hippo from langchain_community.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() # Connect to a hippo instance on localhost vector_store = Hippo.from_documents( docs, embedding=embeddings, table_name="langchain_test", connection_args=HIPPO_CONNECTION ) Raises: ValueError: If the hippo-api python package is not installed. 
""" def __init__( self, embedding_function: Embeddings, table_name: str = "test", database_name: str = "default", number_of_shards: int = 1, number_of_replicas: int = 1, connection_args: Optional[Dict[str, Any]] = None, index_params: Optional[dict] = None, drop_old: Optional[bool] = False, ): self.number_of_shards = number_of_shards self.number_of_replicas = number_of_replicas self.embedding_func = embedding_function self.table_name = table_name self.database_name = database_name self.index_params = index_params # In order for a collection to be compatible, # 'pk' should be an auto-increment primary key and string self._primary_field = "pk" # In order for compatibility, the text field will need to be called "text" self._text_field = "text" # In order for compatibility, the vector field needs to be called "vector" self._vector_field = "vector" self.fields: List[str] = [] # Create the connection to the server if connection_args is None: connection_args = DEFAULT_HIPPO_CONNECTION self.hc = self._create_connection_alias(connection_args) self.col: Any = None # If the collection exists, delete it try: if ( self.hc.check_table_exists(self.table_name, self.database_name) and drop_old ): self.hc.delete_table(self.table_name, self.database_name) except Exception as e: logging.error( f"An error occurred while deleting the table " f"{self.table_name}: {e}" ) raise try: if self.hc.check_table_exists(self.table_name, self.database_name): self.col = self.hc.get_table(self.table_name, self.database_name) except Exception as e: logging.error( f"An error occurred while getting the table " f"{self.table_name}: {e}" ) raise # Initialize the vector database self._get_env() def _create_connection_alias(self, connection_args: dict) -> HippoClient: """Create the connection to the Hippo server.""" # Grab the connection arguments that are used for checking existing connection try: from transwarp_hippo_api.hippo_client import HippoClient except ImportError as e: raise ImportError( "Unable to import transwarp_hipp_api, please install with " "`pip install hippo-api`." 
) from e host: str = connection_args.get("host", None) port: int = connection_args.get("port", None) username: str = connection_args.get("username", "shiva") password: str = connection_args.get("password", "shiva") # Order of use is host/port, uri, address if host is not None and port is not None: if "," in host: hosts = host.split(",") given_address = ",".join([f"{h}:{port}" for h in hosts]) else: given_address = str(host) + ":" + str(port) else: raise ValueError("Missing standard address type for reuse attempt") try: logger.info(f"create HippoClient[{given_address}]") return HippoClient([given_address], username=username, pwd=password) except Exception as e: logger.error("Failed to create new connection") raise e def _get_env( self, embeddings: Optional[list] = None, metadatas: Optional[List[dict]] = None ) -> None: logger.info("init ...") if embeddings is not None: logger.info("create collection") self._create_collection(embeddings, metadatas) self._extract_fields() self._create_index() def _create_collection( self, embeddings: list, metadatas: Optional[List[dict]] = None ) -> None: from transwarp_hippo_api.hippo_client import HippoField from transwarp_hippo_api.hippo_type import HippoType # Determine embedding dim dim = len(embeddings[0]) logger.debug(f"[_create_collection] dim: {dim}") fields = [] # Create the primary key field fields.append(HippoField(self._primary_field, True, HippoType.STRING)) # Create the text field fields.append(HippoField(self._text_field, False, HippoType.STRING)) # Create the vector field, supports binary or float vectors # to The binary vector type is to be developed. fields.append( HippoField( self._vector_field, False, HippoType.FLOAT_VECTOR, type_params={"dimension": dim}, ) ) # to In Hippo,there is no method similar to the infer_type_data # types, so currently all non-vector data is converted to string type. if metadatas: # # Create FieldSchema for each entry in metadata. for key, value in metadatas[0].items(): # # Infer the corresponding datatype of the metadata if isinstance(value, list): value_dim = len(value) fields.append( HippoField( key, False, HippoType.FLOAT_VECTOR, type_params={"dimension": value_dim}, ) ) else: fields.append(HippoField(key, False, HippoType.STRING)) logger.debug(f"[_create_collection] fields: {fields}") # Create the collection self.hc.create_table( name=self.table_name, auto_id=True, fields=fields, database_name=self.database_name, number_of_shards=self.number_of_shards, number_of_replicas=self.number_of_replicas, ) self.col = self.hc.get_table(self.table_name, self.database_name) logger.info( f"[_create_collection] : " f"create table {self.table_name} in {self.database_name} successfully" ) def _extract_fields(self) -> None: """Grab the existing fields from the Collection""" from transwarp_hippo_api.hippo_client import HippoTable if isinstance(self.col, HippoTable): schema = self.col.schema logger.debug(f"[_extract_fields] schema:{schema}") for x in schema: self.fields.append(x.name) logger.debug(f"04 [_extract_fields] fields:{self.fields}") # TO CAN: Translated into English, your statement would be: "Currently, # only the field named 'vector' (the automatically created vector field) # is checked for indexing. Indexes need to be created manually for other # vector type columns. 
def _get_index(self) -> Optional[Dict[str, Any]]: """Return the vector index information if it exists""" from transwarp_hippo_api.hippo_client import HippoTable if isinstance(self.col, HippoTable): table_info = self.hc.get_table_info( self.table_name, self.database_name ).get(self.table_name, {}) embedding_indexes = table_info.get("embedding_indexes", None) if embedding_indexes is None: return None else: for x in self.hc.get_table_info(self.table_name, self.database_name)[ self.table_name ]["embedding_indexes"]: logger.debug(f"[_get_index] embedding_indexes {embedding_indexes}") if x["column"] == self._vector_field: return x return None # TO Indexes can only be created for the self._vector_field field. def _create_index(self) -> None: """Create a index on the collection""" from transwarp_hippo_api.hippo_client import HippoTable from transwarp_hippo_api.hippo_type import IndexType, MetricType if isinstance(self.col, HippoTable) and self._get_index() is None: if self._get_index() is None: if self.index_params is None: self.index_params = { "index_name": "langchain_auto_create", "metric_type": MetricType.L2, "index_type": IndexType.IVF_FLAT, "nlist": 10, } self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], nlist=self.index_params["nlist"], ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) logger.info("create index successfully") else: index_dict = { "IVF_FLAT": IndexType.IVF_FLAT, "FLAT": IndexType.FLAT, "IVF_SQ": IndexType.IVF_SQ, "IVF_PQ": IndexType.IVF_PQ, "HNSW": IndexType.HNSW, } metric_dict = { "ip": MetricType.IP, "IP": MetricType.IP, "l2": MetricType.L2, "L2": MetricType.L2, } self.index_params["metric_type"] = metric_dict[ self.index_params["metric_type"] ] if self.index_params["index_type"] == "FLAT": self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) elif ( self.index_params["index_type"] == "IVF_FLAT" or self.index_params["index_type"] == "IVF_SQ" ): self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], nlist=self.index_params.get("nlist", 10), nprobe=self.index_params.get("nprobe", 10), ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) elif self.index_params["index_type"] == "IVF_PQ": self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], nlist=self.index_params.get("nlist", 10), nprobe=self.index_params.get("nprobe", 10), nbits=self.index_params.get("nbits", 8), m=self.index_params.get("m"), ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) elif self.index_params["index_type"] == "HNSW": self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], M=self.index_params.get("M"), ef_construction=self.index_params.get("ef_construction"), ef_search=self.index_params.get("ef_search"), ) 
logger.debug( self.col.activate_index(self.index_params["index_name"]) ) else: raise ValueError( "Index name does not match, " "please enter the correct index name. " "(FLAT, IVF_FLAT, IVF_PQ,IVF_SQ, HNSW)" ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, timeout: Optional[int] = None, batch_size: int = 1000, **kwargs: Any, ) -> List[str]: """ Add text to the collection. Args: texts: An iterable that contains the text to be added. metadatas: An optional list of dictionaries, each dictionary contains the metadata associated with a text. timeout: Optional timeout, in seconds. batch_size: The number of texts inserted in each batch, defaults to 1000. **kwargs: Other optional parameters. Returns: A list of strings, containing the unique identifiers of the inserted texts. Note: If the collection has not yet been created, this method will create a new collection. """ from transwarp_hippo_api.hippo_client import HippoTable if not texts or all(t == "" for t in texts): logger.debug("Nothing to insert, skipping.") return [] texts = list(texts) logger.debug(f"[add_texts] texts: {texts}") try: embeddings = self.embedding_func.embed_documents(texts) except NotImplementedError: embeddings = [self.embedding_func.embed_query(x) for x in texts] if len(embeddings) == 0: logger.debug("Nothing to insert, skipping.") return [] logger.debug(f"[add_texts] len_embeddings:{len(embeddings)}") # 如果还没有创建collection则创建collection if not isinstance(self.col, HippoTable): self._get_env(embeddings, metadatas) # Dict to hold all insert columns insert_dict: Dict[str, list] = { self._text_field: texts, self._vector_field: embeddings, } logger.debug(f"[add_texts] metadatas:{metadatas}") logger.debug(f"[add_texts] fields:{self.fields}") if metadatas is not None: for d in metadatas: for key, value in d.items(): if key in self.fields: insert_dict.setdefault(key, []).append(value) logger.debug(insert_dict[self._text_field]) # Total insert count vectors: list = insert_dict[self._vector_field] total_count = len(vectors) if "pk" in self.fields: self.fields.remove("pk") logger.debug(f"[add_texts] total_count:{total_count}") for i in range(0, total_count, batch_size): # Grab end index end = min(i + batch_size, total_count) # Convert dict to list of lists batch for insertion insert_list = [insert_dict[x][i:end] for x in self.fields] try: res = self.col.insert_rows(insert_list) logger.info(f"05 [add_texts] insert {res}") except Exception as e: logger.error( "Failed to insert batch starting at entity: %s/%s", i, total_count ) raise e return [""] def similarity_search( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """ Perform a similarity search on the query string. Args: query (str): The text to search for. k (int, optional): The number of results to return. Default is 4. param (dict, optional): Specifies the search parameters for the index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): Time to wait before a timeout error. Defaults to None. kwargs: Keyword arguments for Collection.search(). Returns: List[Document]: The document results of the search. 
""" if self.col is None: logger.debug("No existing collection to search.") return [] res = self.similarity_search_with_score( query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return [doc for doc, _ in res] def similarity_search_with_score( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Performs a search on the query string and returns results with scores. Args: query (str): The text being searched. k (int, optional): The number of results to return. Default is 4. param (dict): Specifies the search parameters for the index. Default is None. expr (str, optional): Filtering expression. Default is None. timeout (int, optional): The waiting time before a timeout error. Default is None. kwargs: Keyword arguments for Collection.search(). Returns: List[float], List[Tuple[Document, any, any]]: """ if self.col is None: logger.debug("No existing collection to search.") return [] # Embed the query text. embedding = self.embedding_func.embed_query(query) ret = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return ret def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Performs a search on the query string and returns results with scores. Args: embedding (List[float]): The embedding vector being searched. k (int, optional): The number of results to return. Default is 4. param (dict): Specifies the search parameters for the index. Default is None. expr (str, optional): Filtering expression. Default is None. timeout (int, optional): The waiting time before a timeout error. Default is None. kwargs: Keyword arguments for Collection.search(). Returns: List[Tuple[Document, float]]: Resulting documents and scores. """ if self.col is None: logger.debug("No existing collection to search.") return [] # if param is None: # param = self.search_params # Determine result metadata fields. output_fields = self.fields[:] output_fields.remove(self._vector_field) # Perform the search. logger.debug(f"search_field:{self._vector_field}") logger.debug(f"vectors:{[embedding]}") logger.debug(f"output_fields:{output_fields}") logger.debug(f"topk:{k}") logger.debug(f"dsl:{expr}") res = self.col.query( search_field=self._vector_field, vectors=[embedding], output_fields=output_fields, topk=k, dsl=expr, ) # Organize results. 
logger.debug(f"[similarity_search_with_score_by_vector] res:{res}") score_col = self._text_field + "%scores" ret = [] count = 0 for items in zip(*[res[0][field] for field in output_fields]): meta = {field: value for field, value in zip(output_fields, items)} doc = Document(page_content=meta.pop(self._text_field), metadata=meta) logger.debug( f"[similarity_search_with_score_by_vector] " f"res[0][score_col]:{res[0][score_col]}" ) score = res[0][score_col][count] count += 1 ret.append((doc, score)) return ret @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, table_name: str = "test", database_name: str = "default", connection_args: Dict[str, Any] = DEFAULT_HIPPO_CONNECTION, index_params: Optional[Dict[Any, Any]] = None, search_params: Optional[Dict[str, Any]] = None, drop_old: bool = False, **kwargs: Any, ) -> "Hippo": """ Creates an instance of the VST class from the given texts. Args: texts (List[str]): List of texts to be added. embedding (Embeddings): Embedding model for the texts. metadatas (List[dict], optional): List of metadata dictionaries for each text.Defaults to None. table_name (str): Name of the table. Defaults to "test". database_name (str): Name of the database. Defaults to "default". connection_args (dict[str, Any]): Connection parameters. Defaults to DEFAULT_HIPPO_CONNECTION. index_params (dict): Indexing parameters. Defaults to None. search_params (dict): Search parameters. Defaults to an empty dictionary. drop_old (bool): Whether to drop the old collection. Defaults to False. kwargs: Other arguments. Returns: Hippo: An instance of the VST class. """ if search_params is None: search_params = {} logger.info("00 [from_texts] init the class of Hippo") vector_db = cls( embedding_function=embedding, table_name=table_name, database_name=database_name, connection_args=connection_args, index_params=index_params, drop_old=drop_old, **kwargs, ) logger.debug(f"[from_texts] texts:{texts}") logger.debug(f"[from_texts] metadatas:{metadatas}") vector_db.add_texts(texts=texts, metadatas=metadatas) return vector_db
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~docarray~hnsw.py
from __future__ import annotations from typing import Any, List, Literal, Optional from libs.core.langchain_core.embeddings import Embeddings from langchain_community.vectorstores.docarray.base import ( DocArrayIndex, _check_docarray_import, ) class DocArrayHnswSearch(DocArrayIndex): """`HnswLib` storage using `DocArray` package. To use it, you should have the ``docarray`` package with version >=0.32.0 installed. You can install it with `pip install "langchain[docarray]"`. """ @classmethod def from_params( cls, embedding: Embeddings, work_dir: str, n_dim: int, dist_metric: Literal["cosine", "ip", "l2"] = "cosine", max_elements: int = 1024, index: bool = True, ef_construction: int = 200, ef: int = 10, M: int = 16, allow_replace_deleted: bool = True, num_threads: int = 1, **kwargs: Any, ) -> DocArrayHnswSearch: """Initialize DocArrayHnswSearch store. Args: embedding (Embeddings): Embedding function. work_dir (str): path to the location where all the data will be stored. n_dim (int): dimension of an embedding. dist_metric (str): Distance metric for DocArrayHnswSearch can be one of: "cosine", "ip", and "l2". Defaults to "cosine". max_elements (int): Maximum number of vectors that can be stored. Defaults to 1024. index (bool): Whether an index should be built for this field. Defaults to True. ef_construction (int): defines a construction time/accuracy trade-off. Defaults to 200. ef (int): parameter controlling query time/accuracy trade-off. Defaults to 10. M (int): parameter that defines the maximum number of outgoing connections in the graph. Defaults to 16. allow_replace_deleted (bool): Enables replacing of deleted elements with new added ones. Defaults to True. num_threads (int): Sets the number of cpu threads to use. Defaults to 1. **kwargs: Other keyword arguments to be passed to the get_doc_cls method. """ _check_docarray_import() from docarray.index import HnswDocumentIndex doc_cls = cls._get_doc_cls( dim=n_dim, space=dist_metric, max_elements=max_elements, index=index, ef_construction=ef_construction, ef=ef, M=M, allow_replace_deleted=allow_replace_deleted, num_threads=num_threads, **kwargs, ) doc_index = HnswDocumentIndex[doc_cls](work_dir=work_dir) # type: ignore return cls(doc_index, embedding) @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, work_dir: Optional[str] = None, n_dim: Optional[int] = None, **kwargs: Any, ) -> DocArrayHnswSearch: """Create an DocArrayHnswSearch store and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. work_dir (str): path to the location where all the data will be stored. n_dim (int): dimension of an embedding. **kwargs: Other keyword arguments to be passed to the __init__ method. Returns: DocArrayHnswSearch Vector Store """ if work_dir is None: raise ValueError("`work_dir` parameter has not been set.") if n_dim is None: raise ValueError("`n_dim` parameter has not been set.") store = cls.from_params(embedding, work_dir, n_dim, **kwargs) store.add_texts(texts=texts, metadatas=metadatas) return store
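A hedged sketch for DocArrayHnswSearch; it assumes the docarray package (with hnswlib support) is installed and uses FakeEmbeddings so no API key is needed, with n_dim matching the embedding size.

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import DocArrayHnswSearch

embeddings = FakeEmbeddings(size=32)
store = DocArrayHnswSearch.from_texts(
    texts=["hnsw builds a graph index", "brute force scans every vector"],
    embedding=embeddings,
    work_dir="./hnsw_index",  # local directory for the on-disk index
    n_dim=32,  # must match the embedding dimension
)
print(store.similarity_search("graph index", k=1)[0].page_content)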
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~retrievers~outline.py
from typing import List

from libs.core.langchain_core.callbacks import CallbackManagerForRetrieverRun
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.retrievers import BaseRetriever

from langchain_community.utilities.outline import OutlineAPIWrapper


class OutlineRetriever(BaseRetriever, OutlineAPIWrapper):
    """Retriever for Outline API.

    It wraps run() to get_relevant_documents().
    It uses all OutlineAPIWrapper arguments without any change.
    """

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return self.run(query=query)
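A hedged sketch for the retriever; it assumes the underlying OutlineAPIWrapper can find its configuration (for example the OUTLINE_INSTANCE_URL and OUTLINE_API_KEY environment variables), so it only works against a configured Outline instance.

from langchain_community.retrievers import OutlineRetriever

retriever = OutlineRetriever()
for doc in retriever.get_relevant_documents("onboarding checklist"):
    # Metadata keys depend on the wrapper; "title" is used here as an example.
    print(doc.metadata.get("title"), "-", doc.page_content[:80])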
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~airtable.py
from typing import Iterator, List

from libs.core.langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader


class AirtableLoader(BaseLoader):
    """Load the `Airtable` tables."""

    def __init__(self, api_token: str, table_id: str, base_id: str):
        """Initialize with API token and the IDs for table and base."""
        self.api_token = api_token
        """Airtable API token."""
        self.table_id = table_id
        """Airtable table ID."""
        self.base_id = base_id
        """Airtable base ID."""

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load Documents from table."""

        from pyairtable import Table

        table = Table(self.api_token, self.base_id, self.table_id)
        records = table.all()
        for record in records:
            # Need to convert record from dict to str
            yield Document(
                page_content=str(record),
                metadata={
                    "source": self.base_id + "_" + self.table_id,
                    "base_id": self.base_id,
                    "table_id": self.table_id,
                },
            )

    def load(self) -> List[Document]:
        """Load Documents from table."""
        return list(self.lazy_load())
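A hedged sketch for the loader; the token and IDs below are placeholders, and pyairtable must be installed for lazy_load() to reach the Airtable API.

from langchain_community.document_loaders import AirtableLoader

loader = AirtableLoader(
    api_token="pat_XXXXXXXXXXXX",  # placeholder personal access token
    table_id="tblXXXXXXXXXXXXXX",  # placeholder table ID
    base_id="appXXXXXXXXXXXXXX",  # placeholder base ID
)
for doc in loader.lazy_load():
    print(doc.metadata["source"], doc.page_content[:80])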
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~pyspark_dataframe.py
import itertools import logging import sys from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Tuple from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader logger = logging.getLogger(__file__) if TYPE_CHECKING: from pyspark.sql import SparkSession class PySparkDataFrameLoader(BaseLoader): """Load `PySpark` DataFrames.""" def __init__( self, spark_session: Optional["SparkSession"] = None, df: Optional[Any] = None, page_content_column: str = "text", fraction_of_memory: float = 0.1, ): """Initialize with a Spark DataFrame object. Args: spark_session: The SparkSession object. df: The Spark DataFrame object. page_content_column: The name of the column containing the page content. Defaults to "text". fraction_of_memory: The fraction of memory to use. Defaults to 0.1. """ try: from pyspark.sql import DataFrame, SparkSession except ImportError: raise ImportError( "pyspark is not installed. " "Please install it with `pip install pyspark`" ) self.spark = ( spark_session if spark_session else SparkSession.builder.getOrCreate() ) if not isinstance(df, DataFrame): raise ValueError( f"Expected data_frame to be a PySpark DataFrame, got {type(df)}" ) self.df = df self.page_content_column = page_content_column self.fraction_of_memory = fraction_of_memory self.num_rows, self.max_num_rows = self.get_num_rows() self.rdd_df = self.df.rdd.map(list) self.column_names = self.df.columns def get_num_rows(self) -> Tuple[int, int]: """Gets the number of "feasible" rows for the DataFrame""" try: import psutil except ImportError as e: raise ImportError( "psutil not installed. Please install it with `pip install psutil`." ) from e row = self.df.limit(1).collect()[0] estimated_row_size = sys.getsizeof(row) mem_info = psutil.virtual_memory() available_memory = mem_info.available max_num_rows = int( (available_memory / estimated_row_size) * self.fraction_of_memory ) return min(max_num_rows, self.df.count()), max_num_rows def lazy_load(self) -> Iterator[Document]: """A lazy loader for document content.""" for row in self.rdd_df.toLocalIterator(): metadata = {self.column_names[i]: row[i] for i in range(len(row))} text = metadata[self.page_content_column] metadata.pop(self.page_content_column) yield Document(page_content=text, metadata=metadata) def load(self) -> List[Document]: """Load from the dataframe.""" if self.df.count() > self.max_num_rows: logger.warning( f"The number of DataFrame rows is {self.df.count()}, " f"but we will only include the amount " f"of rows that can reasonably fit in memory: {self.num_rows}." ) lazy_load_iterator = self.lazy_load() return list(itertools.islice(lazy_load_iterator, self.num_rows))
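A hedged, self-contained sketch for the loader; it assumes pyspark and psutil are installed and builds a tiny local DataFrame so no external data source is needed.

from pyspark.sql import SparkSession

from langchain_community.document_loaders import PySparkDataFrameLoader

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame(
    [
        ("LangChain loads Spark rows.", "docs"),
        ("Each row becomes a Document.", "docs"),
    ],
    ["text", "category"],
)
loader = PySparkDataFrameLoader(spark_session=spark, df=df, page_content_column="text")
for doc in loader.load():
    # Remaining columns become metadata on each Document.
    print(doc.page_content, doc.metadata)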
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~semadb.py
from typing import Any, Iterable, List, Optional, Tuple from uuid import uuid4 import numpy as np import requests from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.utils import get_from_env from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import DistanceStrategy class SemaDB(VectorStore): """`SemaDB` vector store. This vector store is a wrapper around the SemaDB database. Example: .. code-block:: python from langchain_community.vectorstores import SemaDB db = SemaDB('mycollection', 768, embeddings, DistanceStrategy.COSINE) """ HOST = "semadb.p.rapidapi.com" BASE_URL = "https://" + HOST def __init__( self, collection_name: str, vector_size: int, embedding: Embeddings, distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE, api_key: str = "", ): """initialize the SemaDB vector store.""" self.collection_name = collection_name self.vector_size = vector_size self.api_key = api_key or get_from_env("api_key", "SEMADB_API_KEY") self._embedding = embedding self.distance_strategy = distance_strategy @property def headers(self) -> dict: """Return the common headers.""" return { "content-type": "application/json", "X-RapidAPI-Key": self.api_key, "X-RapidAPI-Host": SemaDB.HOST, } def _get_internal_distance_strategy(self) -> str: """Return the internal distance strategy.""" if self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return "euclidean" elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: raise ValueError("Max inner product is not supported by SemaDB") elif self.distance_strategy == DistanceStrategy.DOT_PRODUCT: return "dot" elif self.distance_strategy == DistanceStrategy.JACCARD: raise ValueError("Max inner product is not supported by SemaDB") elif self.distance_strategy == DistanceStrategy.COSINE: return "cosine" else: raise ValueError(f"Unknown distance strategy {self.distance_strategy}") def create_collection(self) -> bool: """Creates the corresponding collection in SemaDB.""" payload = { "id": self.collection_name, "vectorSize": self.vector_size, "distanceMetric": self._get_internal_distance_strategy(), } response = requests.post( SemaDB.BASE_URL + "/collections", json=payload, headers=self.headers, ) return response.status_code == 200 def delete_collection(self) -> bool: """Deletes the corresponding collection in SemaDB.""" response = requests.delete( SemaDB.BASE_URL + f"/collections/{self.collection_name}", headers=self.headers, ) return response.status_code == 200 def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 1000, **kwargs: Any, ) -> List[str]: """Add texts to the vector store.""" if not isinstance(texts, list): texts = list(texts) embeddings = self._embedding.embed_documents(texts) # Check dimensions if len(embeddings[0]) != self.vector_size: raise ValueError( f"Embedding size mismatch {len(embeddings[0])} != {self.vector_size}" ) # Normalise if needed if self.distance_strategy == DistanceStrategy.COSINE: embed_matrix = np.array(embeddings) embed_matrix = embed_matrix / np.linalg.norm( embed_matrix, axis=1, keepdims=True ) embeddings = embed_matrix.tolist() # Create points ids: List[str] = [] points = [] if metadatas is not None: for text, embedding, metadata in zip(texts, embeddings, metadatas): new_id = str(uuid4()) ids.append(new_id) points.append( { "id": new_id, "vector": embedding, "metadata": {**metadata, **{"text": text}}, } ) 
else: for text, embedding in zip(texts, embeddings): new_id = str(uuid4()) ids.append(new_id) points.append( { "id": new_id, "vector": embedding, "metadata": {"text": text}, } ) # Insert points in batches for i in range(0, len(points), batch_size): batch = points[i : i + batch_size] response = requests.post( SemaDB.BASE_URL + f"/collections/{self.collection_name}/points", json={"points": batch}, headers=self.headers, ) if response.status_code != 200: raise ValueError(f"Error adding points: {response.text}") failed_ranges = response.json()["failedRanges"] if len(failed_ranges) > 0: raise ValueError(f"Error adding points: {failed_ranges}") # Return ids return ids @property def embeddings(self) -> Embeddings: """Return the embeddings.""" return self._embedding def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ payload = { "ids": ids, } response = requests.delete( SemaDB.BASE_URL + f"/collections/{self.collection_name}/points", json=payload, headers=self.headers, ) return response.status_code == 200 and len(response.json()["failedPoints"]) == 0 def _search_points(self, embedding: List[float], k: int = 4) -> List[dict]: """Search points.""" # Normalise if needed if self.distance_strategy == DistanceStrategy.COSINE: vec = np.array(embedding) vec = vec / np.linalg.norm(vec) embedding = vec.tolist() # Perform search request payload = { "vector": embedding, "limit": k, } response = requests.post( SemaDB.BASE_URL + f"/collections/{self.collection_name}/points/search", json=payload, headers=self.headers, ) if response.status_code != 200: raise ValueError(f"Error searching: {response.text}") return response.json()["points"] def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query.""" query_embedding = self._embedding.embed_query(query) return self.similarity_search_by_vector(query_embedding, k=k) def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """Run similarity search with distance.""" query_embedding = self._embedding.embed_query(query) points = self._search_points(query_embedding, k=k) return [ ( Document(page_content=p["metadata"]["text"], metadata=p["metadata"]), p["distance"], ) for p in points ] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector.
""" points = self._search_points(embedding, k=k) return [ Document(page_content=p["metadata"]["text"], metadata=p["metadata"]) for p in points ] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = "", vector_size: int = 0, api_key: str = "", distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE, **kwargs: Any, ) -> "SemaDB": """Return VectorStore initialized from texts and embeddings.""" if not collection_name: raise ValueError("Collection name must be provided") if not vector_size: raise ValueError("Vector size must be provided") if not api_key: raise ValueError("API key must be provided") semadb = cls( collection_name, vector_size, embedding, distance_strategy=distance_strategy, api_key=api_key, ) if not semadb.create_collection(): raise ValueError("Error creating collection") semadb.add_texts(texts, metadatas=metadatas) return semadb
[ "application/json" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~csv_loader.py
import csv from io import TextIOWrapper from typing import Any, Dict, List, Optional, Sequence from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader from langchain_community.document_loaders.helpers import detect_file_encodings from langchain_community.document_loaders.unstructured import ( UnstructuredFileLoader, validate_unstructured_version, ) class CSVLoader(BaseLoader): """Load a `CSV` file into a list of Documents. Each document represents one row of the CSV file. Every row is converted into a key/value pair and outputted to a new line in the document's page_content. The source for each document loaded from csv is set to the value of the `file_path` argument for all documents by default. You can override this by setting the `source_column` argument to the name of a column in the CSV file. The source of each document will then be set to the value of the column with the name specified in `source_column`. Output Example: .. code-block:: txt column1: value1 column2: value2 column3: value3 """ def __init__( self, file_path: str, source_column: Optional[str] = None, metadata_columns: Sequence[str] = (), csv_args: Optional[Dict] = None, encoding: Optional[str] = None, autodetect_encoding: bool = False, ): """ Args: file_path: The path to the CSV file. source_column: The name of the column in the CSV file to use as the source. Optional. Defaults to None. metadata_columns: A sequence of column names to use as metadata. Optional. csv_args: A dictionary of arguments to pass to the csv.DictReader. Optional. Defaults to None. encoding: The encoding of the CSV file. Optional. Defaults to None. autodetect_encoding: Whether to try to autodetect the file encoding. """ self.file_path = file_path self.source_column = source_column self.metadata_columns = metadata_columns self.encoding = encoding self.csv_args = csv_args or {} self.autodetect_encoding = autodetect_encoding def load(self) -> List[Document]: """Load data into document objects.""" docs = [] try: with open(self.file_path, newline="", encoding=self.encoding) as csvfile: docs = self.__read_file(csvfile) except UnicodeDecodeError as e: if self.autodetect_encoding: detected_encodings = detect_file_encodings(self.file_path) for encoding in detected_encodings: try: with open( self.file_path, newline="", encoding=encoding.encoding ) as csvfile: docs = self.__read_file(csvfile) break except UnicodeDecodeError: continue else: raise RuntimeError(f"Error loading {self.file_path}") from e except Exception as e: raise RuntimeError(f"Error loading {self.file_path}") from e return docs def __read_file(self, csvfile: TextIOWrapper) -> List[Document]: docs = [] csv_reader = csv.DictReader(csvfile, **self.csv_args) # type: ignore for i, row in enumerate(csv_reader): try: source = ( row[self.source_column] if self.source_column is not None else self.file_path ) except KeyError: raise ValueError( f"Source column '{self.source_column}' not found in CSV file." ) content = "\n".join( f"{k.strip()}: {v.strip() if v is not None else v}" for k, v in row.items() if k not in self.metadata_columns ) metadata = {"source": source, "row": i} for col in self.metadata_columns: try: metadata[col] = row[col] except KeyError: raise ValueError(f"Metadata column '{col}' not found in CSV file.") doc = Document(page_content=content, metadata=metadata) docs.append(doc) return docs class UnstructuredCSVLoader(UnstructuredFileLoader): """Load `CSV` files using `Unstructured`. 
Like other Unstructured loaders, UnstructuredCSVLoader can be used in both "single" and "elements" mode. If you use the loader in "elements" mode, the CSV file is loaded as a single Unstructured Table element, and an HTML representation of the table is available in the "text_as_html" key in the document metadata. Examples -------- from langchain_community.document_loaders.csv_loader import UnstructuredCSVLoader loader = UnstructuredCSVLoader("stanley-cups.csv", mode="elements") docs = loader.load() """ def __init__( self, file_path: str, mode: str = "single", **unstructured_kwargs: Any ): """ Args: file_path: The path to the CSV file. mode: The mode to use when loading the CSV file. Optional. Defaults to "single". **unstructured_kwargs: Keyword arguments to pass to unstructured. """ validate_unstructured_version(min_unstructured_version="0.6.8") super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: from unstructured.partition.csv import partition_csv return partition_csv(filename=self.file_path, **self.unstructured_kwargs)
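A short usage sketch for CSVLoader; the file path and column names below are hypothetical and must match your CSV header.

.. code-block:: python

    from langchain_community.document_loaders.csv_loader import CSVLoader

    loader = CSVLoader(
        file_path="stanley-cups.csv",  # hypothetical file
        source_column="Team",          # column used as each document's source (must exist)
        metadata_columns=["Year"],     # copied into metadata, excluded from page_content
        csv_args={"delimiter": ","},
    )
    docs = loader.load()
    print(docs[0].page_content, docs[0].metadata)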
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~agent_toolkits~amadeus~toolkit.py
from __future__ import annotations from typing import TYPE_CHECKING, List from libs.core.langchain_core.pydantic_v1 import Field from langchain_community.agent_toolkits.base import BaseToolkit from langchain_community.tools import BaseTool from langchain_community.tools.amadeus.closest_airport import AmadeusClosestAirport from langchain_community.tools.amadeus.flight_search import AmadeusFlightSearch from langchain_community.tools.amadeus.utils import authenticate if TYPE_CHECKING: from amadeus import Client class AmadeusToolkit(BaseToolkit): """Toolkit for interacting with Amadeus which offers APIs for travel.""" client: Client = Field(default_factory=authenticate) class Config: """Pydantic config.""" arbitrary_types_allowed = True def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return [ AmadeusClosestAirport(), AmadeusFlightSearch(), ]
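A minimal sketch of building the toolkit; it assumes the Amadeus SDK is installed and that the credentials expected by `authenticate` (the Amadeus client id and secret) are configured in the environment.

.. code-block:: python

    from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit

    toolkit = AmadeusToolkit()  # default_factory=authenticate builds the Amadeus client
    tools = toolkit.get_tools()
    print([tool.name for tool in tools])  # the closest-airport and flight-search tools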
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~utilities~test_powerbi_api.py
"""Integration test for POWERBI API Wrapper.""" import pytest from libs.core.langchain_core.utils import get_from_env from langchain_community.utilities.powerbi import PowerBIDataset def azure_installed() -> bool: try: from azure.core.credentials import TokenCredential # noqa: F401 from azure.identity import DefaultAzureCredential # noqa: F401 return True except Exception as e: print(f"azure not installed, skipping test {e}") return False @pytest.mark.skipif(not azure_installed(), reason="requires azure package") def test_daxquery() -> None: from azure.identity import DefaultAzureCredential DATASET_ID = get_from_env("", "POWERBI_DATASET_ID") TABLE_NAME = get_from_env("", "POWERBI_TABLE_NAME") NUM_ROWS = get_from_env("", "POWERBI_NUMROWS") powerbi = PowerBIDataset( dataset_id=DATASET_ID, table_names=[TABLE_NAME], credential=DefaultAzureCredential(), ) output = powerbi.run(f'EVALUATE ROW("RowCount", COUNTROWS({TABLE_NAME}))') numrows = str(output["results"][0]["tables"][0]["rows"][0]["[RowCount]"]) assert NUM_ROWS == numrows
[]
2024-01-10
mth93/langchain
libs~community~tests~unit_tests~llms~test_forefrontai.py
"""Test ForeFrontAI LLM""" from typing import cast from libs.core.langchain_core.pydantic_v1 import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.forefrontai import ForefrontAI def test_forefrontai_api_key_is_secret_string() -> None: """Test that the API key is stored as a SecretStr.""" llm = ForefrontAI(forefrontai_api_key="secret-api-key", temperature=0.2) assert isinstance(llm.forefrontai_api_key, SecretStr) def test_forefrontai_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: """Test that the API key is masked when passed from an environment variable.""" monkeypatch.setenv("FOREFRONTAI_API_KEY", "secret-api-key") llm = ForefrontAI(temperature=0.2) print(llm.forefrontai_api_key, end="") captured = capsys.readouterr() assert captured.out == "**********" def test_forefrontai_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test that the API key is masked when passed via the constructor.""" llm = ForefrontAI( forefrontai_api_key="secret-api-key", temperature=0.2, ) print(llm.forefrontai_api_key, end="") captured = capsys.readouterr() assert captured.out == "**********" def test_forefrontai_uses_actual_secret_value_from_secretstr() -> None: """Test that the actual secret value is correctly retrieved.""" llm = ForefrontAI( forefrontai_api_key="secret-api-key", temperature=0.2, ) assert ( cast(SecretStr, llm.forefrontai_api_key).get_secret_value() == "secret-api-key" )
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~word_document.py
"""Loads word documents.""" import os import tempfile from abc import ABC from typing import List from urllib.parse import urlparse import requests from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader from langchain_community.document_loaders.unstructured import UnstructuredFileLoader class Docx2txtLoader(BaseLoader, ABC): """Load `DOCX` file using `docx2txt` and chunks at character level. Defaults to check for local file, but if the file is a web path, it will download it to a temporary file, and use that, then clean up the temporary file after completion """ def __init__(self, file_path: str): """Initialize with file path.""" self.file_path = file_path if "~" in self.file_path: self.file_path = os.path.expanduser(self.file_path) # If the file is a web path, download it to a temporary file, and use that if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path): r = requests.get(self.file_path) if r.status_code != 200: raise ValueError( "Check the url of your file; returned status code %s" % r.status_code ) self.web_path = self.file_path self.temp_file = tempfile.NamedTemporaryFile() self.temp_file.write(r.content) self.file_path = self.temp_file.name elif not os.path.isfile(self.file_path): raise ValueError("File path %s is not a valid file or url" % self.file_path) def __del__(self) -> None: if hasattr(self, "temp_file"): self.temp_file.close() def load(self) -> List[Document]: """Load given path as single page.""" import docx2txt return [ Document( page_content=docx2txt.process(self.file_path), metadata={"source": self.file_path}, ) ] @staticmethod def _is_valid_url(url: str) -> bool: """Check if the url is valid.""" parsed = urlparse(url) return bool(parsed.netloc) and bool(parsed.scheme) class UnstructuredWordDocumentLoader(UnstructuredFileLoader): """Load `Microsoft Word` file using `Unstructured`. Works with both .docx and .doc files. You can run the loader in one of two modes: "single" and "elements". If you use "single" mode, the document will be returned as a single langchain Document object. If you use "elements" mode, the unstructured library will split the document into elements such as Title and NarrativeText. You can pass in additional unstructured kwargs after mode to apply different unstructured settings. Examples -------- from langchain_community.document_loaders import UnstructuredWordDocumentLoader loader = UnstructuredWordDocumentLoader( "example.docx", mode="elements", strategy="fast", ) docs = loader.load() References ---------- https://unstructured-io.github.io/unstructured/bricks.html#partition-docx """ def _get_elements(self) -> List: from unstructured.__version__ import __version__ as __unstructured_version__ from unstructured.file_utils.filetype import FileType, detect_filetype unstructured_version = tuple( [int(x) for x in __unstructured_version__.split(".")] ) # NOTE(MthwRobinson) - magic will raise an import error if the libmagic # system dependency isn't installed. If it's not installed, we'll just # check the file extension try: import magic # noqa: F401 is_doc = detect_filetype(self.file_path) == FileType.DOC except ImportError: _, extension = os.path.splitext(str(self.file_path)) is_doc = extension == ".doc" if is_doc and unstructured_version < (0, 4, 11): raise ValueError( f"You are on unstructured version {__unstructured_version__}. " "Partitioning .doc files is only supported in unstructured>=0.4.11. " "Please upgrade the unstructured package and try again." 
) if is_doc: from unstructured.partition.doc import partition_doc return partition_doc(filename=self.file_path, **self.unstructured_kwargs) else: from unstructured.partition.docx import partition_docx return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
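A brief usage sketch for both loaders; "example.docx" is a placeholder path, and the second route requires the unstructured package.

.. code-block:: python

    from langchain_community.document_loaders import (
        Docx2txtLoader,
        UnstructuredWordDocumentLoader,
    )

    # Plain-text extraction via docx2txt: one Document per file.
    docs = Docx2txtLoader("example.docx").load()

    # Element-level partitioning via unstructured.
    loader = UnstructuredWordDocumentLoader(
        "example.docx", mode="elements", strategy="fast"
    )
    elements = loader.load()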
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_message_histories~singlestoredb.py
import json import logging import re from typing import ( Any, List, ) from libs.core.langchain_core.chat_history import BaseChatMessageHistory from libs.core.langchain_core.messages import ( BaseMessage, message_to_dict, messages_from_dict, ) logger = logging.getLogger(__name__) class SingleStoreDBChatMessageHistory(BaseChatMessageHistory): """Chat message history stored in a SingleStoreDB database.""" def __init__( self, session_id: str, *, table_name: str = "message_store", id_field: str = "id", session_id_field: str = "session_id", message_field: str = "message", pool_size: int = 5, max_overflow: int = 10, timeout: float = 30, **kwargs: Any, ): """Initialize with necessary components. Args: table_name (str, optional): Specifies the name of the table in use. Defaults to "message_store". id_field (str, optional): Specifies the name of the id field in the table. Defaults to "id". session_id_field (str, optional): Specifies the name of the session_id field in the table. Defaults to "session_id". message_field (str, optional): Specifies the name of the message field in the table. Defaults to "message". Following arguments pertain to the connection pool: pool_size (int, optional): Determines the number of active connections in the pool. Defaults to 5. max_overflow (int, optional): Determines the maximum number of connections allowed beyond the pool_size. Defaults to 10. timeout (float, optional): Specifies the maximum wait time in seconds for establishing a connection. Defaults to 30. Following arguments pertain to the database connection: host (str, optional): Specifies the hostname, IP address, or URL for the database connection. The default scheme is "mysql". user (str, optional): Database username. password (str, optional): Database password. port (int, optional): Database port. Defaults to 3306 for non-HTTP connections, 80 for HTTP connections, and 443 for HTTPS connections. database (str, optional): Database name. Additional optional arguments provide further customization over the database connection: pure_python (bool, optional): Toggles the connector mode. If True, operates in pure Python mode. local_infile (bool, optional): Allows local file uploads. charset (str, optional): Specifies the character set for string values. ssl_key (str, optional): Specifies the path of the file containing the SSL key. ssl_cert (str, optional): Specifies the path of the file containing the SSL certificate. ssl_ca (str, optional): Specifies the path of the file containing the SSL certificate authority. ssl_cipher (str, optional): Sets the SSL cipher list. ssl_disabled (bool, optional): Disables SSL usage. ssl_verify_cert (bool, optional): Verifies the server's certificate. Automatically enabled if ``ssl_ca`` is specified. ssl_verify_identity (bool, optional): Verifies the server's identity. conv (dict[int, Callable], optional): A dictionary of data conversion functions. credential_type (str, optional): Specifies the type of authentication to use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO. autocommit (bool, optional): Enables autocommits. results_type (str, optional): Determines the structure of the query results: tuples, namedtuples, dicts. results_format (str, optional): Deprecated. This option has been renamed to results_type. Examples: Basic Usage: .. 
code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) message_history = SingleStoreDBChatMessageHistory( session_id="my-session", host="https://user:[email protected]:3306/database" ) Advanced Usage: .. code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) message_history = SingleStoreDBChatMessageHistory( session_id="my-session", host="127.0.0.1", port=3306, user="user", password="password", database="db", table_name="my_custom_table", pool_size=10, timeout=60, ) Using environment variables: .. code-block:: python from langchain_community.chat_message_histories import ( SingleStoreDBChatMessageHistory ) os.environ['SINGLESTOREDB_URL'] = 'me:[email protected]/my_db' message_history = SingleStoreDBChatMessageHistory("my-session") """ self.table_name = self._sanitize_input(table_name) self.session_id = self._sanitize_input(session_id) self.id_field = self._sanitize_input(id_field) self.session_id_field = self._sanitize_input(session_id_field) self.message_field = self._sanitize_input(message_field) # Pass the rest of the kwargs to the connection. self.connection_kwargs = kwargs # Add connection attributes to the connection kwargs. if "conn_attrs" not in self.connection_kwargs: self.connection_kwargs["conn_attrs"] = dict() self.connection_kwargs["conn_attrs"]["_connector_name"] = "langchain python sdk" self.connection_kwargs["conn_attrs"]["_connector_version"] = "1.0.1" # Create a connection pool. try: from sqlalchemy.pool import QueuePool except ImportError: raise ImportError( "Could not import sqlalchemy.pool python package. " "Please install it with `pip install singlestoredb`." ) self.connection_pool = QueuePool( self._get_connection, max_overflow=max_overflow, pool_size=pool_size, timeout=timeout, ) self.table_created = False def _sanitize_input(self, input_str: str) -> str: # Remove characters that are not alphanumeric or underscores return re.sub(r"[^a-zA-Z0-9_]", "", input_str) def _get_connection(self) -> Any: try: import singlestoredb as s2 except ImportError: raise ImportError( "Could not import singlestoredb python package. " "Please install it with `pip install singlestoredb`." 
) return s2.connect(**self.connection_kwargs) def _create_table_if_not_exists(self) -> None: """Create table if it doesn't exist.""" if self.table_created: return conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute( """CREATE TABLE IF NOT EXISTS {} ({} BIGINT PRIMARY KEY AUTO_INCREMENT, {} TEXT NOT NULL, {} JSON NOT NULL);""".format( self.table_name, self.id_field, self.session_id_field, self.message_field, ), ) self.table_created = True finally: cur.close() finally: conn.close() @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from SingleStoreDB""" self._create_table_if_not_exists() conn = self.connection_pool.connect() items = [] try: cur = conn.cursor() try: cur.execute( """SELECT {} FROM {} WHERE {} = %s""".format( self.message_field, self.table_name, self.session_id_field, ), (self.session_id), ) for row in cur.fetchall(): items.append(row[0]) finally: cur.close() finally: conn.close() messages = messages_from_dict(items) return messages def add_message(self, message: BaseMessage) -> None: """Append the message to the record in SingleStoreDB""" self._create_table_if_not_exists() conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute( """INSERT INTO {} ({}, {}) VALUES (%s, %s)""".format( self.table_name, self.session_id_field, self.message_field, ), (self.session_id, json.dumps(message_to_dict(message))), ) finally: cur.close() finally: conn.close() def clear(self) -> None: """Clear session memory from SingleStoreDB""" self._create_table_if_not_exists() conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute( """DELETE FROM {} WHERE {} = %s""".format( self.table_name, self.session_id_field, ), (self.session_id), ) finally: cur.close() finally: conn.close()
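A small end-to-end sketch; the connection URL is a placeholder (any of the connection styles shown in the class docstring works), and messages round-trip through the message_store table.

.. code-block:: python

    from libs.core.langchain_core.messages import AIMessage, HumanMessage

    from langchain_community.chat_message_histories import (
        SingleStoreDBChatMessageHistory,
    )

    history = SingleStoreDBChatMessageHistory(
        session_id="my-session",
        host="https://user:[email protected]:3306/database",  # placeholder URL
    )
    history.add_message(HumanMessage(content="Hi there!"))
    history.add_message(AIMessage(content="Hello! How can I help?"))
    print(history.messages)
    history.clear()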
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~callbacks~flyte_callback.py
"""FlyteKit callback handler.""" from __future__ import annotations import logging from copy import deepcopy from typing import TYPE_CHECKING, Any, Dict, List, Tuple from libs.core.langchain_core.agents import AgentAction, AgentFinish from libs.core.langchain_core.callbacks import BaseCallbackHandler from libs.core.langchain_core.outputs import LLMResult from langchain_community.callbacks.utils import ( BaseMetadataCallbackHandler, flatten_dict, import_pandas, import_spacy, import_textstat, ) if TYPE_CHECKING: import flytekit from flytekitplugins.deck import renderer logger = logging.getLogger(__name__) def import_flytekit() -> Tuple[flytekit, renderer]: """Import flytekit and flytekitplugins-deck-standard.""" try: import flytekit # noqa: F401 from flytekitplugins.deck import renderer # noqa: F401 except ImportError: raise ImportError( "To use the flyte callback manager you need" "to have the `flytekit` and `flytekitplugins-deck-standard`" "packages installed. Please install them with `pip install flytekit`" "and `pip install flytekitplugins-deck-standard`." ) return flytekit, renderer def analyze_text( text: str, nlp: Any = None, textstat: Any = None, ) -> dict: """Analyze text using textstat and spacy. Parameters: text (str): The text to analyze. nlp (spacy.lang): The spacy language model to use for visualization. Returns: (dict): A dictionary containing the complexity metrics and visualization files serialized to HTML string. """ resp: Dict[str, Any] = {} if textstat is not None: text_complexity_metrics = { "flesch_reading_ease": textstat.flesch_reading_ease(text), "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text), "smog_index": textstat.smog_index(text), "coleman_liau_index": textstat.coleman_liau_index(text), "automated_readability_index": textstat.automated_readability_index(text), "dale_chall_readability_score": textstat.dale_chall_readability_score(text), "difficult_words": textstat.difficult_words(text), "linsear_write_formula": textstat.linsear_write_formula(text), "gunning_fog": textstat.gunning_fog(text), "fernandez_huerta": textstat.fernandez_huerta(text), "szigriszt_pazos": textstat.szigriszt_pazos(text), "gutierrez_polini": textstat.gutierrez_polini(text), "crawford": textstat.crawford(text), "gulpease_index": textstat.gulpease_index(text), "osman": textstat.osman(text), } resp.update({"text_complexity_metrics": text_complexity_metrics}) resp.update(text_complexity_metrics) if nlp is not None: spacy = import_spacy() doc = nlp(text) dep_out = spacy.displacy.render( # type: ignore doc, style="dep", jupyter=False, page=True ) ent_out = spacy.displacy.render( # type: ignore doc, style="ent", jupyter=False, page=True ) text_visualizations = { "dependency_tree": dep_out, "entities": ent_out, } resp.update(text_visualizations) return resp class FlyteCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): """This callback handler that is used within a Flyte task.""" def __init__(self) -> None: """Initialize callback handler.""" flytekit, renderer = import_flytekit() self.pandas = import_pandas() self.textstat = None try: self.textstat = import_textstat() except ImportError: logger.warning( "Textstat library is not installed. \ It may result in the inability to log \ certain metrics that can be captured with Textstat." ) spacy = None try: spacy = import_spacy() except ImportError: logger.warning( "Spacy library is not installed. \ It may result in the inability to log \ certain metrics that can be captured with Spacy." 
) super().__init__() self.nlp = None if spacy: try: self.nlp = spacy.load("en_core_web_sm") except OSError: logger.warning( "FlyteCallbackHandler uses spacy's en_core_web_sm model" " for certain metrics. To download," " run the following command in your terminal:" " `python -m spacy download en_core_web_sm`" ) self.table_renderer = renderer.TableRenderer self.markdown_renderer = renderer.MarkdownRenderer self.deck = flytekit.Deck( "LangChain Metrics", self.markdown_renderer().to_html("## LangChain Metrics"), ) def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when LLM starts.""" self.step += 1 self.llm_starts += 1 self.starts += 1 resp: Dict[str, Any] = {} resp.update({"action": "on_llm_start"}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) prompt_responses = [] for prompt in prompts: prompt_responses.append(prompt) resp.update({"prompts": prompt_responses}) self.deck.append(self.markdown_renderer().to_html("### LLM Start")) self.deck.append( self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n" ) def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Run when LLM generates a new token.""" def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" self.step += 1 self.llm_ends += 1 self.ends += 1 resp: Dict[str, Any] = {} resp.update({"action": "on_llm_end"}) resp.update(flatten_dict(response.llm_output or {})) resp.update(self.get_custom_callback_meta()) self.deck.append(self.markdown_renderer().to_html("### LLM End")) self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp]))) for generations in response.generations: for generation in generations: generation_resp = deepcopy(resp) generation_resp.update(flatten_dict(generation.dict())) if self.nlp or self.textstat: generation_resp.update( analyze_text( generation.text, nlp=self.nlp, textstat=self.textstat ) ) complexity_metrics: Dict[str, float] = generation_resp.pop( "text_complexity_metrics" ) # type: ignore # noqa: E501 self.deck.append( self.markdown_renderer().to_html("#### Text Complexity Metrics") ) self.deck.append( self.table_renderer().to_html( self.pandas.DataFrame([complexity_metrics]) ) + "\n" ) dependency_tree = generation_resp["dependency_tree"] self.deck.append( self.markdown_renderer().to_html("#### Dependency Tree") ) self.deck.append(dependency_tree) entities = generation_resp["entities"] self.deck.append(self.markdown_renderer().to_html("#### Entities")) self.deck.append(entities) else: self.deck.append( self.markdown_renderer().to_html("#### Generated Response") ) self.deck.append(self.markdown_renderer().to_html(generation.text)) def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: """Run when LLM errors.""" self.step += 1 self.errors += 1 def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Run when chain starts running.""" self.step += 1 self.chain_starts += 1 self.starts += 1 resp: Dict[str, Any] = {} resp.update({"action": "on_chain_start"}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()]) input_resp = deepcopy(resp) input_resp["inputs"] = chain_input self.deck.append(self.markdown_renderer().to_html("### Chain Start")) self.deck.append( self.table_renderer().to_html(self.pandas.DataFrame([input_resp])) + "\n" ) def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> 
None: """Run when chain ends running.""" self.step += 1 self.chain_ends += 1 self.ends += 1 resp: Dict[str, Any] = {} chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()]) resp.update({"action": "on_chain_end", "outputs": chain_output}) resp.update(self.get_custom_callback_meta()) self.deck.append(self.markdown_renderer().to_html("### Chain End")) self.deck.append( self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n" ) def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: """Run when chain errors.""" self.step += 1 self.errors += 1 def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: """Run when tool starts running.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp: Dict[str, Any] = {} resp.update({"action": "on_tool_start", "input_str": input_str}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) self.deck.append(self.markdown_renderer().to_html("### Tool Start")) self.deck.append( self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n" ) def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" self.step += 1 self.tool_ends += 1 self.ends += 1 resp: Dict[str, Any] = {} resp.update({"action": "on_tool_end", "output": output}) resp.update(self.get_custom_callback_meta()) self.deck.append(self.markdown_renderer().to_html("### Tool End")) self.deck.append( self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n" ) def on_tool_error(self, error: BaseException, **kwargs: Any) -> None: """Run when tool errors.""" self.step += 1 self.errors += 1 def on_text(self, text: str, **kwargs: Any) -> None: """ Run when agent is ending. """ self.step += 1 self.text_ctr += 1 resp: Dict[str, Any] = {} resp.update({"action": "on_text", "text": text}) resp.update(self.get_custom_callback_meta()) self.deck.append(self.markdown_renderer().to_html("### On Text")) self.deck.append( self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n" ) def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run when agent ends running.""" self.step += 1 self.agent_ends += 1 self.ends += 1 resp: Dict[str, Any] = {} resp.update( { "action": "on_agent_finish", "output": finish.return_values["output"], "log": finish.log, } ) resp.update(self.get_custom_callback_meta()) self.deck.append(self.markdown_renderer().to_html("### Agent Finish")) self.deck.append( self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n" ) def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp: Dict[str, Any] = {} resp.update( { "action": "on_agent_action", "tool": action.tool, "tool_input": action.tool_input, "log": action.log, } ) resp.update(self.get_custom_callback_meta()) self.deck.append(self.markdown_renderer().to_html("### Agent Action")) self.deck.append( self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n" )
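A minimal sketch of wiring the handler into an LLM call; FakeListLLM is only a stand-in so the snippet runs without model credentials, and in practice this would execute inside a Flyte task with flytekit and flytekitplugins-deck-standard installed.

.. code-block:: python

    from langchain_community.callbacks.flyte_callback import FlyteCallbackHandler
    from langchain_community.llms.fake import FakeListLLM

    flyte_cb = FlyteCallbackHandler()  # builds a flytekit Deck for the metrics
    llm = FakeListLLM(responses=["Hello from the deck!"], callbacks=[flyte_cb])
    llm.invoke("Say hello")  # on_llm_start / on_llm_end render tables into the Deck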
[ "[]" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~etherscan.py
import os import re from typing import Iterator, List import requests from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class EtherscanLoader(BaseLoader): """Load transactions from `Ethereum` mainnet. The loader uses the Etherscan API to interact with Ethereum mainnet. The ETHERSCAN_API_KEY environment variable must be set to use this loader. """ def __init__( self, account_address: str, api_key: str = "docs-demo", filter: str = "normal_transaction", page: int = 1, offset: int = 10, start_block: int = 0, end_block: int = 99999999, sort: str = "desc", ): self.account_address = account_address self.api_key = os.environ.get("ETHERSCAN_API_KEY") or api_key self.filter = filter self.page = page self.offset = offset self.start_block = start_block self.end_block = end_block self.sort = sort if not self.api_key: raise ValueError("Etherscan API key not provided") if not re.match(r"^0x[a-fA-F0-9]{40}$", self.account_address): raise ValueError(f"Invalid contract address {self.account_address}") if filter not in [ "normal_transaction", "internal_transaction", "erc20_transaction", "eth_balance", "erc721_transaction", "erc1155_transaction", ]: raise ValueError(f"Invalid filter {filter}") def lazy_load(self) -> Iterator[Document]: """Lazy load Documents from table.""" result = [] if self.filter == "normal_transaction": result = self.getNormTx() elif self.filter == "internal_transaction": result = self.getInternalTx() elif self.filter == "erc20_transaction": result = self.getERC20Tx() elif self.filter == "eth_balance": result = self.getEthBalance() elif self.filter == "erc721_transaction": result = self.getERC721Tx() elif self.filter == "erc1155_transaction": result = self.getERC1155Tx() else: raise ValueError(f"Invalid filter {filter}") for doc in result: yield doc def load(self) -> List[Document]: """Load transactions from a specific account by Etherscan.""" return list(self.lazy_load()) def getNormTx(self) -> List[Document]: url = ( f"https://api.etherscan.io/api?module=account&action=txlist&address={self.account_address}" f"&startblock={self.start_block}&endblock={self.end_block}&page={self.page}" f"&offset={self.offset}&sort={self.sort}&apikey={self.api_key}" ) try: response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: print("Error occurred while making the request:", e) items = response.json()["result"] result = [] if len(items) == 0: return [Document(page_content="")] for item in items: content = str(item) metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]} result.append(Document(page_content=content, metadata=metadata)) return result def getEthBalance(self) -> List[Document]: url = ( f"https://api.etherscan.io/api?module=account&action=balance" f"&address={self.account_address}&tag=latest&apikey={self.api_key}" ) try: response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: print("Error occurred while making the request:", e) return [Document(page_content=response.json()["result"])] def getInternalTx(self) -> List[Document]: url = ( f"https://api.etherscan.io/api?module=account&action=txlistinternal" f"&address={self.account_address}&startblock={self.start_block}" f"&endblock={self.end_block}&page={self.page}&offset={self.offset}" f"&sort={self.sort}&apikey={self.api_key}" ) try: response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: print("Error
occurred while making the request:", e) items = response.json()["result"] result = [] if len(items) == 0: return [Document(page_content="")] for item in items: content = str(item) metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]} result.append(Document(page_content=content, metadata=metadata)) return result def getERC20Tx(self) -> List[Document]: url = ( f"https://api.etherscan.io/api?module=account&action=tokentx" f"&address={self.account_address}&startblock={self.start_block}" f"&endblock={self.end_block}&page={self.page}&offset={self.offset}" f"&sort={self.sort}&apikey={self.api_key}" ) try: response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: print("Error occurred while making the request:", e) items = response.json()["result"] result = [] if len(items) == 0: return [Document(page_content="")] for item in items: content = str(item) metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]} result.append(Document(page_content=content, metadata=metadata)) return result def getERC721Tx(self) -> List[Document]: url = ( f"https://api.etherscan.io/api?module=account&action=tokennfttx" f"&address={self.account_address}&startblock={self.start_block}" f"&endblock={self.end_block}&page={self.page}&offset={self.offset}" f"&sort={self.sort}&apikey={self.api_key}" ) try: response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: print("Error occurred while making the request:", e) items = response.json()["result"] result = [] if len(items) == 0: return [Document(page_content="")] for item in items: content = str(item) metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]} result.append(Document(page_content=content, metadata=metadata)) return result def getERC1155Tx(self) -> List[Document]: url = ( f"https://api.etherscan.io/api?module=account&action=token1155tx" f"&address={self.account_address}&startblock={self.start_block}" f"&endblock={self.end_block}&page={self.page}&offset={self.offset}" f"&sort={self.sort}&apikey={self.api_key}" ) try: response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: print("Error occurred while making the request:", e) items = response.json()["result"] result = [] if len(items) == 0: return [Document(page_content="")] for item in items: content = str(item) metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]} result.append(Document(page_content=content, metadata=metadata)) return result
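A usage sketch; the account address below is only a placeholder (any 0x-prefixed, 40-hex-character address passes the validation), and ETHERSCAN_API_KEY should be set for real requests.

.. code-block:: python

    from langchain_community.document_loaders import EtherscanLoader

    loader = EtherscanLoader(
        account_address="0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b",  # placeholder
        filter="erc20_transaction",
        offset=20,
    )
    docs = loader.load()  # one Document per transaction, with tx metadata attached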
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~tools~render.py
"""Different methods for rendering Tools to be passed to LLMs. Depending on the LLM you are using and the prompting strategy you are using, you may want Tools to be rendered in a different way. This module contains various ways to render tools. """ from typing import List # For backwards compatibility from langchain_community.tools.convert_to_openai import ( format_tool_to_openai_function, format_tool_to_openai_tool, ) from libs.core.langchain_core.tools import BaseTool __all__ = [ "render_text_description", "render_text_description_and_args", "format_tool_to_openai_tool", "format_tool_to_openai_function", ] def render_text_description(tools: List[BaseTool]) -> str: """Render the tool name and description in plain text. Output will be in the format of: .. code-block:: markdown search: This tool is used for search calculator: This tool is used for math """ return "\n".join([f"{tool.name}: {tool.description}" for tool in tools]) def render_text_description_and_args(tools: List[BaseTool]) -> str: """Render the tool name, description, and args in plain text. Output will be in the format of: .. code-block:: markdown search: This tool is used for search, args: {"query": {"type": "string"}} calculator: This tool is used for math, \ args: {"expression": {"type": "string"}} """ tool_strings = [] for tool in tools: args_schema = str(tool.args) tool_strings.append(f"{tool.name}: {tool.description}, args: {args_schema}") return "\n".join(tool_strings)
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~bigquery.py
from __future__ import annotations from typing import TYPE_CHECKING, List, Optional from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader from langchain_community.utilities.vertexai import get_client_info if TYPE_CHECKING: from google.auth.credentials import Credentials class BigQueryLoader(BaseLoader): """Load from the Google Cloud Platform `BigQuery`. Each document represents one row of the result. The `page_content_columns` are written into the `page_content` of the document. The `metadata_columns` are written into the `metadata` of the document. By default, all columns are written into the `page_content` and none into the `metadata`. """ def __init__( self, query: str, project: Optional[str] = None, page_content_columns: Optional[List[str]] = None, metadata_columns: Optional[List[str]] = None, credentials: Optional[Credentials] = None, ): """Initialize BigQuery document loader. Args: query: The query to run in BigQuery. project: Optional. The project to run the query in. page_content_columns: Optional. The columns to write into the `page_content` of the document. metadata_columns: Optional. The columns to write into the `metadata` of the document. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine (`google.auth.compute_engine.Credentials`) or Service Account (`google.oauth2.service_account.Credentials`) credentials directly. """ self.query = query self.project = project self.page_content_columns = page_content_columns self.metadata_columns = metadata_columns self.credentials = credentials def load(self) -> List[Document]: try: from google.cloud import bigquery except ImportError as ex: raise ImportError( "Could not import google-cloud-bigquery python package. " "Please install it with `pip install google-cloud-bigquery`." ) from ex bq_client = bigquery.Client( credentials=self.credentials, project=self.project, client_info=get_client_info(module="bigquery"), ) if not bq_client.project: error_desc = ( "GCP project for Big Query is not set! Either provide a " "`project` argument during BigQueryLoader instantiation, " "or set a default project with `gcloud config set project` " "command." ) raise ValueError(error_desc) query_result = bq_client.query(self.query).result() docs: List[Document] = [] page_content_columns = self.page_content_columns metadata_columns = self.metadata_columns if page_content_columns is None: page_content_columns = [column.name for column in query_result.schema] if metadata_columns is None: metadata_columns = [] for row in query_result: page_content = "\n".join( f"{k}: {v}" for k, v in row.items() if k in page_content_columns ) metadata = {k: v for k, v in row.items() if k in metadata_columns} doc = Document(page_content=page_content, metadata=metadata) docs.append(doc) return docs
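A usage sketch; the project, query, and column names are placeholders, and credentials fall back to Application Default Credentials when not passed explicitly.

.. code-block:: python

    from langchain_community.document_loaders import BigQueryLoader

    loader = BigQueryLoader(
        query="SELECT title, body, author FROM `my_project.my_dataset.articles` LIMIT 10",
        project="my_project",  # placeholder project
        page_content_columns=["title", "body"],
        metadata_columns=["author"],
    )
    docs = loader.load()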
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~playwright~get_elements.py
from __future__ import annotations import json from typing import TYPE_CHECKING, List, Optional, Sequence, Type from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from langchain_community.tools.playwright.base import BaseBrowserTool from langchain_community.tools.playwright.utils import ( aget_current_page, get_current_page, ) if TYPE_CHECKING: from playwright.async_api import Page as AsyncPage from playwright.sync_api import Page as SyncPage class GetElementsToolInput(BaseModel): """Input for GetElementsTool.""" selector: str = Field( ..., description="CSS selector, such as '*', 'div', 'p', 'a', #id, .classname", ) attributes: List[str] = Field( default_factory=lambda: ["innerText"], description="Set of attributes to retrieve for each element", ) async def _aget_elements( page: AsyncPage, selector: str, attributes: Sequence[str] ) -> List[dict]: """Get elements matching the given CSS selector.""" elements = await page.query_selector_all(selector) results = [] for element in elements: result = {} for attribute in attributes: if attribute == "innerText": val: Optional[str] = await element.inner_text() else: val = await element.get_attribute(attribute) if val is not None and val.strip() != "": result[attribute] = val if result: results.append(result) return results def _get_elements( page: SyncPage, selector: str, attributes: Sequence[str] ) -> List[dict]: """Get elements matching the given CSS selector.""" elements = page.query_selector_all(selector) results = [] for element in elements: result = {} for attribute in attributes: if attribute == "innerText": val: Optional[str] = element.inner_text() else: val = element.get_attribute(attribute) if val is not None and val.strip() != "": result[attribute] = val if result: results.append(result) return results class GetElementsTool(BaseBrowserTool): """Tool for getting elements in the current web page matching a CSS selector.""" name: str = "get_elements" description: str = ( "Retrieve elements in the current web page matching the given CSS selector" ) args_schema: Type[BaseModel] = GetElementsToolInput def _run( self, selector: str, attributes: Sequence[str] = ["innerText"], run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) # Navigate to the desired webpage before using this tool results = _get_elements(page, selector, attributes) return json.dumps(results, ensure_ascii=False) async def _arun( self, selector: str, attributes: Sequence[str] = ["innerText"], run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) # Navigate to the desired webpage before using this tool results = await _aget_elements(page, selector, attributes) return json.dumps(results, ensure_ascii=False)
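A synchronous usage sketch; it assumes playwright is installed with browsers downloaded, and uses NavigateTool from the same package to load a page first, as the comments above require.

.. code-block:: python

    from langchain_community.tools.playwright import GetElementsTool, NavigateTool
    from langchain_community.tools.playwright.utils import create_sync_playwright_browser

    browser = create_sync_playwright_browser()
    navigate = NavigateTool.from_browser(sync_browser=browser)
    get_elements = GetElementsTool.from_browser(sync_browser=browser)

    navigate.run({"url": "https://example.com"})
    print(get_elements.run({"selector": "h1", "attributes": ["innerText"]}))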
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~yandex.py
"""Wrapper around YandexGPT chat models.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional, cast from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.chat_models import BaseChatModel from libs.core.langchain_core.messages import ( AIMessage, BaseMessage, HumanMessage, SystemMessage, ) from libs.core.langchain_core.outputs import ChatGeneration, ChatResult from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain_community.llms.utils import enforce_stop_tokens from langchain_community.llms.yandex import _BaseYandexGPT logger = logging.getLogger(__name__) def _parse_message(role: str, text: str) -> Dict: return {"role": role, "text": text} def _parse_chat_history(history: List[BaseMessage]) -> List[Dict[str, str]]: """Parse a sequence of messages into history. Returns: A list of parsed messages. """ chat_history = [] for message in history: content = cast(str, message.content) if isinstance(message, HumanMessage): chat_history.append(_parse_message("user", content)) if isinstance(message, AIMessage): chat_history.append(_parse_message("assistant", content)) if isinstance(message, SystemMessage): chat_history.append(_parse_message("system", content)) return chat_history class ChatYandexGPT(_BaseYandexGPT, BaseChatModel): """Wrapper around YandexGPT large language models. There are two authentication options for the service account with the ``ai.languageModels.user`` role: - You can specify the token in a constructor parameter `iam_token` or in an environment variable `YC_IAM_TOKEN`. - You can specify the key in a constructor parameter `api_key` or in an environment variable `YC_API_KEY`. Example: .. code-block:: python from langchain_community.chat_models import ChatYandexGPT chat_model = ChatYandexGPT(iam_token="t1.9eu...") """ def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. """ text = completion_with_retry(self, messages=messages) text = text if stop is None else enforce_stop_tokens(text, stop) message = AIMessage(content=text) return ChatResult(generations=[ChatGeneration(message=message)]) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Async method to generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. 
""" text = await acompletion_with_retry(self, messages=messages) text = text if stop is None else enforce_stop_tokens(text, stop) message = AIMessage(content=text) return ChatResult(generations=[ChatGeneration(message=message)]) def _make_request( self: ChatYandexGPT, messages: List[BaseMessage], ) -> str: try: import grpc from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import ( CompletionOptions, Message, ) from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import ( # noqa: E501 CompletionRequest, ) from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import ( # noqa: E501 TextGenerationServiceStub, ) except ImportError as e: raise ImportError( "Please install YandexCloud SDK" " with `pip install yandexcloud`." ) from e if not messages: raise ValueError("You should provide at least one message to start the chat!") message_history = _parse_chat_history(messages) channel_credentials = grpc.ssl_channel_credentials() channel = grpc.secure_channel(self.url, channel_credentials) request = CompletionRequest( model_uri=self.model_uri, completion_options=CompletionOptions( temperature=DoubleValue(value=self.temperature), max_tokens=Int64Value(value=self.max_tokens), ), messages=[Message(**message) for message in message_history], ) stub = TextGenerationServiceStub(channel) res = stub.Completion(request, metadata=self._grpc_metadata) return list(res)[0].alternatives[0].message.text async def _amake_request(self: ChatYandexGPT, messages: List[BaseMessage]) -> str: try: import asyncio import grpc from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import ( CompletionOptions, Message, ) from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import ( # noqa: E501 CompletionRequest, CompletionResponse, ) from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import ( # noqa: E501 TextGenerationAsyncServiceStub, ) from yandex.cloud.operation.operation_service_pb2 import GetOperationRequest from yandex.cloud.operation.operation_service_pb2_grpc import ( OperationServiceStub, ) except ImportError as e: raise ImportError( "Please install YandexCloud SDK" " with `pip install yandexcloud`." 
) from e if not messages: raise ValueError("You should provide at least one message to start the chat!") message_history = _parse_chat_history(messages) operation_api_url = "operation.api.cloud.yandex.net:443" channel_credentials = grpc.ssl_channel_credentials() async with grpc.aio.secure_channel(self.url, channel_credentials) as channel: request = CompletionRequest( model_uri=self.model_uri, completion_options=CompletionOptions( temperature=DoubleValue(value=self.temperature), max_tokens=Int64Value(value=self.max_tokens), ), messages=[Message(**message) for message in message_history], ) stub = TextGenerationAsyncServiceStub(channel) operation = await stub.Completion(request, metadata=self._grpc_metadata) async with grpc.aio.secure_channel( operation_api_url, channel_credentials ) as operation_channel: operation_stub = OperationServiceStub(operation_channel) while not operation.done: await asyncio.sleep(1) operation_request = GetOperationRequest(operation_id=operation.id) operation = await operation_stub.Get( operation_request, metadata=self._grpc_metadata ) completion_response = CompletionResponse() operation.response.Unpack(completion_response) return completion_response.alternatives[0].message.text def _create_retry_decorator(llm: ChatYandexGPT) -> Callable[[Any], Any]: from grpc import RpcError min_seconds = 1 max_seconds = 60 return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type((RpcError))), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: ChatYandexGPT, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**_kwargs: Any) -> Any: return _make_request(llm, **_kwargs) return _completion_with_retry(**kwargs) async def acompletion_with_retry(llm: ChatYandexGPT, **kwargs: Any) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**_kwargs: Any) -> Any: return await _amake_request(llm, **_kwargs) return await _completion_with_retry(**kwargs)
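A minimal chat sketch; it assumes yandexcloud is installed and that either YC_API_KEY or YC_IAM_TOKEN is set, per the authentication options in the class docstring.

.. code-block:: python

    from libs.core.langchain_core.messages import HumanMessage

    from langchain_community.chat_models import ChatYandexGPT

    chat = ChatYandexGPT()  # credentials picked up from the environment
    reply = chat([HumanMessage(content="Translate 'hello, world' into French.")])
    print(reply.content)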
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~chat_models~test_vertexai.py
"""Test Vertex AI API wrapper. In order to run this test, you need to install VertexAI SDK (that is is the private preview) and be whitelisted to list the models themselves: In order to run this test, you need to install VertexAI SDK pip install google-cloud-aiplatform>=1.35.0 Your end-user credentials would be used to make the calls (make sure you've run `gcloud auth login` first). """ from typing import Optional from unittest.mock import MagicMock, Mock, patch import pytest from libs.core.langchain_core.messages import ( AIMessage, AIMessageChunk, HumanMessage, SystemMessage, ) from libs.core.langchain_core.outputs import LLMResult from langchain_community.chat_models import ChatVertexAI from langchain_community.chat_models.vertexai import ( _parse_chat_history, _parse_examples, ) model_names_to_test = [None, "codechat-bison", "chat-bison", "gemini-pro"] @pytest.mark.parametrize("model_name", model_names_to_test) def test_vertexai_instantiation(model_name: str) -> None: if model_name: model = ChatVertexAI(model_name=model_name) else: model = ChatVertexAI() assert model._llm_type == "vertexai" try: assert model.model_name == model.client._model_id except AttributeError: assert model.model_name == model.client._model_name.split("/")[-1] @pytest.mark.scheduled @pytest.mark.parametrize("model_name", model_names_to_test) def test_vertexai_single_call(model_name: str) -> None: if model_name: model = ChatVertexAI(model_name=model_name) else: model = ChatVertexAI() message = HumanMessage(content="Hello") response = model([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) # mark xfail because Vertex API randomly doesn't respect # the n/candidate_count parameter @pytest.mark.xfail @pytest.mark.scheduled def test_candidates() -> None: model = ChatVertexAI(model_name="chat-bison@001", temperature=0.3, n=2) message = HumanMessage(content="Hello") response = model.generate(messages=[[message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 1 assert len(response.generations[0]) == 2 @pytest.mark.scheduled @pytest.mark.parametrize("model_name", ["chat-bison@001", "gemini-pro"]) async def test_vertexai_agenerate(model_name: str) -> None: model = ChatVertexAI(temperature=0, model_name=model_name) message = HumanMessage(content="Hello") response = await model.agenerate([[message]]) assert isinstance(response, LLMResult) assert isinstance(response.generations[0][0].message, AIMessage) # type: ignore sync_response = model.generate([[message]]) assert response.generations[0][0] == sync_response.generations[0][0] @pytest.mark.scheduled @pytest.mark.parametrize("model_name", ["chat-bison@001", "gemini-pro"]) def test_vertexai_stream(model_name: str) -> None: model = ChatVertexAI(temperature=0, model_name=model_name) message = HumanMessage(content="Hello") sync_response = model.stream([message]) for chunk in sync_response: assert isinstance(chunk, AIMessageChunk) @pytest.mark.scheduled def test_vertexai_single_call_with_context() -> None: model = ChatVertexAI() raw_context = ( "My name is Ned. You are my personal assistant. My favorite movies " "are Lord of the Rings and Hobbit." ) question = ( "Hello, could you recommend a good movie for me to watch this evening, please?" 
) context = SystemMessage(content=raw_context) message = HumanMessage(content=question) response = model([context, message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_multimodal() -> None: llm = ChatVertexAI(model_name="gemini-ultra-vision") gcs_url = ( "gs://cloud-samples-data/generative-ai/image/" "320px-Felis_catus-cat_on_snow.jpg" ) image_message = { "type": "image_url", "image_url": {"url": gcs_url}, } text_message = { "type": "text", "text": "What is shown in this image?", } message = HumanMessage(content=[text_message, image_message]) output = llm([message]) assert isinstance(output.content, str) def test_multimodal_history() -> None: llm = ChatVertexAI(model_name="gemini-ultra-vision") gcs_url = ( "gs://cloud-samples-data/generative-ai/image/" "320px-Felis_catus-cat_on_snow.jpg" ) image_message = { "type": "image_url", "image_url": {"url": gcs_url}, } text_message = { "type": "text", "text": "What is shown in this image?", } message1 = HumanMessage(content=[text_message, image_message]) message2 = AIMessage( content=( "This is a picture of a cat in the snow. The cat is a tabby cat, which is " "a type of cat with a striped coat. The cat is standing in the snow, and " "its fur is covered in snow." ) ) message3 = HumanMessage(content="What time of day is it?") response = llm([message1, message2, message3]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @pytest.mark.scheduled def test_vertexai_single_call_with_examples() -> None: model = ChatVertexAI() raw_context = "My name is Ned. You are my personal assistant." question = "2+2" text_question, text_answer = "4+4", "8" inp = HumanMessage(content=text_question) output = AIMessage(content=text_answer) context = SystemMessage(content=raw_context) message = HumanMessage(content=question) response = model([context, message], examples=[inp, output]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @pytest.mark.scheduled @pytest.mark.parametrize("model_name", model_names_to_test) def test_vertexai_single_call_with_history(model_name: str) -> None: if model_name: model = ChatVertexAI(model_name=model_name) else: model = ChatVertexAI() text_question1, text_answer1 = "How much is 2+2?", "4" text_question2 = "How much is 3+3?" message1 = HumanMessage(content=text_question1) message2 = AIMessage(content=text_answer1) message3 = HumanMessage(content=text_question2) response = model([message1, message2, message3]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_parse_chat_history_correct() -> None: from vertexai.language_models import ChatMessage text_context = ( "My name is Ned. You are my personal assistant. My " "favorite movies are Lord of the Rings and Hobbit." ) context = SystemMessage(content=text_context) text_question = ( "Hello, could you recommend a good movie for me to watch this evening, please?" ) question = HumanMessage(content=text_question) text_answer = ( "Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring " "(2001): This is the first movie in the Lord of the Rings trilogy." 
) answer = AIMessage(content=text_answer) history = _parse_chat_history([context, question, answer, question, answer]) assert history.context == context.content assert len(history.history) == 4 assert history.history == [ ChatMessage(content=text_question, author="user"), ChatMessage(content=text_answer, author="bot"), ChatMessage(content=text_question, author="user"), ChatMessage(content=text_answer, author="bot"), ] def test_vertexai_single_call_fails_no_message() -> None: chat = ChatVertexAI() with pytest.raises(ValueError) as exc_info: _ = chat([]) assert ( str(exc_info.value) == "You should provide at least one message to start the chat!" ) @pytest.mark.parametrize("stop", [None, "stop1"]) def test_vertexai_args_passed(stop: Optional[str]) -> None: response_text = "Goodbye" user_prompt = "Hello" prompt_params = { "max_output_tokens": 1, "temperature": 10000.0, "top_k": 10, "top_p": 0.5, } # Mock the library to ensure the args are passed correctly with patch( "vertexai.language_models._language_models.ChatModel.start_chat" ) as start_chat: mock_response = MagicMock() mock_response.candidates = [Mock(text=response_text)] mock_chat = MagicMock() start_chat.return_value = mock_chat mock_send_message = MagicMock(return_value=mock_response) mock_chat.send_message = mock_send_message model = ChatVertexAI(**prompt_params) message = HumanMessage(content=user_prompt) if stop: response = model([message], stop=[stop]) else: response = model([message]) assert response.content == response_text mock_send_message.assert_called_once_with(user_prompt, candidate_count=1) expected_stop_sequence = [stop] if stop else None start_chat.assert_called_once_with( context=None, message_history=[], **prompt_params, stop_sequences=expected_stop_sequence, ) def test_parse_examples_correct() -> None: from vertexai.language_models import InputOutputTextPair text_question = ( "Hello, could you recommend a good movie for me to watch this evening, please?" ) question = HumanMessage(content=text_question) text_answer = ( "Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring " "(2001): This is the first movie in the Lord of the Rings trilogy." ) answer = AIMessage(content=text_answer) examples = _parse_examples([question, answer, question, answer]) assert len(examples) == 2 assert examples == [ InputOutputTextPair(input_text=text_question, output_text=text_answer), InputOutputTextPair(input_text=text_question, output_text=text_answer), ] def test_parse_examples_failes_wrong_sequence() -> None: with pytest.raises(ValueError) as exc_info: _ = _parse_examples([AIMessage(content="a")]) print(str(exc_info.value)) assert ( str(exc_info.value) == "Expect examples to have an even amount of messages, got 1." )
[ "{'max_output_tokens': 1, 'temperature': 10000.0, 'top_k': 10, 'top_p': 0.5}", "This is a picture of a cat in the snow. The cat is a tabby cat, which is a type of cat with a striped coat. The cat is standing in the snow, and its fur is covered in snow.", "a", "My name is Ned. You are my personal assistant.", "[PLACEHOLDER, PLACEHOLDER]", "What time of day is it?", "2+2", "Hello", "How much is 3+3?" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~storage~exceptions.py
from libs.core.langchain_core.exceptions import LangChainException class InvalidKeyException(LangChainException): """Raised when a key is invalid; e.g., uses incorrect characters."""
[]
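A small usage sketch for the exception above: a key validator that rejects disallowed characters. The regex and helper function are illustrative assumptions, not part of the library.

import re

from langchain_community.storage.exceptions import InvalidKeyException

_VALID_KEY = re.compile(r"^[A-Za-z0-9_.\-/]+$")  # illustrative rule, not the library's


def check_key(key: str) -> str:
    """Return the key unchanged, or raise InvalidKeyException."""
    if not _VALID_KEY.match(key):
        raise InvalidKeyException(f"Invalid key: {key!r}")
    return key


check_key("users/42/profile")    # ok
# check_key("bad key!")          # would raise InvalidKeyException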
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~qa_with_sources~loading.py
"""Load question answering with sources chains.""" from __future__ import annotations from typing import Any, Mapping, Optional, Protocol from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.prompts import BasePromptTemplate from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain from langchain.chains.combine_documents.reduce import ReduceDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.chains.qa_with_sources import ( map_reduce_prompt, refine_prompts, stuff_prompt, ) from langchain.chains.question_answering.map_rerank_prompt import ( PROMPT as MAP_RERANK_PROMPT, ) class LoadingCallable(Protocol): """Interface for loading the combine documents chain.""" def __call__( self, llm: BaseLanguageModel, **kwargs: Any ) -> BaseCombineDocumentsChain: """Callable to load the combine documents chain.""" def _load_map_rerank_chain( llm: BaseLanguageModel, prompt: BasePromptTemplate = MAP_RERANK_PROMPT, verbose: bool = False, document_variable_name: str = "context", rank_key: str = "score", answer_key: str = "answer", **kwargs: Any, ) -> MapRerankDocumentsChain: llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose) return MapRerankDocumentsChain( llm_chain=llm_chain, rank_key=rank_key, answer_key=answer_key, document_variable_name=document_variable_name, **kwargs, ) def _load_stuff_chain( llm: BaseLanguageModel, prompt: BasePromptTemplate = stuff_prompt.PROMPT, document_prompt: BasePromptTemplate = stuff_prompt.EXAMPLE_PROMPT, document_variable_name: str = "summaries", verbose: Optional[bool] = None, **kwargs: Any, ) -> StuffDocumentsChain: llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose) return StuffDocumentsChain( llm_chain=llm_chain, document_variable_name=document_variable_name, document_prompt=document_prompt, verbose=verbose, **kwargs, ) def _load_map_reduce_chain( llm: BaseLanguageModel, question_prompt: BasePromptTemplate = map_reduce_prompt.QUESTION_PROMPT, combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT, document_prompt: BasePromptTemplate = map_reduce_prompt.EXAMPLE_PROMPT, combine_document_variable_name: str = "summaries", map_reduce_document_variable_name: str = "context", collapse_prompt: Optional[BasePromptTemplate] = None, reduce_llm: Optional[BaseLanguageModel] = None, collapse_llm: Optional[BaseLanguageModel] = None, verbose: Optional[bool] = None, token_max: int = 3000, **kwargs: Any, ) -> MapReduceDocumentsChain: map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose) _reduce_llm = reduce_llm or llm reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose) combine_documents_chain = StuffDocumentsChain( llm_chain=reduce_chain, document_variable_name=combine_document_variable_name, document_prompt=document_prompt, verbose=verbose, ) if collapse_prompt is None: collapse_chain = None if collapse_llm is not None: raise ValueError( "collapse_llm provided, but collapse_prompt was not: please " "provide one or stop providing collapse_llm." 
) else: _collapse_llm = collapse_llm or llm collapse_chain = StuffDocumentsChain( llm_chain=LLMChain( llm=_collapse_llm, prompt=collapse_prompt, verbose=verbose, ), document_variable_name=combine_document_variable_name, document_prompt=document_prompt, ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, collapse_documents_chain=collapse_chain, token_max=token_max, verbose=verbose, ) return MapReduceDocumentsChain( llm_chain=map_chain, reduce_documents_chain=reduce_documents_chain, document_variable_name=map_reduce_document_variable_name, verbose=verbose, **kwargs, ) def _load_refine_chain( llm: BaseLanguageModel, question_prompt: BasePromptTemplate = refine_prompts.DEFAULT_TEXT_QA_PROMPT, refine_prompt: BasePromptTemplate = refine_prompts.DEFAULT_REFINE_PROMPT, document_prompt: BasePromptTemplate = refine_prompts.EXAMPLE_PROMPT, document_variable_name: str = "context_str", initial_response_name: str = "existing_answer", refine_llm: Optional[BaseLanguageModel] = None, verbose: Optional[bool] = None, **kwargs: Any, ) -> RefineDocumentsChain: initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose) _refine_llm = refine_llm or llm refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose) return RefineDocumentsChain( initial_llm_chain=initial_chain, refine_llm_chain=refine_chain, document_variable_name=document_variable_name, initial_response_name=initial_response_name, document_prompt=document_prompt, verbose=verbose, **kwargs, ) def load_qa_with_sources_chain( llm: BaseLanguageModel, chain_type: str = "stuff", verbose: Optional[bool] = None, **kwargs: Any, ) -> BaseCombineDocumentsChain: """Load a question answering with sources chain. Args: llm: Language Model to use in the chain. chain_type: Type of document combining chain to use. Should be one of "stuff", "map_reduce", "refine" and "map_rerank". verbose: Whether chains should be run in verbose mode or not. Note that this applies to all chains that make up the final chain. Returns: A chain to use for question answering with sources. """ loader_mapping: Mapping[str, LoadingCallable] = { "stuff": _load_stuff_chain, "map_reduce": _load_map_reduce_chain, "refine": _load_refine_chain, "map_rerank": _load_map_rerank_chain, } if chain_type not in loader_mapping: raise ValueError( f"Got unsupported chain type: {chain_type}. " f"Should be one of {loader_mapping.keys()}" ) _func: LoadingCallable = loader_mapping[chain_type] return _func(llm, verbose=verbose, **kwargs)
[]
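A hedged, offline sketch of loading the "stuff" variant above: FakeListLLM replays a canned answer in place of a real model, and the document's "source" metadata feeds the sources prompt.

from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain_community.llms.fake import FakeListLLM
from libs.core.langchain_core.documents import Document

# FakeListLLM replays a canned answer, so no API key is needed.
llm = FakeListLLM(responses=["Paris is the capital of France.\nSOURCES: doc-1"])
chain = load_qa_with_sources_chain(llm, chain_type="stuff")

docs = [
    Document(
        page_content="Paris is the capital of France.",
        metadata={"source": "doc-1"},
    )
]
result = chain.invoke(
    {"input_documents": docs, "question": "What is the capital of France?"}
)
print(result["output_text"])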
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~twitter.py
from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Union from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader if TYPE_CHECKING: import tweepy from tweepy import OAuth2BearerHandler, OAuthHandler def _dependable_tweepy_import() -> tweepy: try: import tweepy except ImportError: raise ImportError( "tweepy package not found, please install it with `pip install tweepy`" ) return tweepy class TwitterTweetLoader(BaseLoader): """Load `Twitter` tweets. Read tweets of the user's Twitter handle. First you need to go to `https://developer.twitter.com/en/docs/twitter-api /getting-started/getting-access-to-the-twitter-api` to get your token. And create a v2 version of the app. """ def __init__( self, auth_handler: Union[OAuthHandler, OAuth2BearerHandler], twitter_users: Sequence[str], number_tweets: Optional[int] = 100, ): self.auth = auth_handler self.twitter_users = twitter_users self.number_tweets = number_tweets def load(self) -> List[Document]: """Load tweets.""" tweepy = _dependable_tweepy_import() api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser()) results: List[Document] = [] for username in self.twitter_users: tweets = api.user_timeline(screen_name=username, count=self.number_tweets) user = api.get_user(screen_name=username) docs = self._format_tweets(tweets, user) results.extend(docs) return results def _format_tweets( self, tweets: List[Dict[str, Any]], user_info: dict ) -> Iterable[Document]: """Format tweets into a string.""" for tweet in tweets: metadata = { "created_at": tweet["created_at"], "user_info": user_info, } yield Document( page_content=tweet["text"], metadata=metadata, ) @classmethod def from_bearer_token( cls, oauth2_bearer_token: str, twitter_users: Sequence[str], number_tweets: Optional[int] = 100, ) -> TwitterTweetLoader: """Create a TwitterTweetLoader from OAuth2 bearer token.""" tweepy = _dependable_tweepy_import() auth = tweepy.OAuth2BearerHandler(oauth2_bearer_token) return cls( auth_handler=auth, twitter_users=twitter_users, number_tweets=number_tweets, ) @classmethod def from_secrets( cls, access_token: str, access_token_secret: str, consumer_key: str, consumer_secret: str, twitter_users: Sequence[str], number_tweets: Optional[int] = 100, ) -> TwitterTweetLoader: """Create a TwitterTweetLoader from access tokens and secrets.""" tweepy = _dependable_tweepy_import() auth = tweepy.OAuthHandler( access_token=access_token, access_token_secret=access_token_secret, consumer_key=consumer_key, consumer_secret=consumer_secret, ) return cls( auth_handler=auth, twitter_users=twitter_users, number_tweets=number_tweets, )
[]
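A hedged usage sketch for the loader above. It needs `pip install tweepy` plus a real bearer token; the token and handle below are placeholders.

from langchain_community.document_loaders import TwitterTweetLoader

loader = TwitterTweetLoader.from_bearer_token(
    oauth2_bearer_token="YOUR_BEARER_TOKEN",  # placeholder
    twitter_users=["hwchase17"],              # placeholder handle
    number_tweets=10,
)
docs = loader.load()  # one Document per tweet, with user info in metadata
for doc in docs[:3]:
    print(doc.metadata["created_at"], doc.page_content[:80])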
2024-01-10
mth93/langchain
libs~community~langchain_community~agent_toolkits~powerbi~chat_base.py
"""Power BI agent.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, List, Optional from libs.core.langchain_core.callbacks import BaseCallbackManager from libs.core.langchain_core.language_models.chat_models import BaseChatModel from langchain_community.agent_toolkits.powerbi.prompt import ( POWERBI_CHAT_PREFIX, POWERBI_CHAT_SUFFIX, ) from langchain_community.agent_toolkits.powerbi.toolkit import PowerBIToolkit from langchain_community.utilities.powerbi import PowerBIDataset if TYPE_CHECKING: from langchain.agents import AgentExecutor from langchain.agents.agent import AgentOutputParser from langchain.memory.chat_memory import BaseChatMemory def create_pbi_chat_agent( llm: BaseChatModel, toolkit: Optional[PowerBIToolkit] = None, powerbi: Optional[PowerBIDataset] = None, callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, prefix: str = POWERBI_CHAT_PREFIX, suffix: str = POWERBI_CHAT_SUFFIX, examples: Optional[str] = None, input_variables: Optional[List[str]] = None, memory: Optional[BaseChatMemory] = None, top_k: int = 10, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> AgentExecutor: """Construct a Power BI agent from a Chat LLM and tools. If you supply only a toolkit and no Power BI dataset, the same LLM is used for both. """ from langchain.agents import AgentExecutor from langchain.agents.conversational_chat.base import ConversationalChatAgent from langchain.memory import ConversationBufferMemory if toolkit is None: if powerbi is None: raise ValueError("Must provide either a toolkit or powerbi dataset") toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples) tools = toolkit.get_tools() tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names agent = ConversationalChatAgent.from_llm_and_tools( llm=llm, tools=tools, system_message=prefix.format(top_k=top_k).format(tables=tables), human_message=suffix, input_variables=input_variables, callback_manager=callback_manager, output_parser=output_parser, verbose=verbose, **kwargs, ) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, memory=memory or ConversationBufferMemory(memory_key="chat_history", return_messages=True), verbose=verbose, **(agent_executor_kwargs or {}), )
[]
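A hedged wiring sketch for create_pbi_chat_agent. The dataset ID, table names, tokens, and API key are placeholders, and the final call is left commented out because it requires live Power BI and OpenAI access.

from langchain_community.agent_toolkits.powerbi.chat_base import create_pbi_chat_agent
from langchain_community.chat_models import ChatOpenAI
from langchain_community.utilities.powerbi import PowerBIDataset

dataset = PowerBIDataset(
    dataset_id="<dataset-id>",            # placeholder
    table_names=["Sales", "Customers"],   # placeholder
    token="<azure-ad-token>",             # placeholder
)
llm = ChatOpenAI(temperature=0, openai_api_key="<openai-api-key>")  # placeholder
agent = create_pbi_chat_agent(llm=llm, powerbi=dataset, top_k=5)
# agent.run("How many rows are in the Sales table?")  # needs live credentials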
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~clarifai.py
from __future__ import annotations import logging import os import traceback import uuid from concurrent.futures import ThreadPoolExecutor from typing import Any, Iterable, List, Optional, Tuple import requests from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore logger = logging.getLogger(__name__) class Clarifai(VectorStore): """`Clarifai AI` vector store. To use, you should have the ``clarifai`` python SDK package installed. Example: .. code-block:: python from langchain_community.vectorstores import Clarifai from langchain_community.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = Clarifai("langchain_store", embeddings.embed_query) """ def __init__( self, user_id: Optional[str] = None, app_id: Optional[str] = None, number_of_docs: Optional[int] = None, pat: Optional[str] = None, ) -> None: """Initialize with Clarifai client. Args: user_id (Optional[str], optional): User ID. Defaults to None. app_id (Optional[str], optional): App ID. Defaults to None. pat (Optional[str], optional): Personal access token. Defaults to None. number_of_docs (Optional[int], optional): Number of documents to return during vector search. Defaults to None. api_base (Optional[str], optional): API base. Defaults to None. Raises: ValueError: If user ID, app ID or personal access token is not provided. """ self._user_id = user_id or os.environ.get("CLARIFAI_USER_ID") self._app_id = app_id or os.environ.get("CLARIFAI_APP_ID") if pat: os.environ["CLARIFAI_PAT"] = pat self._pat = os.environ.get("CLARIFAI_PAT") if self._user_id is None or self._app_id is None or self._pat is None: raise ValueError( "Could not find CLARIFAI_USER_ID, CLARIFAI_APP_ID or\ CLARIFAI_PAT in your environment. " "Please set those env variables with a valid user ID, \ app ID and personal access token \ from https://clarifai.com/settings/security." ) self._number_of_docs = number_of_docs def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add texts to the Clarifai vectorstore. This will push the text to a Clarifai application. Application use a base workflow that create and store embedding for each text. Make sure you are using a base workflow that is compatible with text (such as Language Understanding). Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. """ try: from clarifai.client.input import Inputs from google.protobuf.struct_pb2 import Struct except ImportError as e: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) from e ltexts = list(texts) length = len(ltexts) assert length > 0, "No texts provided to add to the vectorstore." if metadatas is not None: assert length == len( metadatas ), "Number of texts and metadatas should be the same." if ids is not None: assert len(ltexts) == len( ids ), "Number of text inputs and input ids should be the same." 
input_obj = Inputs(app_id=self._app_id, user_id=self._user_id) batch_size = 32 input_job_ids = [] for idx in range(0, length, batch_size): try: batch_texts = ltexts[idx : idx + batch_size] batch_metadatas = ( metadatas[idx : idx + batch_size] if metadatas else None ) if ids is None: batch_ids = [uuid.uuid4().hex for _ in range(len(batch_texts))] else: batch_ids = ids[idx : idx + batch_size] if batch_metadatas is not None: meta_list = [] for meta in batch_metadatas: meta_struct = Struct() meta_struct.update(meta) meta_list.append(meta_struct) input_batch = [ input_obj.get_text_input( input_id=batch_ids[i], raw_text=text, metadata=meta_list[i] if batch_metadatas else None, ) for i, text in enumerate(batch_texts) ] result_id = input_obj.upload_inputs(inputs=input_batch) input_job_ids.extend(result_id) logger.debug("Input posted successfully.") except Exception as error: logger.warning(f"Post inputs failed: {error}") traceback.print_exc() return input_job_ids def similarity_search_with_score( self, query: str, k: int = 4, filters: Optional[dict] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search with score using Clarifai. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of documents most similar to the query text. """ try: from clarifai.client.search import Search from clarifai_grpc.grpc.api import resources_pb2 from google.protobuf import json_format # type: ignore except ImportError as e: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) from e # Get number of docs to return if self._number_of_docs is not None: k = self._number_of_docs search_obj = Search(user_id=self._user_id, app_id=self._app_id, top_k=k) rank = [{"text_raw": query}] # Add filter by metadata if provided. if filters is not None: search_metadata = {"metadata": filters} search_response = search_obj.query(ranks=rank, filters=[search_metadata]) else: search_response = search_obj.query(ranks=rank) # Retrieve hits hits = [hit for data in search_response for hit in data.hits] executor = ThreadPoolExecutor(max_workers=10) def hit_to_document(hit: resources_pb2.Hit) -> Tuple[Document, float]: metadata = json_format.MessageToDict(hit.input.data.metadata) h = {"Authorization": f"Key {self._pat}"} request = requests.get(hit.input.data.text.url, headers=h) # override encoding by real educated guess as provided by chardet request.encoding = request.apparent_encoding requested_text = request.text logger.debug( f"\tScore {hit.score:.2f} for annotation: {hit.annotation.id}\ off input: {hit.input.id}, text: {requested_text[:125]}" ) return (Document(page_content=requested_text, metadata=metadata), hit.score) # Iterate over hits and retrieve metadata and text futures = [executor.submit(hit_to_document, hit) for hit in hits] docs_and_scores = [future.result() for future in futures] return docs_and_scores def similarity_search( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Document]: """Run similarity search using Clarifai. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. 
Returns: List of Documents most similar to the query and score for each """ docs_and_scores = self.similarity_search_with_score(query, **kwargs) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, user_id: Optional[str] = None, app_id: Optional[str] = None, number_of_docs: Optional[int] = None, pat: Optional[str] = None, **kwargs: Any, ) -> Clarifai: """Create a Clarifai vectorstore from a list of texts. Args: user_id (str): User ID. app_id (str): App ID. texts (List[str]): List of texts to add. number_of_docs (Optional[int]): Number of documents to return during vector search. Defaults to None. metadatas (Optional[List[dict]]): Optional list of metadatas. Defaults to None. Returns: Clarifai: Clarifai vectorstore. """ clarifai_vector_db = cls( user_id=user_id, app_id=app_id, number_of_docs=number_of_docs, pat=pat, ) clarifai_vector_db.add_texts(texts=texts, metadatas=metadatas) return clarifai_vector_db @classmethod def from_documents( cls, documents: List[Document], embedding: Optional[Embeddings] = None, user_id: Optional[str] = None, app_id: Optional[str] = None, number_of_docs: Optional[int] = None, pat: Optional[str] = None, **kwargs: Any, ) -> Clarifai: """Create a Clarifai vectorstore from a list of documents. Args: user_id (str): User ID. app_id (str): App ID. documents (List[Document]): List of documents to add. number_of_docs (Optional[int]): Number of documents to return during vector search. Defaults to None. Returns: Clarifai: Clarifai vectorstore. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts( user_id=user_id, app_id=app_id, texts=texts, number_of_docs=number_of_docs, pat=pat, metadatas=metadatas, )
[]
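A hedged usage sketch for the vector store above. It requires `pip install clarifai` and real Clarifai credentials; the IDs and personal access token are placeholders.

from langchain_community.vectorstores import Clarifai

vectorstore = Clarifai.from_texts(
    texts=[
        "LangChain integrates with many vector stores.",
        "Clarifai apps embed text with a base workflow.",
    ],
    user_id="<user-id>",             # placeholder
    app_id="<app-id>",               # placeholder
    pat="<personal-access-token>",   # placeholder
)
docs = vectorstore.similarity_search("Which vector stores are supported?", k=1)
print(docs[0].page_content)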
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~ctransformers.py
from functools import partial from typing import Any, Dict, List, Optional, Sequence from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.pydantic_v1 import root_validator class CTransformers(LLM): """C Transformers LLM models. To use, you should have the ``ctransformers`` python package installed. See https://github.com/marella/ctransformers Example: .. code-block:: python from langchain_community.llms import CTransformers llm = CTransformers(model="/path/to/ggml-gpt-2.bin", model_type="gpt2") """ client: Any #: :meta private: model: str """The path to a model file or directory or the name of a Hugging Face Hub model repo.""" model_type: Optional[str] = None """The model type.""" model_file: Optional[str] = None """The name of the model file in repo or directory.""" config: Optional[Dict[str, Any]] = None """The config parameters. See https://github.com/marella/ctransformers#config""" lib: Optional[str] = None """The path to a shared library or one of `avx2`, `avx`, `basic`.""" @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { "model": self.model, "model_type": self.model_type, "model_file": self.model_file, "config": self.config, } @property def _llm_type(self) -> str: """Return type of llm.""" return "ctransformers" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that ``ctransformers`` package is installed.""" try: from ctransformers import AutoModelForCausalLM except ImportError: raise ImportError( "Could not import `ctransformers` package. " "Please install it with `pip install ctransformers`" ) config = values["config"] or {} values["client"] = AutoModelForCausalLM.from_pretrained( values["model"], model_type=values["model_type"], model_file=values["model_file"], lib=values["lib"], **config, ) return values def _call( self, prompt: str, stop: Optional[Sequence[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Generate text from a prompt. Args: prompt: The prompt to generate text from. stop: A list of sequences to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python response = llm("Tell me a joke.") """ text = [] _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager() for chunk in self.client(prompt, stop=stop, stream=True): text.append(chunk) _run_manager.on_llm_new_token(chunk, verbose=self.verbose) return "".join(text) async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Asynchronous Call out to CTransformers generate method. Very helpful when streaming (like with websockets!) Args: prompt: The prompt to pass into the model. stop: A list of strings to stop generation when encountered. Returns: The string generated by the model. Example: .. code-block:: python response = llm("Once upon a time, ") """ text_callback = None if run_manager: text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose) text = "" for token in self.client(prompt, stop=stop, stream=True): if text_callback: await text_callback(token) text += token return text
[]
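A hedged usage sketch for the wrapper above, streaming tokens to stdout. It assumes `pip install ctransformers` and a local GGML model file; the path is a placeholder.

from langchain_community.llms import CTransformers
from libs.core.langchain_core.callbacks import StreamingStdOutCallbackHandler

llm = CTransformers(
    model="/path/to/ggml-gpt-2.bin",  # placeholder path to a local GGML file
    model_type="gpt2",
    config={"max_new_tokens": 64, "temperature": 0.7},
    callbacks=[StreamingStdOutCallbackHandler()],  # prints tokens as they stream
)
print(llm("Tell me a joke."))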
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~combine_documents~map_reduce.py
"""Combining documents by mapping a chain over them first, then combining results.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Tuple, Type from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, create_model, root_validator from libs.core.langchain_core.runnables.config import RunnableConfig from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.reduce import ReduceDocumentsChain from langchain.chains.llm import LLMChain class MapReduceDocumentsChain(BaseCombineDocumentsChain): """Combining documents by mapping a chain over them, then combining results. We first call `llm_chain` on each document individually, passing in the `page_content` and any other kwargs. This is the `map` step. We then process the results of that `map` step in a `reduce` step. This should likely be a ReduceDocumentsChain. Example: .. code-block:: python from langchain.chains import ( StuffDocumentsChain, LLMChain, ReduceDocumentsChain, MapReduceDocumentsChain, ) from libs.core.langchain_core.prompts import PromptTemplate from langchain.llms import OpenAI # This controls how each document will be formatted. Specifically, # it will be passed to `format_document` - see that function for more # details. document_prompt = PromptTemplate( input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" llm = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template( "Summarize this content: {context}" ) llm_chain = LLMChain(llm=llm, prompt=prompt) # We now define how to combine these summaries reduce_prompt = PromptTemplate.from_template( "Combine these summaries: {context}" ) reduce_llm_chain = LLMChain(llm=llm, prompt=reduce_prompt) combine_documents_chain = StuffDocumentsChain( llm_chain=reduce_llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, ) chain = MapReduceDocumentsChain( llm_chain=llm_chain, reduce_documents_chain=reduce_documents_chain, ) # If we wanted to, we could also pass in collapse_documents_chain # which is specifically aimed at collapsing documents BEFORE # the final call. prompt = PromptTemplate.from_template( "Collapse this content: {context}" ) llm_chain = LLMChain(llm=llm, prompt=prompt) collapse_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, collapse_documents_chain=collapse_documents_chain, ) chain = MapReduceDocumentsChain( llm_chain=llm_chain, reduce_documents_chain=reduce_documents_chain, ) """ llm_chain: LLMChain """Chain to apply to each document individually.""" reduce_documents_chain: BaseCombineDocumentsChain """Chain to use to reduce the results of applying `llm_chain` to each doc. This typically either a ReduceDocumentChain or StuffDocumentChain.""" document_variable_name: str """The variable name in the llm_chain to put the documents in. 
If only one variable in the llm_chain, this need not be provided.""" return_intermediate_steps: bool = False """Return the results of the map steps in the output.""" def get_output_schema( self, config: Optional[RunnableConfig] = None ) -> Type[BaseModel]: if self.return_intermediate_steps: return create_model( "MapReduceDocumentsOutput", **{ self.output_key: (str, None), "intermediate_steps": (List[str], None), }, # type: ignore[call-overload] ) return super().get_output_schema(config) @property def output_keys(self) -> List[str]: """Expect input key. :meta private: """ _output_keys = super().output_keys if self.return_intermediate_steps: _output_keys = _output_keys + ["intermediate_steps"] return _output_keys class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def get_reduce_chain(cls, values: Dict) -> Dict: """For backwards compatibility.""" if "combine_document_chain" in values: if "reduce_documents_chain" in values: raise ValueError( "Both `reduce_documents_chain` and `combine_document_chain` " "cannot be provided at the same time. `combine_document_chain` " "is deprecated, please only provide `reduce_documents_chain`" ) combine_chain = values["combine_document_chain"] collapse_chain = values.get("collapse_document_chain") reduce_chain = ReduceDocumentsChain( combine_documents_chain=combine_chain, collapse_documents_chain=collapse_chain, ) values["reduce_documents_chain"] = reduce_chain del values["combine_document_chain"] if "collapse_document_chain" in values: del values["collapse_document_chain"] return values @root_validator(pre=True) def get_return_intermediate_steps(cls, values: Dict) -> Dict: """For backwards compatibility.""" if "return_map_steps" in values: values["return_intermediate_steps"] = values["return_map_steps"] del values["return_map_steps"] return values @root_validator(pre=True) def get_default_document_variable_name(cls, values: Dict) -> Dict: """Get default document variable name, if not provided.""" if "document_variable_name" not in values: llm_chain_variables = values["llm_chain"].prompt.input_variables if len(llm_chain_variables) == 1: values["document_variable_name"] = llm_chain_variables[0] else: raise ValueError( "document_variable_name must be provided if there are " "multiple llm_chain input_variables" ) else: llm_chain_variables = values["llm_chain"].prompt.input_variables if values["document_variable_name"] not in llm_chain_variables: raise ValueError( f"document_variable_name {values['document_variable_name']} was " f"not found in llm_chain input_variables: {llm_chain_variables}" ) return values @property def collapse_document_chain(self) -> BaseCombineDocumentsChain: """Kept for backward compatibility.""" if isinstance(self.reduce_documents_chain, ReduceDocumentsChain): if self.reduce_documents_chain.collapse_documents_chain: return self.reduce_documents_chain.collapse_documents_chain else: return self.reduce_documents_chain.combine_documents_chain else: raise ValueError( f"`reduce_documents_chain` is of type " f"{type(self.reduce_documents_chain)} so it does not have " f"this attribute." ) @property def combine_document_chain(self) -> BaseCombineDocumentsChain: """Kept for backward compatibility.""" if isinstance(self.reduce_documents_chain, ReduceDocumentsChain): return self.reduce_documents_chain.combine_documents_chain else: raise ValueError( f"`reduce_documents_chain` is of type " f"{type(self.reduce_documents_chain)} so it does not have " f"this attribute." 
) def combine_docs( self, docs: List[Document], token_max: Optional[int] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> Tuple[str, dict]: """Combine documents in a map reduce manner. Combine by mapping first chain over all documents, then reducing the results. This reducing can be done recursively if needed (if there are many documents). """ map_results = self.llm_chain.apply( # FYI - this is parallelized and so it is fast. [{self.document_variable_name: d.page_content, **kwargs} for d in docs], callbacks=callbacks, ) question_result_key = self.llm_chain.output_key result_docs = [ Document(page_content=r[question_result_key], metadata=docs[i].metadata) # This uses metadata from the docs, and the textual results from `results` for i, r in enumerate(map_results) ] result, extra_return_dict = self.reduce_documents_chain.combine_docs( result_docs, token_max=token_max, callbacks=callbacks, **kwargs ) if self.return_intermediate_steps: intermediate_steps = [r[question_result_key] for r in map_results] extra_return_dict["intermediate_steps"] = intermediate_steps return result, extra_return_dict async def acombine_docs( self, docs: List[Document], token_max: Optional[int] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> Tuple[str, dict]: """Combine documents in a map reduce manner. Combine by mapping first chain over all documents, then reducing the results. This reducing can be done recursively if needed (if there are many documents). """ map_results = await self.llm_chain.aapply( # FYI - this is parallelized and so it is fast. [{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs], callbacks=callbacks, ) question_result_key = self.llm_chain.output_key result_docs = [ Document(page_content=r[question_result_key], metadata=docs[i].metadata) # This uses metadata from the docs, and the textual results from `results` for i, r in enumerate(map_results) ] result, extra_return_dict = await self.reduce_documents_chain.acombine_docs( result_docs, token_max=token_max, callbacks=callbacks, **kwargs ) if self.return_intermediate_steps: intermediate_steps = [r[question_result_key] for r in map_results] extra_return_dict["intermediate_steps"] = intermediate_steps return result, extra_return_dict @property def _chain_type(self) -> str: return "map_reduce_documents_chain"
[]
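A hedged, offline sketch of the map-reduce flow above: FakeListLLM supplies canned map and reduce outputs, and a StuffDocumentsChain is passed directly as the reduce step so no token counting is needed.

from langchain.chains import LLMChain, MapReduceDocumentsChain, StuffDocumentsChain
from langchain_community.llms.fake import FakeListLLM
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.prompts import PromptTemplate

# Two map outputs, then one reduce output.
llm = FakeListLLM(responses=["summary A", "summary B", "combined summary"])
map_chain = LLMChain(
    llm=llm, prompt=PromptTemplate.from_template("Summarize this content: {context}")
)
reduce_llm_chain = LLMChain(
    llm=llm, prompt=PromptTemplate.from_template("Combine these summaries: {context}")
)
reduce_chain = StuffDocumentsChain(
    llm_chain=reduce_llm_chain, document_variable_name="context"
)
chain = MapReduceDocumentsChain(
    llm_chain=map_chain,
    reduce_documents_chain=reduce_chain,
    document_variable_name="context",
)
docs = [Document(page_content="First doc."), Document(page_content="Second doc.")]
output, _ = chain.combine_docs(docs)
print(output)  # -> "combined summary"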
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~aviary.py
import dataclasses import os from typing import Any, Dict, List, Mapping, Optional, Union, cast import requests from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.pydantic_v1 import Extra, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env from langchain_community.llms.utils import enforce_stop_tokens TIMEOUT = 60 @dataclasses.dataclass class AviaryBackend: """Aviary backend. Attributes: backend_url: The URL for the Aviary backend. bearer: The bearer token for the Aviary backend. """ backend_url: str bearer: str def __post_init__(self) -> None: self.header = {"Authorization": self.bearer} @classmethod def from_env(cls) -> "AviaryBackend": aviary_url = os.getenv("AVIARY_URL") assert aviary_url, "AVIARY_URL must be set" aviary_token = os.getenv("AVIARY_TOKEN", "") bearer = f"Bearer {aviary_token}" if aviary_token else "" aviary_url += "/" if not aviary_url.endswith("/") else "" return cls(aviary_url, bearer) def get_models() -> List[str]: """List available models""" backend = AviaryBackend.from_env() request_url = backend.backend_url + "-/routes" response = requests.get(request_url, headers=backend.header, timeout=TIMEOUT) try: result = response.json() except requests.JSONDecodeError as e: raise RuntimeError( f"Error decoding JSON from {request_url}. Text response: {response.text}" ) from e result = sorted( [k.lstrip("/").replace("--", "/") for k in result.keys() if "--" in k] ) return result def get_completions( model: str, prompt: str, use_prompt_format: bool = True, version: str = "", ) -> Dict[str, Union[str, float, int]]: """Get completions from Aviary models.""" backend = AviaryBackend.from_env() url = backend.backend_url + model.replace("/", "--") + "/" + version + "query" response = requests.post( url, headers=backend.header, json={"prompt": prompt, "use_prompt_format": use_prompt_format}, timeout=TIMEOUT, ) try: return response.json() except requests.JSONDecodeError as e: raise RuntimeError( f"Error decoding JSON from {url}. Text response: {response.text}" ) from e class Aviary(LLM): """Aviary hosted models. Aviary is a backend for hosted models. You can find out more about aviary at http://github.com/ray-project/aviary To get a list of the models supported on an aviary, follow the instructions on the website to install the aviary CLI and then use: `aviary models` AVIARY_URL and AVIARY_TOKEN environment variables must be set. Attributes: model: The name of the model to use. Defaults to "amazon/LightGPT". aviary_url: The URL for the Aviary backend. Defaults to None. aviary_token: The bearer token for the Aviary backend. Defaults to None. use_prompt_format: If True, the prompt template for the model will be ignored. Defaults to True. version: API version to use for Aviary. Defaults to None. Example: .. code-block:: python from langchain_community.llms import Aviary os.environ["AVIARY_URL"] = "<URL>" os.environ["AVIARY_TOKEN"] = "<TOKEN>" light = Aviary(model='amazon/LightGPT') output = light('How do you make fried rice?') """ model: str = "amazon/LightGPT" aviary_url: Optional[str] = None aviary_token: Optional[str] = None # If True the prompt template for the model will be ignored. 
use_prompt_format: bool = True # API version to use for Aviary version: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" aviary_url = get_from_dict_or_env(values, "aviary_url", "AVIARY_URL") aviary_token = get_from_dict_or_env(values, "aviary_token", "AVIARY_TOKEN") # Set env viarables for aviary sdk os.environ["AVIARY_URL"] = aviary_url os.environ["AVIARY_TOKEN"] = aviary_token try: aviary_models = get_models() except requests.exceptions.RequestException as e: raise ValueError(e) model = values.get("model") if model and model not in aviary_models: raise ValueError(f"{aviary_url} does not support model {values['model']}.") return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_name": self.model, "aviary_url": self.aviary_url, } @property def _llm_type(self) -> str: """Return type of llm.""" return f"aviary-{self.model.replace('/', '-')}" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Aviary Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. Example: .. code-block:: python response = aviary("Tell me a joke.") """ kwargs = {"use_prompt_format": self.use_prompt_format} if self.version: kwargs["version"] = self.version output = get_completions( model=self.model, prompt=prompt, **kwargs, ) text = cast(str, output["generated_text"]) if stop: text = enforce_stop_tokens(text, stop) return text
[ "True" ]
2024-01-10
mth93/langchain
libs~langchain~langchain~indexes~graph.py
"""Graph Index Creator.""" from typing import Optional, Type from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.prompts import BasePromptTemplate from libs.core.langchain_core.pydantic_v1 import BaseModel from langchain.chains.llm import LLMChain from langchain.graphs.networkx_graph import NetworkxEntityGraph, parse_triples from langchain.indexes.prompts.knowledge_triplet_extraction import ( KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT, ) class GraphIndexCreator(BaseModel): """Functionality to create graph index.""" llm: Optional[BaseLanguageModel] = None graph_type: Type[NetworkxEntityGraph] = NetworkxEntityGraph def from_text( self, text: str, prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT ) -> NetworkxEntityGraph: """Create graph index from text.""" if self.llm is None: raise ValueError("llm should not be None") graph = self.graph_type() chain = LLMChain(llm=self.llm, prompt=prompt) output = chain.predict(text=text) knowledge = parse_triples(output) for triple in knowledge: graph.add_triple(triple) return graph async def afrom_text( self, text: str, prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT ) -> NetworkxEntityGraph: """Create graph index from text asynchronously.""" if self.llm is None: raise ValueError("llm should not be None") graph = self.graph_type() chain = LLMChain(llm=self.llm, prompt=prompt) output = await chain.apredict(text=text) knowledge = parse_triples(output) for triple in knowledge: graph.add_triple(triple) return graph
[]
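A hedged, offline sketch of GraphIndexCreator.from_text: FakeListLLM stands in for a real model and returns triples in the "(head, relation, tail)<|>..." format that parse_triples expects. It assumes `networkx` is installed for the graph backend.

from langchain.indexes import GraphIndexCreator
from langchain_community.llms.fake import FakeListLLM

llm = FakeListLLM(
    responses=["(Paris, is the capital of, France)<|>(France, is located in, Europe)"]
)
index_creator = GraphIndexCreator(llm=llm)
graph = index_creator.from_text("Paris is the capital of France, which is in Europe.")
print(graph.get_triples())  # knowledge triples parsed from the fake output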
2024-01-10
mth93/langchain
libs~langchain~langchain~evaluation~loading.py
"""Loading datasets and evaluators.""" from typing import Any, Dict, List, Optional, Sequence, Type, Union from libs.core.langchain_core.language_models import BaseLanguageModel from langchain.chains.base import Chain from langchain.chat_models.openai import ChatOpenAI from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain from langchain.evaluation.comparison import PairwiseStringEvalChain from langchain.evaluation.comparison.eval_chain import LabeledPairwiseStringEvalChain from langchain.evaluation.criteria.eval_chain import ( CriteriaEvalChain, LabeledCriteriaEvalChain, ) from langchain.evaluation.embedding_distance.base import ( EmbeddingDistanceEvalChain, PairwiseEmbeddingDistanceEvalChain, ) from langchain.evaluation.exact_match.base import ExactMatchStringEvaluator from langchain.evaluation.parsing.base import ( JsonEqualityEvaluator, JsonValidityEvaluator, ) from langchain.evaluation.parsing.json_distance import JsonEditDistanceEvaluator from langchain.evaluation.parsing.json_schema import JsonSchemaEvaluator from langchain.evaluation.qa import ContextQAEvalChain, CotQAEvalChain, QAEvalChain from langchain.evaluation.regex_match.base import RegexMatchStringEvaluator from langchain.evaluation.schema import EvaluatorType, LLMEvalChain, StringEvaluator from langchain.evaluation.scoring.eval_chain import ( LabeledScoreStringEvalChain, ScoreStringEvalChain, ) from langchain.evaluation.string_distance.base import ( PairwiseStringDistanceEvalChain, StringDistanceEvalChain, ) def load_dataset(uri: str) -> List[Dict]: """Load a dataset from the `LangChainDatasets on HuggingFace <https://huggingface.co/LangChainDatasets>`_. Args: uri: The uri of the dataset to load. Returns: A list of dictionaries, each representing a row in the dataset. **Prerequisites** .. code-block:: shell pip install datasets Examples -------- .. code-block:: python from langchain.evaluation import load_dataset ds = load_dataset("llm-math") """ # noqa: E501 try: from datasets import load_dataset except ImportError: raise ImportError( "load_dataset requires the `datasets` package." 
" Please install with `pip install datasets`" ) dataset = load_dataset(f"LangChainDatasets/{uri}") return [d for d in dataset["train"]] _EVALUATOR_MAP: Dict[ EvaluatorType, Union[Type[LLMEvalChain], Type[Chain], Type[StringEvaluator]] ] = { EvaluatorType.QA: QAEvalChain, EvaluatorType.COT_QA: CotQAEvalChain, EvaluatorType.CONTEXT_QA: ContextQAEvalChain, EvaluatorType.PAIRWISE_STRING: PairwiseStringEvalChain, EvaluatorType.SCORE_STRING: ScoreStringEvalChain, EvaluatorType.LABELED_PAIRWISE_STRING: LabeledPairwiseStringEvalChain, EvaluatorType.LABELED_SCORE_STRING: LabeledScoreStringEvalChain, EvaluatorType.AGENT_TRAJECTORY: TrajectoryEvalChain, EvaluatorType.CRITERIA: CriteriaEvalChain, EvaluatorType.LABELED_CRITERIA: LabeledCriteriaEvalChain, EvaluatorType.STRING_DISTANCE: StringDistanceEvalChain, EvaluatorType.PAIRWISE_STRING_DISTANCE: PairwiseStringDistanceEvalChain, EvaluatorType.EMBEDDING_DISTANCE: EmbeddingDistanceEvalChain, EvaluatorType.PAIRWISE_EMBEDDING_DISTANCE: PairwiseEmbeddingDistanceEvalChain, EvaluatorType.JSON_VALIDITY: JsonValidityEvaluator, EvaluatorType.JSON_EQUALITY: JsonEqualityEvaluator, EvaluatorType.JSON_EDIT_DISTANCE: JsonEditDistanceEvaluator, EvaluatorType.JSON_SCHEMA_VALIDATION: JsonSchemaEvaluator, EvaluatorType.REGEX_MATCH: RegexMatchStringEvaluator, EvaluatorType.EXACT_MATCH: ExactMatchStringEvaluator, } def load_evaluator( evaluator: EvaluatorType, *, llm: Optional[BaseLanguageModel] = None, **kwargs: Any, ) -> Union[Chain, StringEvaluator]: """Load the requested evaluation chain specified by a string. Parameters ---------- evaluator : EvaluatorType The type of evaluator to load. llm : BaseLanguageModel, optional The language model to use for evaluation, by default None **kwargs : Any Additional keyword arguments to pass to the evaluator. Returns ------- Chain The loaded evaluation chain. Examples -------- >>> from langchain.evaluation import load_evaluator, EvaluatorType >>> evaluator = load_evaluator(EvaluatorType.QA) """ if evaluator not in _EVALUATOR_MAP: raise ValueError( f"Unknown evaluator type: {evaluator}" f"\nValid types are: {list(_EVALUATOR_MAP.keys())}" ) evaluator_cls = _EVALUATOR_MAP[evaluator] if issubclass(evaluator_cls, LLMEvalChain): try: llm = llm or ChatOpenAI( model="gpt-4", model_kwargs={"seed": 42}, temperature=0 ) except Exception as e: raise ValueError( f"Evaluation with the {evaluator_cls} requires a " "language model to function." " Failed to create the default 'gpt-4' model." " Please manually provide an evaluation LLM" " or check your openai credentials." ) from e return evaluator_cls.from_llm(llm=llm, **kwargs) else: return evaluator_cls(**kwargs) def load_evaluators( evaluators: Sequence[EvaluatorType], *, llm: Optional[BaseLanguageModel] = None, config: Optional[dict] = None, **kwargs: Any, ) -> List[Union[Chain, StringEvaluator]]: """Load evaluators specified by a list of evaluator types. Parameters ---------- evaluators : Sequence[EvaluatorType] The list of evaluator types to load. llm : BaseLanguageModel, optional The language model to use for evaluation, if none is provided, a default ChatOpenAI gpt-4 model will be used. config : dict, optional A dictionary mapping evaluator types to additional keyword arguments, by default None **kwargs : Any Additional keyword arguments to pass to all evaluators. Returns ------- List[Chain] The loaded evaluators. 
Examples -------- >>> from langchain.evaluation import load_evaluators, EvaluatorType >>> evaluators = [EvaluatorType.QA, EvaluatorType.CRITERIA] >>> loaded_evaluators = load_evaluators(evaluators, criteria="helpfulness") """ loaded = [] for evaluator in evaluators: _kwargs = config.get(evaluator, {}) if config else {} loaded.append(load_evaluator(evaluator, llm=llm, **{**kwargs, **_kwargs})) return loaded
[]
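A hedged sketch of load_evaluator using a non-LLM evaluator, so it runs without OpenAI credentials:

from langchain.evaluation import EvaluatorType, load_evaluator

evaluator = load_evaluator(EvaluatorType.EXACT_MATCH, ignore_case=True)
print(evaluator.evaluate_strings(prediction="Paris", reference="paris"))
# -> {'score': 1}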
2024-01-10
mth93/langchain
libs~langchain~langchain~agents~output_parsers~react_json_single_input.py
import json import re from typing import Union from libs.core.langchain_core.agents import AgentAction, AgentFinish from libs.core.langchain_core.exceptions import OutputParserException from langchain.agents.agent import AgentOutputParser from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS FINAL_ANSWER_ACTION = "Final Answer:" class ReActJsonSingleInputOutputParser(AgentOutputParser): """Parses ReAct-style LLM calls that have a single tool input in json format. Expects output to be in one of two formats. If the output signals that an action should be taken, should be in the below format. This will result in an AgentAction being returned. ``` Thought: agent thought here Action: ``` { "action": "search", "action_input": "what is the temperature in SF" } ``` ``` If the output signals that a final answer should be given, should be in the below format. This will result in an AgentFinish being returned. ``` Thought: agent thought here Final Answer: The temperature is 100 degrees ``` """ pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL) """Regex pattern to parse the output.""" def get_format_instructions(self) -> str: return FORMAT_INSTRUCTIONS def parse(self, text: str) -> Union[AgentAction, AgentFinish]: includes_answer = FINAL_ANSWER_ACTION in text try: found = self.pattern.search(text) if not found: # Fast fail to parse Final Answer. raise ValueError("action not found") action = found.group(1) response = json.loads(action.strip()) includes_action = "action" in response if includes_answer and includes_action: raise OutputParserException( "Parsing LLM output produced a final answer " f"and a parse-able action: {text}" ) return AgentAction( response["action"], response.get("action_input", {}), text ) except Exception: if not includes_answer: raise OutputParserException(f"Could not parse LLM output: {text}") output = text.split(FINAL_ANSWER_ACTION)[-1].strip() return AgentFinish({"output": output}, text) @property def _type(self) -> str: return "react-json-single-input"
[]
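A small, self-contained sketch exercising both output shapes the parser above handles: the fenced JSON action block and the final answer.

from langchain.agents.output_parsers.react_json_single_input import (
    ReActJsonSingleInputOutputParser,
)

parser = ReActJsonSingleInputOutputParser()

action_text = """Thought: I should look this up
Action:
```
{"action": "search", "action_input": "what is the temperature in SF"}
```"""
print(parser.parse(action_text))  # -> AgentAction(tool='search', ...)

final_text = "Thought: I know the answer\nFinal Answer: The temperature is 100 degrees"
print(parser.parse(final_text))   # -> AgentFinish({'output': 'The temperature is 100 degrees'}, ...)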
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~llamacpp.py
from __future__ import annotations import logging from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.outputs import GenerationChunk from libs.core.langchain_core.pydantic_v1 import Field, root_validator from libs.core.langchain_core.utils import get_pydantic_field_names from libs.core.langchain_core.utils.utils import build_extra_kwargs if TYPE_CHECKING: from llama_cpp import LlamaGrammar logger = logging.getLogger(__name__) class LlamaCpp(LLM): """llama.cpp model. To use, you should have the llama-cpp-python library installed, and provide the path to the Llama model as a named parameter to the constructor. Check out: https://github.com/abetlen/llama-cpp-python Example: .. code-block:: python from langchain_community.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/llama/model") """ client: Any #: :meta private: model_path: str """The path to the Llama model file.""" lora_base: Optional[str] = None """The path to the Llama LoRA base model.""" lora_path: Optional[str] = None """The path to the Llama LoRA. If None, no LoRa is loaded.""" n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(-1, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(True, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """Number of tokens to process in parallel. Should be a number between 1 and n_ctx.""" n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") """Number of layers to be loaded into gpu memory. Default None.""" suffix: Optional[str] = Field(None) """A suffix to append to the generated text. If None, no suffix is appended.""" max_tokens: Optional[int] = 256 """The maximum number of tokens to generate.""" temperature: Optional[float] = 0.8 """The temperature to use for sampling.""" top_p: Optional[float] = 0.95 """The top-p value to use for sampling.""" logprobs: Optional[int] = Field(None) """The number of logprobs to return. 
If None, no logprobs are returned.""" echo: Optional[bool] = False """Whether to echo the prompt.""" stop: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" repeat_penalty: Optional[float] = 1.1 """The penalty to apply to repeated tokens.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" last_n_tokens_size: Optional[int] = 64 """The number of tokens to look back when applying the repeat_penalty.""" use_mmap: Optional[bool] = True """Whether to keep the model loaded in RAM""" rope_freq_scale: float = 1.0 """Scale factor for rope sampling.""" rope_freq_base: float = 10000.0 """Base frequency for rope sampling.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Any additional parameters to pass to llama_cpp.Llama.""" streaming: bool = True """Whether to stream the results, token by token.""" grammar_path: Optional[Union[str, Path]] = None """ grammar_path: Path to the .gbnf file that defines formal grammars for constraining model outputs. For instance, the grammar can be used to force the model to generate valid JSON or to speak exclusively in emojis. At most one of grammar_path and grammar should be passed in. """ grammar: Optional[Union[str, LlamaGrammar]] = None """ grammar: formal grammar for constraining model outputs. For instance, the grammar can be used to force the model to generate valid JSON or to speak exclusively in emojis. At most one of grammar_path and grammar should be passed in. """ verbose: bool = True """Print verbose output to stderr.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" try: from llama_cpp import Llama, LlamaGrammar except ImportError: raise ImportError( "Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this embedding model: pip install llama-cpp-python" ) model_path = values["model_path"] model_param_names = [ "rope_freq_scale", "rope_freq_base", "lora_path", "lora_base", "n_ctx", "n_parts", "seed", "f16_kv", "logits_all", "vocab_only", "use_mlock", "n_threads", "n_batch", "use_mmap", "last_n_tokens_size", "verbose", ] model_params = {k: values[k] for k in model_param_names} # For backwards compatibility, only include if non-null. if values["n_gpu_layers"] is not None: model_params["n_gpu_layers"] = values["n_gpu_layers"] model_params.update(values["model_kwargs"]) try: values["client"] = Llama(model_path, **model_params) except Exception as e: raise ValueError( f"Could not load Llama model from path: {model_path}. " f"Received error {e}" ) if values["grammar"] and values["grammar_path"]: grammar = values["grammar"] grammar_path = values["grammar_path"] raise ValueError( "Can only pass in one of grammar and grammar_path. Received " f"{grammar=} and {grammar_path=}." 
) elif isinstance(values["grammar"], str): values["grammar"] = LlamaGrammar.from_string(values["grammar"]) elif values["grammar_path"]: values["grammar"] = LlamaGrammar.from_file(values["grammar_path"]) else: pass return values @root_validator(pre=True) def build_model_kwargs(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) values["model_kwargs"] = build_extra_kwargs( extra, values, all_required_field_names ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling llama_cpp.""" params = { "suffix": self.suffix, "max_tokens": self.max_tokens, "temperature": self.temperature, "top_p": self.top_p, "logprobs": self.logprobs, "echo": self.echo, "stop_sequences": self.stop, # key here is convention among LLM classes "repeat_penalty": self.repeat_penalty, "top_k": self.top_k, } if self.grammar: params["grammar"] = self.grammar return params @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_path": self.model_path}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "llamacpp" def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """ Performs sanity check, preparing parameters in format needed by llama_cpp. Args: stop (Optional[List[str]]): List of stop sequences for llama_cpp. Returns: Dictionary containing the combined parameters. """ # Raise error if stop sequences are in both input and default params if self.stop and stop is not None: raise ValueError("`stop` found in both the input and default params.") params = self._default_params # llama_cpp expects the "stop" key not this, so we remove it: params.pop("stop_sequences") # then sets it as configured, or default to an empty list: params["stop"] = self.stop or stop or [] return params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the Llama model and return the output. Args: prompt: The prompt to use for generation. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain_community.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/local/llama/model.bin") llm("This is a prompt.") """ if self.streaming: # If streaming is enabled, we use the stream # method that yields as they are generated # and return the combined strings from the first choices's text: combined_text_output = "" for chunk in self._stream( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs, ): combined_text_output += chunk.text return combined_text_output else: params = self._get_parameters(stop) params = {**params, **kwargs} result = self.client(prompt=prompt, **params) return result["choices"][0]["text"] def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompts to pass into the model. stop: Optional list of stop words to use when generating. 
Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like objects containing a string token and metadata. See llama-cpp-python docs and below for more. Example: .. code-block:: python from langchain_community.llms import LlamaCpp llm = LlamaCpp( model_path="/path/to/local/model.bin", temperature = 0.5 ) for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'", stop=["'","\n"]): result = chunk["choices"][0] print(result["text"], end='', flush=True) """ params = {**self._get_parameters(stop), **kwargs} result = self.client(prompt=prompt, stream=True, **params) for part in result: logprobs = part["choices"][0].get("logprobs", None) chunk = GenerationChunk( text=part["choices"][0]["text"], generation_info={"logprobs": logprobs}, ) yield chunk if run_manager: run_manager.on_llm_new_token( token=chunk.text, verbose=self.verbose, log_probs=logprobs ) def get_num_tokens(self, text: str) -> int: tokenized_text = self.client.tokenize(text.encode("utf-8")) return len(tokenized_text)
[]
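A minimal usage sketch for the LlamaCpp wrapper defined above. It assumes llama-cpp-python is installed and a local model file exists; the model path and sampling settings are placeholders, not values taken from the source.

.. code-block:: python

    from langchain_community.llms import LlamaCpp

    llm = LlamaCpp(
        model_path="/path/to/model.gguf",  # placeholder: any local llama.cpp-compatible model
        n_ctx=2048,
        temperature=0.7,
        max_tokens=128,
    )

    # streaming=True is the default, so _call simply concatenates these chunks.
    for token in llm.stream("List three facts about llamas:"):
        print(token, end="", flush=True)

    # Token counting goes through the loaded model's own tokenizer (client.tokenize).
    print(llm.get_num_tokens("How many tokens is this sentence?"))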
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~agents~test_agent_iterator.py
from uuid import UUID import pytest from libs.core.langchain_core.tools import Tool from langchain.agents import ( AgentExecutor, AgentExecutorIterator, AgentType, initialize_agent, ) from langchain.llms import FakeListLLM from langchain.schema import RUN_KEY from tests.unit_tests.agents.test_agent import _get_agent from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_agent_iterator_bad_action() -> None: """Test react chain iterator when bad action given.""" agent = _get_agent() agent_iter = agent.iter(inputs="when was langchain made") outputs = [] for step in agent_iter: outputs.append(step) assert isinstance(outputs[-1], dict) assert outputs[-1]["output"] == "curses foiled again" def test_agent_iterator_stopped_early() -> None: """ Test react chain iterator when max iterations or max execution time is exceeded. """ # iteration limit agent = _get_agent(max_iterations=1) agent_iter = agent.iter(inputs="when was langchain made") outputs = [] for step in agent_iter: outputs.append(step) # NOTE: we don't use agent.run like in the test for the regular agent executor, # so the dict structure for outputs stays intact assert isinstance(outputs[-1], dict) assert ( outputs[-1]["output"] == "Agent stopped due to iteration limit or time limit." ) # execution time limit agent = _get_agent(max_execution_time=1e-5) agent_iter = agent.iter(inputs="when was langchain made") outputs = [] for step in agent_iter: outputs.append(step) assert isinstance(outputs[-1], dict) assert ( outputs[-1]["output"] == "Agent stopped due to iteration limit or time limit." ) async def test_agent_async_iterator_stopped_early() -> None: """ Test react chain async iterator when max iterations or max execution time is exceeded. """ # iteration limit agent = _get_agent(max_iterations=1) agent_async_iter = agent.iter(inputs="when was langchain made") outputs = [] assert isinstance(agent_async_iter, AgentExecutorIterator) async for step in agent_async_iter: outputs.append(step) assert isinstance(outputs[-1], dict) assert ( outputs[-1]["output"] == "Agent stopped due to iteration limit or time limit." ) # execution time limit agent = _get_agent(max_execution_time=1e-5) agent_async_iter = agent.iter(inputs="when was langchain made") assert isinstance(agent_async_iter, AgentExecutorIterator) outputs = [] async for step in agent_async_iter: outputs.append(step) assert ( outputs[-1]["output"] == "Agent stopped due to iteration limit or time limit." 
) def test_agent_iterator_with_callbacks() -> None: """Test react chain iterator with callbacks by setting verbose globally.""" handler1 = FakeCallbackHandler() handler2 = FakeCallbackHandler() bad_action_name = "BadAction" responses = [ f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment", "Oh well\nFinal Answer: curses foiled again", ] fake_llm = FakeListLLM(cache=False, responses=responses, callbacks=[handler2]) tools = [ Tool( name="Search", func=lambda x: x, description="Useful for searching", ), Tool( name="Lookup", func=lambda x: x, description="Useful for looking up things in a table", ), ] agent = initialize_agent( tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, ) agent_iter = agent.iter( inputs="when was langchain made", callbacks=[handler1], include_run_info=True ) outputs = [] for step in agent_iter: outputs.append(step) assert isinstance(outputs[-1], dict) assert outputs[-1]["output"] == "curses foiled again" assert isinstance(outputs[-1][RUN_KEY].run_id, UUID) # 1 top level chain run runs, 2 LLMChain runs, 2 LLM runs, 1 tool run assert handler1.chain_starts == handler1.chain_ends == 3 assert handler1.llm_starts == handler1.llm_ends == 2 assert handler1.tool_starts == 1 assert handler1.tool_ends == 1 # 1 extra agent action assert handler1.starts == 7 # 1 extra agent end assert handler1.ends == 7 print("h:", handler1) assert handler1.errors == 0 # during LLMChain assert handler1.text == 2 assert handler2.llm_starts == 2 assert handler2.llm_ends == 2 assert ( handler2.chain_starts == handler2.tool_starts == handler2.tool_ends == handler2.chain_ends == 0 ) async def test_agent_async_iterator_with_callbacks() -> None: """Test react chain async iterator with callbacks by setting verbose globally.""" handler1 = FakeCallbackHandler() handler2 = FakeCallbackHandler() bad_action_name = "BadAction" responses = [ f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment", "Oh well\nFinal Answer: curses foiled again", ] fake_llm = FakeListLLM(cache=False, responses=responses, callbacks=[handler2]) tools = [ Tool( name="Search", func=lambda x: x, description="Useful for searching", ), Tool( name="Lookup", func=lambda x: x, description="Useful for looking up things in a table", ), ] agent = initialize_agent( tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True ) agent_async_iter = agent.iter( inputs="when was langchain made", callbacks=[handler1], include_run_info=True, ) assert isinstance(agent_async_iter, AgentExecutorIterator) outputs = [] async for step in agent_async_iter: outputs.append(step) assert outputs[-1]["output"] == "curses foiled again" assert isinstance(outputs[-1][RUN_KEY].run_id, UUID) # 1 top level chain run runs, 2 LLMChain runs, 2 LLM runs, 1 tool run assert handler1.chain_starts == handler1.chain_ends == 3 assert handler1.llm_starts == handler1.llm_ends == 2 assert handler1.tool_starts == 1 assert handler1.tool_ends == 1 # 1 extra agent action assert handler1.starts == 7 # 1 extra agent end assert handler1.ends == 7 assert handler1.errors == 0 # during LLMChain assert handler1.text == 2 assert handler2.llm_starts == 2 assert handler2.llm_ends == 2 assert ( handler2.chain_starts == handler2.tool_starts == handler2.tool_ends == handler2.chain_ends == 0 ) def test_agent_iterator_properties_and_setters() -> None: """Test properties and setters of AgentExecutorIterator.""" agent = _get_agent() agent.tags = None agent_iter = agent.iter(inputs="when was langchain made") assert 
isinstance(agent_iter, AgentExecutorIterator) assert isinstance(agent_iter.inputs, dict) assert isinstance(agent_iter.callbacks, type(None)) assert isinstance(agent_iter.tags, type(None)) assert isinstance(agent_iter.agent_executor, AgentExecutor) agent_iter.inputs = "New input" # type: ignore assert isinstance(agent_iter.inputs, dict) agent_iter.callbacks = [FakeCallbackHandler()] assert isinstance(agent_iter.callbacks, list) agent_iter.tags = ["test"] assert isinstance(agent_iter.tags, list) new_agent = _get_agent() agent_iter.agent_executor = new_agent assert isinstance(agent_iter.agent_executor, AgentExecutor) def test_agent_iterator_reset() -> None: """Test reset functionality of AgentExecutorIterator.""" agent = _get_agent() agent_iter = agent.iter(inputs="when was langchain made") assert isinstance(agent_iter, AgentExecutorIterator) # Perform one iteration iterator = iter(agent_iter) next(iterator) # Check if properties are updated assert agent_iter.iterations == 1 assert agent_iter.time_elapsed > 0.0 assert agent_iter.intermediate_steps # Reset the iterator agent_iter.reset() # Check if properties are reset assert agent_iter.iterations == 0 assert agent_iter.time_elapsed == 0.0 assert not agent_iter.intermediate_steps def test_agent_iterator_output_structure() -> None: """Test the output structure of AgentExecutorIterator.""" agent = _get_agent() agent_iter = agent.iter(inputs="when was langchain made") for step in agent_iter: assert isinstance(step, dict) if "intermediate_step" in step: assert isinstance(step["intermediate_step"], list) elif "output" in step: assert isinstance(step["output"], str) else: assert False, "Unexpected output structure" async def test_agent_async_iterator_output_structure() -> None: """Test the async output structure of AgentExecutorIterator.""" agent = _get_agent() agent_async_iter = agent.iter(inputs="when was langchain made", async_=True) assert isinstance(agent_async_iter, AgentExecutorIterator) async for step in agent_async_iter: assert isinstance(step, dict) if "intermediate_step" in step: assert isinstance(step["intermediate_step"], list) elif "output" in step: assert isinstance(step["output"], str) else: assert False, "Unexpected output structure" def test_agent_iterator_empty_input() -> None: """Test AgentExecutorIterator with empty input.""" agent = _get_agent() agent_iter = agent.iter(inputs="") outputs = [] for step in agent_iter: outputs.append(step) assert isinstance(outputs[-1], dict) assert outputs[-1]["output"] # Check if there is an output def test_agent_iterator_custom_stopping_condition() -> None: """Test AgentExecutorIterator with a custom stopping condition.""" agent = _get_agent() class CustomAgentExecutorIterator(AgentExecutorIterator): def _should_continue(self) -> bool: return self.iterations < 2 # Custom stopping condition agent_iter = CustomAgentExecutorIterator(agent, inputs="when was langchain made") outputs = [] for step in agent_iter: outputs.append(step) assert len(outputs) == 2 # Check if the custom stopping condition is respected def test_agent_iterator_failing_tool() -> None: """Test AgentExecutorIterator with a tool that raises an exception.""" # Get agent for testing. 
bad_action_name = "FailingTool" responses = [ f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment", "Oh well\nFinal Answer: curses foiled again", ] fake_llm = FakeListLLM(responses=responses) tools = [ Tool( name="FailingTool", func=lambda x: 1 / 0, # This tool will raise a ZeroDivisionError description="A tool that fails", ), ] agent = initialize_agent( tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True ) agent_iter = agent.iter(inputs="when was langchain made") assert isinstance(agent_iter, AgentExecutorIterator) # initialize iterator iterator = iter(agent_iter) with pytest.raises(ZeroDivisionError): next(iterator)
[]
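The tests above all revolve around one pattern: AgentExecutor.iter() yields dictionaries carrying either an "intermediate_step" entry or the final "output". A condensed sketch of that pattern, reusing the FakeListLLM and Tool setup from the tests; the scripted responses and the echo tool are illustrative only.

.. code-block:: python

    from libs.core.langchain_core.tools import Tool

    from langchain.agents import AgentType, initialize_agent
    from langchain.llms import FakeListLLM

    responses = [
        "I should look this up\nAction: Search\nAction Input: langchain release date",
        "That settles it\nFinal Answer: late 2022",
    ]
    fake_llm = FakeListLLM(responses=responses)
    tools = [Tool(name="Search", func=lambda x: x, description="Useful for searching")]
    agent = initialize_agent(tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)

    for step in agent.iter(inputs="when was langchain made"):
        if "intermediate_step" in step:
            print("intermediate:", step["intermediate_step"])
        elif "output" in step:
            print("final:", step["output"])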
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~pubmed.py
from typing import Iterator, List, Optional

from libs.core.langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.pubmed import PubMedAPIWrapper


class PubMedLoader(BaseLoader):
    """Load from the `PubMed` biomedical library.

    Attributes:
        query: The query to be passed to the PubMed API.
        load_max_docs: The maximum number of documents to load.
    """

    def __init__(
        self,
        query: str,
        load_max_docs: Optional[int] = 3,
    ):
        """Initialize the PubMedLoader.

        Args:
            query: The query to be passed to the PubMed API.
            load_max_docs: The maximum number of documents to load.
                Defaults to 3.
        """
        self.query = query
        self.load_max_docs = load_max_docs
        self._client = PubMedAPIWrapper(
            top_k_results=load_max_docs,
        )

    def load(self) -> List[Document]:
        return list(self._client.lazy_load_docs(self.query))

    def lazy_load(self) -> Iterator[Document]:
        for doc in self._client.lazy_load_docs(self.query):
            yield doc
[]
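A short usage sketch for the PubMedLoader above; the query string is an arbitrary example, and the "Title" metadata key is the one exercised by the PubMed tests later in this dump.

.. code-block:: python

    from langchain_community.document_loaders import PubMedLoader

    loader = PubMedLoader(query="chatgpt", load_max_docs=3)

    # load() materializes all documents; lazy_load() yields them one at a time.
    docs = loader.load()
    for doc in loader.lazy_load():
        print(doc.metadata.get("Title"), len(doc.page_content))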
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~dropbox.py
# Prerequisites: # 1. Create a Dropbox app. # 2. Give the app these scope permissions: `files.metadata.read` # and `files.content.read`. # 3. Generate access token: https://www.dropbox.com/developers/apps/create. # 4. `pip install dropbox` (requires `pip install unstructured[pdf]` for PDF filetype). import os import tempfile from pathlib import Path from typing import Any, Dict, List, Optional from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator from langchain_community.document_loaders.base import BaseLoader class DropboxLoader(BaseLoader, BaseModel): """Load files from `Dropbox`. In addition to common files such as text and PDF files, it also supports *Dropbox Paper* files. """ dropbox_access_token: str """Dropbox access token.""" dropbox_folder_path: Optional[str] = None """The folder path to load from.""" dropbox_file_paths: Optional[List[str]] = None """The file paths to load from.""" recursive: bool = False """Flag to indicate whether to load files recursively from subfolders.""" @root_validator def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate that either folder_path or file_paths is set, but not both.""" if ( values.get("dropbox_folder_path") is not None and values.get("dropbox_file_paths") is not None ): raise ValueError("Cannot specify both folder_path and file_paths") if values.get("dropbox_folder_path") is None and not values.get( "dropbox_file_paths" ): raise ValueError("Must specify either folder_path or file_paths") return values def _create_dropbox_client(self) -> Any: """Create a Dropbox client.""" try: from dropbox import Dropbox, exceptions except ImportError: raise ImportError("You must run " "`pip install dropbox") try: dbx = Dropbox(self.dropbox_access_token) dbx.users_get_current_account() except exceptions.AuthError as ex: raise ValueError( "Invalid Dropbox access token. Please verify your token and try again." ) from ex return dbx def _load_documents_from_folder(self, folder_path: str) -> List[Document]: """Load documents from a Dropbox folder.""" dbx = self._create_dropbox_client() try: from dropbox import exceptions from dropbox.files import FileMetadata except ImportError: raise ImportError("You must run " "`pip install dropbox") try: results = dbx.files_list_folder(folder_path, recursive=self.recursive) except exceptions.ApiError as ex: raise ValueError( f"Could not list files in the folder: {folder_path}. " "Please verify the folder path and try again." ) from ex files = [entry for entry in results.entries if isinstance(entry, FileMetadata)] documents = [ doc for doc in (self._load_file_from_path(file.path_display) for file in files) if doc is not None ] return documents def _load_file_from_path(self, file_path: str) -> Optional[Document]: """Load a file from a Dropbox path.""" dbx = self._create_dropbox_client() try: from dropbox import exceptions except ImportError: raise ImportError("You must run " "`pip install dropbox") try: file_metadata = dbx.files_get_metadata(file_path) if file_metadata.is_downloadable: _, response = dbx.files_download(file_path) # Some types such as Paper, need to be exported. elif file_metadata.export_info: _, response = dbx.files_export(file_path, "markdown") except exceptions.ApiError as ex: raise ValueError( f"Could not load file: {file_path}. Please verify the file path" "and try again." 
) from ex try: text = response.content.decode("utf-8") except UnicodeDecodeError: file_extension = os.path.splitext(file_path)[1].lower() if file_extension == ".pdf": print(f"File {file_path} type detected as .pdf") from langchain_community.document_loaders import UnstructuredPDFLoader # Download it to a temporary file. temp_dir = tempfile.TemporaryDirectory() temp_pdf = Path(temp_dir.name) / "tmp.pdf" with open(temp_pdf, mode="wb") as f: f.write(response.content) try: loader = UnstructuredPDFLoader(str(temp_pdf)) docs = loader.load() if docs: return docs[0] except Exception as pdf_ex: print(f"Error while trying to parse PDF {file_path}: {pdf_ex}") return None else: print( f"File {file_path} could not be decoded as pdf or text. Skipping." ) return None metadata = { "source": f"dropbox://{file_path}", "title": os.path.basename(file_path), } return Document(page_content=text, metadata=metadata) def _load_documents_from_paths(self) -> List[Document]: """Load documents from a list of Dropbox file paths.""" if not self.dropbox_file_paths: raise ValueError("file_paths must be set") return [ doc for doc in ( self._load_file_from_path(file_path) for file_path in self.dropbox_file_paths ) if doc is not None ] def load(self) -> List[Document]: """Load documents.""" if self.dropbox_folder_path is not None: return self._load_documents_from_folder(self.dropbox_folder_path) else: return self._load_documents_from_paths()
[]
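A usage sketch for the DropboxLoader above. It requires the dropbox package; the access token and folder path are placeholders, and exactly one of dropbox_folder_path or dropbox_file_paths may be set, as the root validator enforces.

.. code-block:: python

    from langchain_community.document_loaders import DropboxLoader

    loader = DropboxLoader(
        dropbox_access_token="sl.placeholder-token",  # placeholder token
        dropbox_folder_path="/reports",  # or: dropbox_file_paths=["/reports/a.txt"]
        recursive=True,
    )

    for doc in loader.load():
        print(doc.metadata["source"], doc.metadata["title"])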
2024-01-10
mth93/langchain
libs~langchain~langchain~storage~in_memory.py
"""In memory store that is not thread safe and has no eviction policy. This is a simple implementation of the BaseStore using a dictionary that is useful primarily for unit testing purposes. """ from typing import ( Any, Dict, Generic, Iterator, List, Optional, Sequence, Tuple, TypeVar, ) from libs.core.langchain_core.stores import BaseStore V = TypeVar("V") class InMemoryBaseStore(BaseStore[str, V], Generic[V]): """In-memory implementation of the BaseStore using a dictionary. Attributes: store (Dict[str, Any]): The underlying dictionary that stores the key-value pairs. Examples: .. code-block:: python from langchain.storage import InMemoryStore store = InMemoryStore() store.mset([('key1', 'value1'), ('key2', 'value2')]) store.mget(['key1', 'key2']) # ['value1', 'value2'] store.mdelete(['key1']) list(store.yield_keys()) # ['key2'] list(store.yield_keys(prefix='k')) # ['key2'] """ def __init__(self) -> None: """Initialize an empty store.""" self.store: Dict[str, V] = {} def mget(self, keys: Sequence[str]) -> List[Optional[V]]: """Get the values associated with the given keys. Args: keys (Sequence[str]): A sequence of keys. Returns: A sequence of optional values associated with the keys. If a key is not found, the corresponding value will be None. """ return [self.store.get(key) for key in keys] def mset(self, key_value_pairs: Sequence[Tuple[str, V]]) -> None: """Set the values for the given keys. Args: key_value_pairs (Sequence[Tuple[str, V]]): A sequence of key-value pairs. Returns: None """ for key, value in key_value_pairs: self.store[key] = value def mdelete(self, keys: Sequence[str]) -> None: """Delete the given keys and their associated values. Args: keys (Sequence[str]): A sequence of keys to delete. """ for key in keys: if key in self.store: del self.store[key] def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]: """Get an iterator over keys that match the given prefix. Args: prefix (str, optional): The prefix to match. Defaults to None. Returns: Iterator[str]: An iterator over keys that match the given prefix. """ if prefix is None: yield from self.store.keys() else: for key in self.store.keys(): if key.startswith(prefix): yield key InMemoryStore = InMemoryBaseStore[Any] InMemoryByteStore = InMemoryBaseStore[bytes]
[]
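The module above ends by specializing the generic store twice; a brief sketch of both aliases, which differ only in their value type (the keys and values here are arbitrary examples).

.. code-block:: python

    from langchain.storage import InMemoryByteStore, InMemoryStore

    store = InMemoryStore()
    store.mset([("config", {"retries": 3}), ("greeting", "hello")])
    print(store.mget(["config", "greeting"]))  # [{'retries': 3}, 'hello']

    byte_store = InMemoryByteStore()
    byte_store.mset([("doc-1", b"raw bytes payload")])
    print(list(byte_store.yield_keys(prefix="doc")))  # ['doc-1']
    byte_store.mdelete(["doc-1"])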
2024-01-10
mth93/langchain
libs~langchain~langchain~agents~load_tools.py
# flake8: noqa """Tools provide access to various resources and services. LangChain has a large ecosystem of integrations with various external resources like local and remote file systems, APIs and databases. These integrations allow developers to create versatile applications that combine the power of LLMs with the ability to access, interact with and manipulate external resources. When developing an application, developers should inspect the capabilities and permissions of the tools that underlie the given agent toolkit, and determine whether permissions of the given toolkit are appropriate for the application. See [Security](https://python.langchain.com/docs/security) for more information. """ import warnings from typing import Any, Dict, List, Optional, Callable, Tuple from mypy_extensions import Arg, KwArg from langchain.agents.tools import Tool from libs.core.langchain_core.language_models import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs from langchain.chains.api.base import APIChain from langchain.chains.llm_math.base import LLMMathChain from langchain.utilities.dalle_image_generator import DallEAPIWrapper from langchain.utilities.requests import TextRequestsWrapper from langchain.tools.arxiv.tool import ArxivQueryRun from langchain.tools.golden_query.tool import GoldenQueryRun from langchain.tools.pubmed.tool import PubmedQueryRun from libs.core.langchain_core.tools import BaseTool from langchain.tools.bing_search.tool import BingSearchRun from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun from langchain.tools.google_cloud.texttospeech import GoogleCloudTextToSpeechTool from langchain.tools.google_lens.tool import GoogleLensQueryRun from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun from langchain.tools.google_scholar.tool import GoogleScholarQueryRun from langchain.tools.google_finance.tool import GoogleFinanceQueryRun from langchain.tools.google_trends.tool import GoogleTrendsQueryRun from langchain.tools.metaphor_search.tool import MetaphorSearchResults from langchain.tools.google_jobs.tool import GoogleJobsQueryRun from langchain.tools.google_serper.tool import GoogleSerperResults, GoogleSerperRun from langchain.tools.searchapi.tool import SearchAPIResults, SearchAPIRun from langchain.tools.graphql.tool import BaseGraphQLTool from langchain.tools.human.tool import HumanInputRun from langchain.tools.requests.tool import ( RequestsDeleteTool, RequestsGetTool, RequestsPatchTool, RequestsPostTool, RequestsPutTool, ) from langchain.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool from langchain.tools.scenexplain.tool import SceneXplainTool from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun from langchain.tools.shell.tool import ShellTool from langchain.tools.sleep.tool import SleepTool from langchain.tools.stackexchange.tool import StackExchangeTool from langchain.tools.merriam_webster.tool import MerriamWebsterQueryRun from langchain.tools.wikipedia.tool import WikipediaQueryRun from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun from langchain.tools.openweathermap.tool import OpenWeatherMapQueryRun from langchain.tools.dataforseo_api_search import DataForSeoAPISearchRun from langchain.tools.dataforseo_api_search import DataForSeoAPISearchResults from langchain.tools.memorize.tool import Memorize from 
langchain.tools.reddit_search.tool import RedditSearchRun from langchain.utilities.arxiv import ArxivAPIWrapper from langchain.utilities.golden_query import GoldenQueryAPIWrapper from langchain.utilities.pubmed import PubMedAPIWrapper from langchain.utilities.bing_search import BingSearchAPIWrapper from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper from langchain.utilities.google_lens import GoogleLensAPIWrapper from langchain.utilities.google_jobs import GoogleJobsAPIWrapper from langchain.utilities.google_search import GoogleSearchAPIWrapper from langchain.utilities.google_serper import GoogleSerperAPIWrapper from langchain.utilities.google_scholar import GoogleScholarAPIWrapper from langchain.utilities.google_finance import GoogleFinanceAPIWrapper from langchain.utilities.google_trends import GoogleTrendsAPIWrapper from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper from langchain.utilities.awslambda import LambdaWrapper from langchain.utilities.graphql import GraphQLAPIWrapper from langchain.utilities.searchapi import SearchApiAPIWrapper from langchain.utilities.searx_search import SearxSearchWrapper from langchain.utilities.serpapi import SerpAPIWrapper from langchain.utilities.stackexchange import StackExchangeAPIWrapper from langchain.utilities.twilio import TwilioAPIWrapper from langchain.utilities.merriam_webster import MerriamWebsterAPIWrapper from langchain.utilities.wikipedia import WikipediaAPIWrapper from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper from langchain.utilities.reddit_search import RedditSearchAPIWrapper def _get_python_repl() -> BaseTool: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. 
" "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def _get_tools_requests_get() -> BaseTool: return RequestsGetTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_post() -> BaseTool: return RequestsPostTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_patch() -> BaseTool: return RequestsPatchTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_put() -> BaseTool: return RequestsPutTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_delete() -> BaseTool: return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper()) def _get_terminal() -> BaseTool: return ShellTool() def _get_sleep() -> BaseTool: return SleepTool() _BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = { "requests": _get_tools_requests_get, # preserved for backwards compatibility "requests_get": _get_tools_requests_get, "requests_post": _get_tools_requests_post, "requests_patch": _get_tools_requests_patch, "requests_put": _get_tools_requests_put, "requests_delete": _get_tools_requests_delete, "terminal": _get_terminal, "sleep": _get_sleep, } def _get_llm_math(llm: BaseLanguageModel) -> BaseTool: return Tool( name="Calculator", description="Useful for when you need to answer questions about math.", func=LLMMathChain.from_llm(llm=llm).run, coroutine=LLMMathChain.from_llm(llm=llm).arun, ) def _get_open_meteo_api(llm: BaseLanguageModel) -> BaseTool: chain = APIChain.from_llm_and_api_docs( llm, open_meteo_docs.OPEN_METEO_DOCS, limit_to_domains=["https://api.open-meteo.com/"], ) return Tool( name="Open-Meteo-API", description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.", func=chain.run, ) _LLM_TOOLS: Dict[str, Callable[[BaseLanguageModel], BaseTool]] = { "llm-math": _get_llm_math, "open-meteo-api": _get_open_meteo_api, } def _get_news_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool: news_api_key = kwargs["news_api_key"] chain = APIChain.from_llm_and_api_docs( llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key}, limit_to_domains=["https://newsapi.org/"], ) return Tool( name="News-API", description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.", func=chain.run, ) def _get_tmdb_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool: tmdb_bearer_token = kwargs["tmdb_bearer_token"] chain = APIChain.from_llm_and_api_docs( llm, tmdb_docs.TMDB_DOCS, headers={"Authorization": f"Bearer {tmdb_bearer_token}"}, limit_to_domains=["https://api.themoviedb.org/"], ) return Tool( name="TMDB-API", description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.", func=chain.run, ) def _get_podcast_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool: listen_api_key = kwargs["listen_api_key"] chain = APIChain.from_llm_and_api_docs( llm, podcast_docs.PODCAST_DOCS, headers={"X-ListenAPI-Key": listen_api_key}, limit_to_domains=["https://listen-api.listennotes.com/"], ) return Tool( name="Podcast-API", description="Use the Listen Notes Podcast API to search all podcasts or episodes. 
The input should be a question in natural language that this API can answer.", func=chain.run, ) def _get_lambda_api(**kwargs: Any) -> BaseTool: return Tool( name=kwargs["awslambda_tool_name"], description=kwargs["awslambda_tool_description"], func=LambdaWrapper(**kwargs).run, ) def _get_wolfram_alpha(**kwargs: Any) -> BaseTool: return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs)) def _get_google_search(**kwargs: Any) -> BaseTool: return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs)) def _get_merriam_webster(**kwargs: Any) -> BaseTool: return MerriamWebsterQueryRun(api_wrapper=MerriamWebsterAPIWrapper(**kwargs)) def _get_wikipedia(**kwargs: Any) -> BaseTool: return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs)) def _get_arxiv(**kwargs: Any) -> BaseTool: return ArxivQueryRun(api_wrapper=ArxivAPIWrapper(**kwargs)) def _get_golden_query(**kwargs: Any) -> BaseTool: return GoldenQueryRun(api_wrapper=GoldenQueryAPIWrapper(**kwargs)) def _get_pubmed(**kwargs: Any) -> BaseTool: return PubmedQueryRun(api_wrapper=PubMedAPIWrapper(**kwargs)) def _get_google_jobs(**kwargs: Any) -> BaseTool: return GoogleJobsQueryRun(api_wrapper=GoogleJobsAPIWrapper(**kwargs)) def _get_google_lens(**kwargs: Any) -> BaseTool: return GoogleLensQueryRun(api_wrapper=GoogleLensAPIWrapper(**kwargs)) def _get_google_serper(**kwargs: Any) -> BaseTool: return GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper(**kwargs)) def _get_google_scholar(**kwargs: Any) -> BaseTool: return GoogleScholarQueryRun(api_wrapper=GoogleScholarAPIWrapper(**kwargs)) def _get_google_finance(**kwargs: Any) -> BaseTool: return GoogleFinanceQueryRun(api_wrapper=GoogleFinanceAPIWrapper(**kwargs)) def _get_google_trends(**kwargs: Any) -> BaseTool: return GoogleTrendsQueryRun(api_wrapper=GoogleTrendsAPIWrapper(**kwargs)) def _get_google_serper_results_json(**kwargs: Any) -> BaseTool: return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs)) def _get_google_search_results_json(**kwargs: Any) -> BaseTool: return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs)) def _get_searchapi(**kwargs: Any) -> BaseTool: return SearchAPIRun(api_wrapper=SearchApiAPIWrapper(**kwargs)) def _get_searchapi_results_json(**kwargs: Any) -> BaseTool: return SearchAPIResults(api_wrapper=SearchApiAPIWrapper(**kwargs)) def _get_serpapi(**kwargs: Any) -> BaseTool: return Tool( name="Search", description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.", func=SerpAPIWrapper(**kwargs).run, coroutine=SerpAPIWrapper(**kwargs).arun, ) def _get_stackexchange(**kwargs: Any) -> BaseTool: return StackExchangeTool(api_wrapper=StackExchangeAPIWrapper(**kwargs)) def _get_dalle_image_generator(**kwargs: Any) -> Tool: return Tool( "Dall-E-Image-Generator", DallEAPIWrapper(**kwargs).run, "A wrapper around OpenAI DALL-E API. Useful for when you need to generate images from a text description. 
Input should be an image description.", ) def _get_twilio(**kwargs: Any) -> BaseTool: return Tool( name="Text-Message", description="Useful for when you need to send a text message to a provided phone number.", func=TwilioAPIWrapper(**kwargs).run, ) def _get_searx_search(**kwargs: Any) -> BaseTool: return SearxSearchRun(wrapper=SearxSearchWrapper(**kwargs)) def _get_searx_search_results_json(**kwargs: Any) -> BaseTool: wrapper_kwargs = {k: v for k, v in kwargs.items() if k != "num_results"} return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **kwargs) def _get_bing_search(**kwargs: Any) -> BaseTool: return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs)) def _get_metaphor_search(**kwargs: Any) -> BaseTool: return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs)) def _get_ddg_search(**kwargs: Any) -> BaseTool: return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs)) def _get_human_tool(**kwargs: Any) -> BaseTool: return HumanInputRun(**kwargs) def _get_scenexplain(**kwargs: Any) -> BaseTool: return SceneXplainTool(**kwargs) def _get_graphql_tool(**kwargs: Any) -> BaseTool: graphql_endpoint = kwargs["graphql_endpoint"] wrapper = GraphQLAPIWrapper(graphql_endpoint=graphql_endpoint) return BaseGraphQLTool(graphql_wrapper=wrapper) def _get_openweathermap(**kwargs: Any) -> BaseTool: return OpenWeatherMapQueryRun(api_wrapper=OpenWeatherMapAPIWrapper(**kwargs)) def _get_dataforseo_api_search(**kwargs: Any) -> BaseTool: return DataForSeoAPISearchRun(api_wrapper=DataForSeoAPIWrapper(**kwargs)) def _get_dataforseo_api_search_json(**kwargs: Any) -> BaseTool: return DataForSeoAPISearchResults(api_wrapper=DataForSeoAPIWrapper(**kwargs)) def _get_eleven_labs_text2speech(**kwargs: Any) -> BaseTool: return ElevenLabsText2SpeechTool(**kwargs) def _get_memorize(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool: return Memorize(llm=llm) def _get_google_cloud_texttospeech(**kwargs: Any) -> BaseTool: return GoogleCloudTextToSpeechTool(**kwargs) def _get_reddit_search(**kwargs: Any) -> BaseTool: return RedditSearchRun(api_wrapper=RedditSearchAPIWrapper(**kwargs)) _EXTRA_LLM_TOOLS: Dict[ str, Tuple[Callable[[Arg(BaseLanguageModel, "llm"), KwArg(Any)], BaseTool], List[str]], ] = { "news-api": (_get_news_api, ["news_api_key"]), "tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]), "podcast-api": (_get_podcast_api, ["listen_api_key"]), "memorize": (_get_memorize, []), } _EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = { "wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]), "google-search": (_get_google_search, ["google_api_key", "google_cse_id"]), "google-search-results-json": ( _get_google_search_results_json, ["google_api_key", "google_cse_id", "num_results"], ), "searx-search-results-json": ( _get_searx_search_results_json, ["searx_host", "engines", "num_results", "aiosession"], ), "bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]), "metaphor-search": (_get_metaphor_search, ["metaphor_api_key"]), "ddg-search": (_get_ddg_search, []), "google-lens": (_get_google_lens, ["serp_api_key"]), "google-serper": (_get_google_serper, ["serper_api_key", "aiosession"]), "google-scholar": ( _get_google_scholar, ["top_k_results", "hl", "lr", "serp_api_key"], ), "google-finance": ( _get_google_finance, ["serp_api_key"], ), "google-trends": ( _get_google_trends, ["serp_api_key"], ), "google-jobs": ( _get_google_jobs, ["serp_api_key"], ), "google-serper-results-json": ( 
_get_google_serper_results_json, ["serper_api_key", "aiosession"], ), "searchapi": (_get_searchapi, ["searchapi_api_key", "aiosession"]), "searchapi-results-json": ( _get_searchapi_results_json, ["searchapi_api_key", "aiosession"], ), "serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]), "dalle-image-generator": (_get_dalle_image_generator, ["openai_api_key"]), "twilio": (_get_twilio, ["account_sid", "auth_token", "from_number"]), "searx-search": (_get_searx_search, ["searx_host", "engines", "aiosession"]), "merriam-webster": (_get_merriam_webster, ["merriam_webster_api_key"]), "wikipedia": (_get_wikipedia, ["top_k_results", "lang"]), "arxiv": ( _get_arxiv, ["top_k_results", "load_max_docs", "load_all_available_meta"], ), "golden-query": (_get_golden_query, ["golden_api_key"]), "pubmed": (_get_pubmed, ["top_k_results"]), "human": (_get_human_tool, ["prompt_func", "input_func"]), "awslambda": ( _get_lambda_api, ["awslambda_tool_name", "awslambda_tool_description", "function_name"], ), "stackexchange": (_get_stackexchange, []), "sceneXplain": (_get_scenexplain, []), "graphql": (_get_graphql_tool, ["graphql_endpoint"]), "openweathermap-api": (_get_openweathermap, ["openweathermap_api_key"]), "dataforseo-api-search": ( _get_dataforseo_api_search, ["api_login", "api_password", "aiosession"], ), "dataforseo-api-search-json": ( _get_dataforseo_api_search_json, ["api_login", "api_password", "aiosession"], ), "eleven_labs_text2speech": (_get_eleven_labs_text2speech, ["eleven_api_key"]), "google_cloud_texttospeech": (_get_google_cloud_texttospeech, []), "reddit_search": ( _get_reddit_search, ["reddit_client_id", "reddit_client_secret", "reddit_user_agent"], ), } def _handle_callbacks( callback_manager: Optional[BaseCallbackManager], callbacks: Callbacks ) -> Callbacks: if callback_manager is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) if callbacks is not None: raise ValueError( "Cannot specify both callback_manager and callbacks arguments." ) return callback_manager return callbacks def load_huggingface_tool( task_or_repo_id: str, model_repo_id: Optional[str] = None, token: Optional[str] = None, remote: bool = False, **kwargs: Any, ) -> BaseTool: """Loads a tool from the HuggingFace Hub. Args: task_or_repo_id: Task or model repo id. model_repo_id: Optional model repo id. token: Optional token. remote: Optional remote. Defaults to False. **kwargs: Returns: A tool. """ try: from transformers import load_tool except ImportError: raise ImportError( "HuggingFace tools require the libraries `transformers>=4.29.0`" " and `huggingface_hub>=0.14.1` to be installed." " Please install it with" " `pip install --upgrade transformers huggingface_hub`." ) hf_tool = load_tool( task_or_repo_id, model_repo_id=model_repo_id, token=token, remote=remote, **kwargs, ) outputs = hf_tool.outputs if set(outputs) != {"text"}: raise NotImplementedError("Multimodal outputs not supported yet.") inputs = hf_tool.inputs if set(inputs) != {"text"}: raise NotImplementedError("Multimodal inputs not supported yet.") return Tool.from_function( hf_tool.__call__, name=hf_tool.name, description=hf_tool.description ) def load_tools( tool_names: List[str], llm: Optional[BaseLanguageModel] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> List[BaseTool]: """Load tools based on their name. Tools allow agents to interact with various resources and services like APIs, databases, file systems, etc. 
Please scope the permissions of each tools to the minimum required for the application. For example, if an application only needs to read from a database, the database tool should not be given write permissions. Moreover consider scoping the permissions to only allow accessing specific tables and impose user-level quota for limiting resource usage. Please read the APIs of the individual tools to determine which configuration they support. See [Security](https://python.langchain.com/docs/security) for more information. Args: tool_names: name of tools to load. llm: An optional language model, may be needed to initialize certain tools. callbacks: Optional callback manager or list of callback handlers. If not provided, default global callback manager will be used. Returns: List of tools. """ tools = [] callbacks = _handle_callbacks( callback_manager=kwargs.get("callback_manager"), callbacks=callbacks ) # print(_BASE_TOOLS) # print(1) for name in tool_names: if name == "requests": warnings.warn( "tool name `requests` is deprecated - " "please use `requests_all` or specify the requests method" ) if name == "requests_all": # expand requests into various methods requests_method_tools = [ _tool for _tool in _BASE_TOOLS if _tool.startswith("requests_") ] tool_names.extend(requests_method_tools) elif name in _BASE_TOOLS: tools.append(_BASE_TOOLS[name]()) elif name in _LLM_TOOLS: if llm is None: raise ValueError(f"Tool {name} requires an LLM to be provided") tool = _LLM_TOOLS[name](llm) tools.append(tool) elif name in _EXTRA_LLM_TOOLS: if llm is None: raise ValueError(f"Tool {name} requires an LLM to be provided") _get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name] missing_keys = set(extra_keys).difference(kwargs) if missing_keys: raise ValueError( f"Tool {name} requires some parameters that were not " f"provided: {missing_keys}" ) sub_kwargs = {k: kwargs[k] for k in extra_keys} tool = _get_llm_tool_func(llm=llm, **sub_kwargs) tools.append(tool) elif name in _EXTRA_OPTIONAL_TOOLS: _get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name] sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs} tool = _get_tool_func(**sub_kwargs) tools.append(tool) else: raise ValueError(f"Got unknown tool {name}") if callbacks is not None: for tool in tools: tool.callbacks = callbacks return tools def get_all_tool_names() -> List[str]: """Get a list of all possible tool names.""" return ( list(_BASE_TOOLS) + list(_EXTRA_OPTIONAL_TOOLS) + list(_EXTRA_LLM_TOOLS) + list(_LLM_TOOLS) )
[]
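A usage sketch for load_tools above, mixing a base tool, an LLM-backed tool, and an extra-optional tool that needs no API key. The FakeListLLM stands in for a real model so the sketch stays offline; with a real agent you would pass your production LLM instead.

.. code-block:: python

    from langchain.agents import get_all_tool_names, load_tools
    from langchain.llms import FakeListLLM

    llm = FakeListLLM(responses=["Answer: 4"])  # any BaseLanguageModel works here

    tools = load_tools(["llm-math", "ddg-search", "terminal"], llm=llm)
    print([tool.name for tool in tools])  # e.g. ['Calculator', 'duckduckgo_search', 'terminal']

    # The full registry of loadable names:
    print(get_all_tool_names())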
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~sagemaker_endpoint.py
from typing import Any, Dict, List, Optional from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator from langchain_community.llms.sagemaker_endpoint import ContentHandlerBase class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]): """Content handler for LLM class.""" class SagemakerEndpointEmbeddings(BaseModel, Embeddings): """Custom Sagemaker Inference Endpoints. To use, you must supply the endpoint name from your deployed Sagemaker model & the region where it is deployed. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint. See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html """ """ Example: .. code-block:: python from langchain_community.embeddings import SagemakerEndpointEmbeddings endpoint_name = ( "my-endpoint-name" ) region_name = ( "us-west-2" ) credentials_profile_name = ( "default" ) se = SagemakerEndpointEmbeddings( endpoint_name=endpoint_name, region_name=region_name, credentials_profile_name=credentials_profile_name ) #Use with boto3 client client = boto3.client( "sagemaker-runtime", region_name=region_name ) se = SagemakerEndpointEmbeddings( endpoint_name=endpoint_name, client=client ) """ client: Any = None endpoint_name: str = "" """The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region.""" region_name: str = "" """The aws region where the Sagemaker model is deployed, eg. `us-west-2`.""" credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ content_handler: EmbeddingsContentHandler """The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. """ """ Example: .. code-block:: python from langchain_community.embeddings.sagemaker_endpoint import EmbeddingsContentHandler class ContentHandler(EmbeddingsContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes: input_str = json.dumps({prompts: prompts, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> List[List[float]]: response_json = json.loads(output.read().decode("utf-8")) return response_json["vectors"] """ # noqa: E501 model_kwargs: Optional[Dict] = None """Keyword arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint function. See `boto3`_. docs for more info. .. 
_boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html> """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Dont do anything if client provided externally""" if values.get("client") is not None: return values """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: if values["credentials_profile_name"] is not None: session = boto3.Session( profile_name=values["credentials_profile_name"] ) else: # use default credentials session = boto3.Session() values["client"] = session.client( "sagemaker-runtime", region_name=values["region_name"] ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e except ImportError: raise ImportError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) return values def _embedding_func(self, texts: List[str]) -> List[List[float]]: """Call out to SageMaker Inference embedding endpoint.""" # replace newlines, which can negatively affect performance. texts = list(map(lambda x: x.replace("\n", " "), texts)) _model_kwargs = self.model_kwargs or {} _endpoint_kwargs = self.endpoint_kwargs or {} body = self.content_handler.transform_input(texts, _model_kwargs) content_type = self.content_handler.content_type accepts = self.content_handler.accepts # send request try: response = self.client.invoke_endpoint( EndpointName=self.endpoint_name, Body=body, ContentType=content_type, Accept=accepts, **_endpoint_kwargs, ) except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") return self.content_handler.transform_output(response["Body"]) def embed_documents( self, texts: List[str], chunk_size: int = 64 ) -> List[List[float]]: """Compute doc embeddings using a SageMaker Inference Endpoint. Args: texts: The list of texts to embed. chunk_size: The chunk size defines how many input texts will be grouped together as request. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ results = [] _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size for i in range(0, len(texts), _chunk_size): response = self._embedding_func(texts[i : i + _chunk_size]) results.extend(response) return results def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a SageMaker inference endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embedding_func([text])[0]
[]
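A condensed sketch tying together the EmbeddingsContentHandler and SagemakerEndpointEmbeddings classes above. The endpoint name, region, and JSON schema are placeholders: the request and response formats depend entirely on how the model was deployed, so the handler below only mirrors the docstring example.

.. code-block:: python

    import json
    from typing import Dict, List

    from langchain_community.embeddings import SagemakerEndpointEmbeddings
    from langchain_community.embeddings.sagemaker_endpoint import EmbeddingsContentHandler


    class ContentHandler(EmbeddingsContentHandler):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
            # Endpoint-specific request schema; adjust the key names to your model.
            return json.dumps({"inputs": prompts, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> List[List[float]]:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json["vectors"]


    embeddings = SagemakerEndpointEmbeddings(
        endpoint_name="my-embedding-endpoint",  # placeholder
        region_name="us-west-2",  # placeholder
        content_handler=ContentHandler(),
    )
    vectors = embeddings.embed_documents(["first text", "second text"], chunk_size=2)
    query_vector = embeddings.embed_query("a single query")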
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~moderation.py
"""Pass input through a moderation endpoint.""" from typing import Any, Dict, List, Optional from libs.core.langchain_core.pydantic_v1 import root_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.utils import get_from_dict_or_env class OpenAIModerationChain(Chain): """Pass input through a moderation endpoint. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.chains import OpenAIModerationChain moderation = OpenAIModerationChain() """ client: Any #: :meta private: model_name: Optional[str] = None """Moderation model name to use.""" error: bool = False """Whether or not to error if bad content was found.""" input_key: str = "input" #: :meta private: output_key: str = "output" #: :meta private: openai_api_key: Optional[str] = None openai_organization: Optional[str] = None @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai openai.api_key = openai_api_key if openai_organization: openai.organization = openai_organization values["client"] = openai.Moderation except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _moderate(self, text: str, results: dict) -> str: if results["flagged"]: error_str = "Text was found that violates OpenAI's content policy." if self.error: raise ValueError(error_str) else: return error_str return text def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: text = inputs[self.input_key] results = self.client.create(text) output = self._moderate(text, results["results"][0]) return {self.output_key: output}
[]
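A usage sketch for the OpenAIModerationChain above; it assumes OPENAI_API_KEY is set in the environment. With the default error=False, flagged text is replaced by a fixed message, while error=True raises instead.

.. code-block:: python

    from langchain.chains import OpenAIModerationChain

    moderation = OpenAIModerationChain()  # assumes OPENAI_API_KEY is set
    print(moderation.run("This is a perfectly innocuous sentence."))

    strict_moderation = OpenAIModerationChain(error=True)
    try:
        strict_moderation.run("text that violates the content policy")
    except ValueError as err:
        print(f"Blocked: {err}")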
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~gmail~send_message.py
"""Send Gmail messages.""" import base64 from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from typing import Any, Dict, List, Optional, Type, Union from libs.core.langchain_core.callbacks import CallbackManagerForToolRun from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from langchain_community.tools.gmail.base import GmailBaseTool class SendMessageSchema(BaseModel): """Input for SendMessageTool.""" message: str = Field( ..., description="The message to send.", ) to: Union[str, List[str]] = Field( ..., description="The list of recipients.", ) subject: str = Field( ..., description="The subject of the message.", ) cc: Optional[Union[str, List[str]]] = Field( None, description="The list of CC recipients.", ) bcc: Optional[Union[str, List[str]]] = Field( None, description="The list of BCC recipients.", ) class GmailSendMessage(GmailBaseTool): """Tool that sends a message to Gmail.""" name: str = "send_gmail_message" description: str = ( "Use this tool to send email messages." " The input is the message, recipients" ) args_schema: Type[SendMessageSchema] = SendMessageSchema def _prepare_message( self, message: str, to: Union[str, List[str]], subject: str, cc: Optional[Union[str, List[str]]] = None, bcc: Optional[Union[str, List[str]]] = None, ) -> Dict[str, Any]: """Create a message for an email.""" mime_message = MIMEMultipart() mime_message.attach(MIMEText(message, "html")) mime_message["To"] = ", ".join(to if isinstance(to, list) else [to]) mime_message["Subject"] = subject if cc is not None: mime_message["Cc"] = ", ".join(cc if isinstance(cc, list) else [cc]) if bcc is not None: mime_message["Bcc"] = ", ".join(bcc if isinstance(bcc, list) else [bcc]) encoded_message = base64.urlsafe_b64encode(mime_message.as_bytes()).decode() return {"raw": encoded_message} def _run( self, message: str, to: Union[str, List[str]], subject: str, cc: Optional[Union[str, List[str]]] = None, bcc: Optional[Union[str, List[str]]] = None, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Run the tool.""" try: create_message = self._prepare_message(message, to, subject, cc=cc, bcc=bcc) send_message = ( self.api_resource.users() .messages() .send(userId="me", body=create_message) ) sent_message = send_message.execute() return f'Message sent. Message Id: {sent_message["id"]}' except Exception as error: raise Exception(f"An error occurred: {error}")
[]
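A usage sketch for the GmailSendMessage tool above. Building the api_resource needs Google OAuth credentials; the get_gmail_credentials and build_resource_service helpers come from the companion gmail utils module, and the token/credentials file paths and addresses are placeholders.

.. code-block:: python

    from langchain_community.tools.gmail.send_message import GmailSendMessage
    from langchain_community.tools.gmail.utils import (
        build_resource_service,
        get_gmail_credentials,
    )

    credentials = get_gmail_credentials(
        token_file="token.json",  # placeholder paths
        client_secrets_file="credentials.json",
        scopes=["https://mail.google.com/"],
    )
    api_resource = build_resource_service(credentials=credentials)

    tool = GmailSendMessage(api_resource=api_resource)
    result = tool.run(
        {
            "message": "<p>Hello from LangChain</p>",
            "to": ["recipient@example.com"],
            "subject": "Test message",
            "cc": ["observer@example.com"],
        }
    )
    print(result)  # "Message sent. Message Id: ..."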
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~retrievers~test_pubmed.py
"""Integration test for PubMed API Wrapper.""" from typing import List import pytest from libs.core.langchain_core.documents import Document from langchain_community.retrievers import PubMedRetriever @pytest.fixture def retriever() -> PubMedRetriever: return PubMedRetriever() def assert_docs(docs: List[Document]) -> None: for doc in docs: assert doc.metadata assert set(doc.metadata) == { "Copyright Information", "uid", "Title", "Published", } def test_load_success(retriever: PubMedRetriever) -> None: docs = retriever.get_relevant_documents(query="chatgpt") assert len(docs) == 3 assert_docs(docs) def test_load_success_top_k_results(retriever: PubMedRetriever) -> None: retriever.top_k_results = 2 docs = retriever.get_relevant_documents(query="chatgpt") assert len(docs) == 2 assert_docs(docs) def test_load_no_result(retriever: PubMedRetriever) -> None: docs = retriever.get_relevant_documents("1605.08386WWW") assert not docs
[]
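The integration tests above pin down the retriever's observable behaviour; a condensed sketch of the same calls outside pytest, using the query strings from the tests.

.. code-block:: python

    from langchain_community.retrievers import PubMedRetriever

    retriever = PubMedRetriever(top_k_results=2)

    for doc in retriever.get_relevant_documents("chatgpt"):
        # metadata carries uid, Title, Published and Copyright Information
        print(doc.metadata["uid"], doc.metadata["Title"])

    # A nonsense query simply yields an empty list.
    assert retriever.get_relevant_documents("1605.08386WWW") == []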
2024-01-10
mth93/langchain
libs~partners~nvidia-ai-endpoints~langchain_nvidia_ai_endpoints~chat_models.py
"""Chat Model Components Derived from ChatModel/NVIDIA""" from __future__ import annotations import base64 import logging import os import urllib.parse from typing import ( Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, Sequence, Union, ) import requests from libs.core.langchain_core.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.chat_models import SimpleChatModel from libs.core.langchain_core.messages import BaseMessage, ChatMessage, ChatMessageChunk from libs.core.langchain_core.outputs import ChatGenerationChunk from langchain_nvidia_ai_endpoints import _common as nvidia_ai_endpoints logger = logging.getLogger(__name__) def _is_openai_parts_format(part: dict) -> bool: return "type" in part def _is_url(s: str) -> bool: try: result = urllib.parse.urlparse(s) return all([result.scheme, result.netloc]) except Exception as e: logger.debug(f"Unable to parse URL: {e}") return False def _is_b64(s: str) -> bool: return s.startswith("data:image") def _url_to_b64_string(image_source: str) -> str: b64_template = "data:image/png;base64,{b64_string}" try: if _is_url(image_source): response = requests.get(image_source) response.raise_for_status() encoded = base64.b64encode(response.content).decode("utf-8") return b64_template.format(b64_string=encoded) elif _is_b64(image_source): return image_source elif os.path.exists(image_source): with open(image_source, "rb") as f: encoded = base64.b64encode(f.read()).decode("utf-8") return b64_template.format(b64_string=encoded) else: raise ValueError( "The provided string is not a valid URL, base64, or file path." ) except Exception as e: raise ValueError(f"Unable to process the provided image source: {e}") class ChatNVIDIA(nvidia_ai_endpoints._NVIDIAClient, SimpleChatModel): """NVIDIA chat model. Example: .. 
code-block:: python from langchain_nvidia_ai_endpoints import ChatNVIDIA model = ChatNVIDIA(model="llama2_13b") response = model.invoke("Hello") """ @property def _llm_type(self) -> str: """Return type of NVIDIA AI Foundation Model Interface.""" return "chat-nvidia-ai-playground" def _call( self, messages: List[BaseMessage], stop: Optional[Sequence[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, labels: Optional[dict] = None, **kwargs: Any, ) -> str: """Invoke on a single list of chat messages.""" inputs = self.custom_preprocess(messages) responses = self.get_generation( inputs=inputs, stop=stop, labels=labels, **kwargs ) outputs = self.custom_postprocess(responses) return outputs def _get_filled_chunk( self, text: str, role: Optional[str] = "assistant" ) -> ChatGenerationChunk: """Fill the generation chunk.""" return ChatGenerationChunk(message=ChatMessageChunk(content=text, role=role)) def _stream( self, messages: List[BaseMessage], stop: Optional[Sequence[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, labels: Optional[dict] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: """Allows streaming to model!""" inputs = self.custom_preprocess(messages) for response in self.get_stream( inputs=inputs, stop=stop, labels=labels, **kwargs ): chunk = self._get_filled_chunk(self.custom_postprocess(response)) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk) async def _astream( self, messages: List[BaseMessage], stop: Optional[Sequence[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, labels: Optional[dict] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: inputs = self.custom_preprocess(messages) async for response in self.get_astream( inputs=inputs, stop=stop, labels=labels, **kwargs ): chunk = self._get_filled_chunk(self.custom_postprocess(response)) yield chunk if run_manager: await run_manager.on_llm_new_token(chunk.text, chunk=chunk) def custom_preprocess( self, msg_list: Sequence[BaseMessage] ) -> List[Dict[str, str]]: # The previous author had a lot of custom preprocessing here # but I'm just going to assume it's a list return [self.preprocess_msg(m) for m in msg_list] def _process_content(self, content: Union[str, List[Union[dict, str]]]) -> str: if isinstance(content, str): return content string_array: list = [] for part in content: if isinstance(part, str): string_array.append(part) elif isinstance(part, Mapping): # OpenAI Format if _is_openai_parts_format(part): if part["type"] == "text": string_array.append(str(part["text"])) elif part["type"] == "image_url": img_url = part["image_url"] if isinstance(img_url, dict): if "url" not in img_url: raise ValueError( f"Unrecognized message image format: {img_url}" ) img_url = img_url["url"] b64_string = _url_to_b64_string(img_url) string_array.append(f'<img src="{b64_string}" />') else: raise ValueError( f"Unrecognized message part type: {part['type']}" ) else: raise ValueError(f"Unrecognized message part format: {part}") return "".join(string_array) def preprocess_msg(self, msg: BaseMessage) -> Dict[str, str]: ## (WFH): Previous author added a bunch of # custom processing here, but I'm just going to support # the LCEL api. 
if isinstance(msg, BaseMessage): role_convert = {"ai": "assistant", "human": "user"} if isinstance(msg, ChatMessage): role = msg.role else: role = msg.type role = role_convert.get(role, role) content = self._process_content(msg.content) return {"role": role, "content": content} raise ValueError(f"Invalid message: {repr(msg)} of type {type(msg)}") def custom_postprocess(self, msg: dict) -> str: if "content" in msg: return msg["content"] logger.warning( f"Got ambiguous message in postprocessing; returning as-is: msg = {msg}" ) return str(msg)
[ "data:image/png;base64,{b64_string}" ]
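A short, hedged sketch of using the ChatNVIDIA class in the record above, following its own docstring example. An NVIDIA AI Endpoints API key is assumed to be picked up by the shared _NVIDIAClient base class (e.g. from an NVIDIA_API_KEY environment variable; the variable name is an assumption here).

from langchain_nvidia_ai_endpoints import ChatNVIDIA

llm = ChatNVIDIA(model="llama2_13b")  # model name taken from the class docstring

# Single response; invoke() routes through _call() defined above.
print(llm.invoke("Write one sentence about GPUs.").content)

# Token-by-token streaming; each chunk is produced by _stream() above.
for chunk in llm.stream("Count from 1 to 5."):
    print(chunk.content, end="", flush=True)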
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~ernie.py
import logging import threading from typing import Any, Dict, List, Mapping, Optional import requests from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.chat_models import BaseChatModel from libs.core.langchain_core.messages import ( AIMessage, BaseMessage, ChatMessage, HumanMessage, ) from libs.core.langchain_core.outputs import ChatGeneration, ChatResult from libs.core.langchain_core.pydantic_v1 import root_validator from libs.core.langchain_core.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} else: raise ValueError(f"Got unknown type {message}") return message_dict class ErnieBotChat(BaseChatModel): """`ERNIE-Bot` large language model. ERNIE-Bot is a large language model developed by Baidu, covering a huge amount of Chinese data. To use, you should have the `ernie_client_id` and `ernie_client_secret` set, or set the environment variable `ERNIE_CLIENT_ID` and `ERNIE_CLIENT_SECRET`. Note: access_token will be automatically generated based on client_id and client_secret, and will be regenerated after expiration (30 days). Default model is `ERNIE-Bot-turbo`, currently supported models are `ERNIE-Bot-turbo`, `ERNIE-Bot`, `ERNIE-Bot-8K`, `ERNIE-Bot-4`, `ERNIE-Bot-turbo-AI`. Example: .. code-block:: python from langchain_community.chat_models import ErnieBotChat chat = ErnieBotChat(model_name='ERNIE-Bot') Deprecated Note: Please use `QianfanChatEndpoint` instead of this class. `QianfanChatEndpoint` is a more suitable choice for production. Always test your code after changing to `QianfanChatEndpoint`. Example of `QianfanChatEndpoint`: .. code-block:: python from langchain_community.chat_models import QianfanChatEndpoint qianfan_chat = QianfanChatEndpoint(model="ERNIE-Bot", endpoint="your_endpoint", qianfan_ak="your_ak", qianfan_sk="your_sk") """ ernie_api_base: Optional[str] = None """Baidu application custom endpoints""" ernie_client_id: Optional[str] = None """Baidu application client id""" ernie_client_secret: Optional[str] = None """Baidu application client secret""" access_token: Optional[str] = None """access token is generated by client id and client secret, setting this value directly will cause an error""" model_name: str = "ERNIE-Bot-turbo" """model name of ernie, default is `ERNIE-Bot-turbo`. Currently supported `ERNIE-Bot-turbo`, `ERNIE-Bot`""" system: Optional[str] = None """system is mainly used for model character design, for example, you are an AI assistant produced by xxx company. The length of the system is limiting of 1024 characters.""" request_timeout: Optional[int] = 60 """request timeout for chat http requests""" streaming: Optional[bool] = False """streaming mode. 
not supported yet.""" top_p: Optional[float] = 0.8 temperature: Optional[float] = 0.95 penalty_score: Optional[float] = 1 _lock = threading.Lock() @root_validator() def validate_environment(cls, values: Dict) -> Dict: values["ernie_api_base"] = get_from_dict_or_env( values, "ernie_api_base", "ERNIE_API_BASE", "https://aip.baidubce.com" ) values["ernie_client_id"] = get_from_dict_or_env( values, "ernie_client_id", "ERNIE_CLIENT_ID", ) values["ernie_client_secret"] = get_from_dict_or_env( values, "ernie_client_secret", "ERNIE_CLIENT_SECRET", ) return values def _chat(self, payload: object) -> dict: base_url = f"{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/chat" model_paths = { "ERNIE-Bot-turbo": "eb-instant", "ERNIE-Bot": "completions", "ERNIE-Bot-8K": "ernie_bot_8k", "ERNIE-Bot-4": "completions_pro", "ERNIE-Bot-turbo-AI": "ai_apaas", "BLOOMZ-7B": "bloomz_7b1", "Llama-2-7b-chat": "llama_2_7b", "Llama-2-13b-chat": "llama_2_13b", "Llama-2-70b-chat": "llama_2_70b", } if self.model_name in model_paths: url = f"{base_url}/{model_paths[self.model_name]}" else: raise ValueError(f"Got unknown model_name {self.model_name}") resp = requests.post( url, timeout=self.request_timeout, headers={ "Content-Type": "application/json", }, params={"access_token": self.access_token}, json=payload, ) return resp.json() def _refresh_access_token_with_lock(self) -> None: with self._lock: logger.debug("Refreshing access token") base_url: str = f"{self.ernie_api_base}/oauth/2.0/token" resp = requests.post( base_url, timeout=10, headers={ "Content-Type": "application/json", "Accept": "application/json", }, params={ "grant_type": "client_credentials", "client_id": self.ernie_client_id, "client_secret": self.ernie_client_secret, }, ) self.access_token = str(resp.json().get("access_token")) def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: if self.streaming: raise ValueError("`streaming` option currently unsupported.") if not self.access_token: self._refresh_access_token_with_lock() payload = { "messages": [_convert_message_to_dict(m) for m in messages], "top_p": self.top_p, "temperature": self.temperature, "penalty_score": self.penalty_score, "system": self.system, **kwargs, } logger.debug(f"Payload for ernie api is {payload}") resp = self._chat(payload) if resp.get("error_code"): if resp.get("error_code") == 111: logger.debug("access_token expired, refresh it") self._refresh_access_token_with_lock() resp = self._chat(payload) else: raise ValueError(f"Error from ErnieChat api response: {resp}") return self._create_chat_result(resp) def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: if "function_call" in response: additional_kwargs = { "function_call": dict(response.get("function_call", {})) } else: additional_kwargs = {} generations = [ ChatGeneration( message=AIMessage( content=response.get("result"), additional_kwargs={**additional_kwargs}, ) ) ] token_usage = response.get("usage", {}) llm_output = {"token_usage": token_usage, "model_name": self.model_name} return ChatResult(generations=generations, llm_output=llm_output) @property def _llm_type(self) -> str: return "ernie-bot-chat"
[]
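A hedged usage sketch for ErnieBotChat as defined in the record above; the client id and secret values are placeholders, and the message import follows this repo's rewritten core path.

from libs.core.langchain_core.messages import HumanMessage
from langchain_community.chat_models import ErnieBotChat

chat = ErnieBotChat(
    model_name="ERNIE-Bot",
    ernie_client_id="your_client_id",          # or set ERNIE_CLIENT_ID
    ernie_client_secret="your_client_secret",  # or set ERNIE_CLIENT_SECRET
)
reply = chat.invoke([HumanMessage(content="Introduce ERNIE-Bot in one sentence.")])
print(reply.content)

As the module docstring notes, QianfanChatEndpoint is the recommended replacement for production use.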
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~vertexai.py
"""Wrapper around Google VertexAI chat-based models.""" from __future__ import annotations import base64 import logging import re from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union, cast from urllib.parse import urlparse import requests from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.chat_models import ( BaseChatModel, generate_from_stream, ) from libs.core.langchain_core.messages import ( AIMessage, AIMessageChunk, BaseMessage, HumanMessage, SystemMessage, ) from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from libs.core.langchain_core.pydantic_v1 import root_validator from langchain_community.llms.vertexai import ( _VertexAICommon, is_codey_model, is_gemini_model, ) from langchain_community.utilities.vertexai import ( load_image_from_gcs, raise_vertex_import_error, ) if TYPE_CHECKING: from vertexai.language_models import ( ChatMessage, ChatSession, CodeChatSession, InputOutputTextPair, ) from vertexai.preview.generative_models import Content logger = logging.getLogger(__name__) @dataclass class _ChatHistory: """Represents a context and a history of messages.""" history: List["ChatMessage"] = field(default_factory=list) context: Optional[str] = None def _parse_chat_history(history: List[BaseMessage]) -> _ChatHistory: """Parse a sequence of messages into history. Args: history: The list of messages to re-create the history of the chat. Returns: A parsed chat history. Raises: ValueError: If a sequence of message has a SystemMessage not at the first place. """ from vertexai.language_models import ChatMessage vertex_messages, context = [], None for i, message in enumerate(history): content = cast(str, message.content) if i == 0 and isinstance(message, SystemMessage): context = content elif isinstance(message, AIMessage): vertex_message = ChatMessage(content=message.content, author="bot") vertex_messages.append(vertex_message) elif isinstance(message, HumanMessage): vertex_message = ChatMessage(content=message.content, author="user") vertex_messages.append(vertex_message) else: raise ValueError( f"Unexpected message with type {type(message)} at the position {i}." ) chat_history = _ChatHistory(context=context, history=vertex_messages) return chat_history def _is_url(s: str) -> bool: try: result = urlparse(s) return all([result.scheme, result.netloc]) except Exception as e: logger.debug(f"Unable to parse URL: {e}") return False def _parse_chat_history_gemini( history: List[BaseMessage], project: Optional[str] ) -> List["Content"]: from vertexai.preview.generative_models import Content, Image, Part def _convert_to_prompt(part: Union[str, Dict]) -> Part: if isinstance(part, str): return Part.from_text(part) if not isinstance(part, Dict): raise ValueError( f"Message's content is expected to be a dict, got {type(part)}!" ) if part["type"] == "text": return Part.from_text(part["text"]) elif part["type"] == "image_url": path = part["image_url"]["url"] if path.startswith("gs://"): image = load_image_from_gcs(path=path, project=project) elif path.startswith("data:image/"): # extract base64 component from image uri try: encoded = re.search(r"data:image/\w{2,4};base64,(.*)", path).group( 1 ) except AttributeError: raise ValueError( "Invalid image uri. It should be in the format " "data:image/<image_type>;base64,<base64_encoded_image>." 
) image = Image.from_bytes(base64.b64decode(encoded)) elif _is_url(path): response = requests.get(path) response.raise_for_status() image = Image.from_bytes(response.content) else: image = Image.load_from_file(path) else: raise ValueError("Only text and image_url types are supported!") return Part.from_image(image) vertex_messages = [] for i, message in enumerate(history): if i == 0 and isinstance(message, SystemMessage): raise ValueError("SystemMessages are not yet supported!") elif isinstance(message, AIMessage): role = "model" elif isinstance(message, HumanMessage): role = "user" else: raise ValueError( f"Unexpected message with type {type(message)} at the position {i}." ) raw_content = message.content if isinstance(raw_content, str): raw_content = [raw_content] parts = [_convert_to_prompt(part) for part in raw_content] vertex_message = Content(role=role, parts=parts) vertex_messages.append(vertex_message) return vertex_messages def _parse_examples(examples: List[BaseMessage]) -> List["InputOutputTextPair"]: from vertexai.language_models import InputOutputTextPair if len(examples) % 2 != 0: raise ValueError( f"Expect examples to have an even amount of messages, got {len(examples)}." ) example_pairs = [] input_text = None for i, example in enumerate(examples): if i % 2 == 0: if not isinstance(example, HumanMessage): raise ValueError( f"Expected the first message in a part to be from human, got " f"{type(example)} for the {i}th message." ) input_text = example.content if i % 2 == 1: if not isinstance(example, AIMessage): raise ValueError( f"Expected the second message in a part to be from AI, got " f"{type(example)} for the {i}th message." ) pair = InputOutputTextPair( input_text=input_text, output_text=example.content ) example_pairs.append(pair) return example_pairs def _get_question(messages: List[BaseMessage]) -> HumanMessage: """Get the human message at the end of a list of input messages to a chat model.""" if not messages: raise ValueError("You should provide at least one message to start the chat!") question = messages[-1] if not isinstance(question, HumanMessage): raise ValueError( f"Last message in the list should be from human, got {question.type}." ) return question class ChatVertexAI(_VertexAICommon, BaseChatModel): """`Vertex AI` Chat large language models API.""" model_name: str = "chat-bison" "Underlying model name." examples: Optional[List[BaseMessage]] = None @classmethod def is_lc_serializable(self) -> bool: return True @classmethod def get_lc_namespace(cls) -> List[str]: """Get the namespace of the langchain object.""" return ["langchain", "chat_models", "vertexai"] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" is_gemini = is_gemini_model(values["model_name"]) cls._try_init_vertexai(values) try: from vertexai.language_models import ChatModel, CodeChatModel if is_gemini: from vertexai.preview.generative_models import ( GenerativeModel, ) except ImportError: raise_vertex_import_error() if is_gemini: values["client"] = GenerativeModel(model_name=values["model_name"]) else: if is_codey_model(values["model_name"]): model_cls = CodeChatModel else: model_cls = ChatModel values["client"] = model_cls.from_pretrained(values["model_name"]) return values def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> ChatResult: """Generate next turn in the conversation. 
Args: messages: The history of the conversation as a list of messages. Code chat does not support context. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. stream: Whether to use the streaming endpoint. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. """ should_stream = stream if stream is not None else self.streaming if should_stream: stream_iter = self._stream( messages, stop=stop, run_manager=run_manager, **kwargs ) return generate_from_stream(stream_iter) question = _get_question(messages) params = self._prepare_params(stop=stop, stream=False, **kwargs) msg_params = {} if "candidate_count" in params: msg_params["candidate_count"] = params.pop("candidate_count") if self._is_gemini_model: history_gemini = _parse_chat_history_gemini(messages, project=self.project) message = history_gemini.pop() chat = self.client.start_chat(history=history_gemini) response = chat.send_message(message, generation_config=params) else: history = _parse_chat_history(messages[:-1]) examples = kwargs.get("examples") or self.examples if examples: params["examples"] = _parse_examples(examples) chat = self._start_chat(history, **params) response = chat.send_message(question.content, **msg_params) generations = [ ChatGeneration(message=AIMessage(content=r.text)) for r in response.candidates ] return ChatResult(generations=generations) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Asynchronously generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. Code chat does not support context. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. 
""" if "stream" in kwargs: kwargs.pop("stream") logger.warning("ChatVertexAI does not currently support async streaming.") params = self._prepare_params(stop=stop, **kwargs) msg_params = {} if "candidate_count" in params: msg_params["candidate_count"] = params.pop("candidate_count") if self._is_gemini_model: history_gemini = _parse_chat_history_gemini(messages, project=self.project) message = history_gemini.pop() chat = self.client.start_chat(history=history_gemini) response = await chat.send_message_async(message, generation_config=params) else: question = _get_question(messages) history = _parse_chat_history(messages[:-1]) examples = kwargs.get("examples", None) if examples: params["examples"] = _parse_examples(examples) chat = self._start_chat(history, **params) response = await chat.send_message_async(question.content, **msg_params) generations = [ ChatGeneration(message=AIMessage(content=r.text)) for r in response.candidates ] return ChatResult(generations=generations) def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: params = self._prepare_params(stop=stop, stream=True, **kwargs) if self._is_gemini_model: history_gemini = _parse_chat_history_gemini(messages, project=self.project) message = history_gemini.pop() chat = self.client.start_chat(history=history_gemini) responses = chat.send_message( message, stream=True, generation_config=params ) else: question = _get_question(messages) history = _parse_chat_history(messages[:-1]) examples = kwargs.get("examples", None) if examples: params["examples"] = _parse_examples(examples) chat = self._start_chat(history, **params) responses = chat.send_message_streaming(question.content, **params) for response in responses: if run_manager: run_manager.on_llm_new_token(response.text) yield ChatGenerationChunk(message=AIMessageChunk(content=response.text)) def _start_chat( self, history: _ChatHistory, **kwargs: Any ) -> Union[ChatSession, CodeChatSession]: if not self.is_codey_model: return self.client.start_chat( context=history.context, message_history=history.history, **kwargs ) else: return self.client.start_chat(message_history=history.history, **kwargs)
[]
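A hedged sketch of calling the ChatVertexAI class above with a PaLM chat model; it assumes the vertexai SDK is installed and that Google Cloud credentials and a project are already configured. With chat-bison, the leading SystemMessage becomes the chat context, as handled by _parse_chat_history.

from libs.core.langchain_core.messages import HumanMessage, SystemMessage
from langchain_community.chat_models import ChatVertexAI

chat = ChatVertexAI(model_name="chat-bison", temperature=0.2)
messages = [
    SystemMessage(content="You answer in exactly one sentence."),
    HumanMessage(content="What does Vertex AI provide?"),
]
print(chat.invoke(messages).content)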
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~document_loaders~test_xorbits.py
import pytest from libs.core.langchain_core.documents import Document from langchain_community.document_loaders import XorbitsLoader try: import xorbits # noqa: F401 xorbits_installed = True except ImportError: xorbits_installed = False @pytest.mark.skipif(not xorbits_installed, reason="xorbits not installed") def test_load_returns_list_of_documents() -> None: import xorbits.pandas as pd data = { "text": ["Hello", "World"], "author": ["Alice", "Bob"], "date": ["2022-01-01", "2022-01-02"], } loader = XorbitsLoader(pd.DataFrame(data)) docs = loader.load() assert isinstance(docs, list) assert all(isinstance(doc, Document) for doc in docs) assert len(docs) == 2 @pytest.mark.skipif(not xorbits_installed, reason="xorbits not installed") def test_load_converts_dataframe_columns_to_document_metadata() -> None: import xorbits.pandas as pd data = { "text": ["Hello", "World"], "author": ["Alice", "Bob"], "date": ["2022-01-01", "2022-01-02"], } loader = XorbitsLoader(pd.DataFrame(data)) docs = loader.load() expected = { "author": ["Alice", "Bob"], "date": ["2022-01-01", "2022-01-02"], } for i, doc in enumerate(docs): assert doc.metadata["author"] == expected["author"][i] assert doc.metadata["date"] == expected["date"][i] @pytest.mark.skipif(not xorbits_installed, reason="xorbits not installed") def test_load_uses_page_content_column_to_create_document_text() -> None: import xorbits.pandas as pd data = { "text": ["Hello", "World"], "author": ["Alice", "Bob"], "date": ["2022-01-01", "2022-01-02"], } sample_data_frame = pd.DataFrame(data) sample_data_frame = sample_data_frame.rename(columns={"text": "dummy_test_column"}) loader = XorbitsLoader(sample_data_frame, page_content_column="dummy_test_column") docs = loader.load() assert docs[0].page_content == "Hello" assert docs[1].page_content == "World"
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~recursive_url_loader.py
from __future__ import annotations import asyncio import logging import re from typing import ( TYPE_CHECKING, Callable, Iterator, List, Optional, Sequence, Set, Union, ) import requests from libs.core.langchain_core.documents import Document from libs.core.langchain_core.utils.html import extract_sub_links from langchain_community.document_loaders.base import BaseLoader if TYPE_CHECKING: import aiohttp logger = logging.getLogger(__name__) def _metadata_extractor(raw_html: str, url: str) -> dict: """Extract metadata from raw html using BeautifulSoup.""" metadata = {"source": url} try: from bs4 import BeautifulSoup except ImportError: logger.warning( "The bs4 package is required for default metadata extraction. " "Please install it with `pip install bs4`." ) return metadata soup = BeautifulSoup(raw_html, "html.parser") if title := soup.find("title"): metadata["title"] = title.get_text() if description := soup.find("meta", attrs={"name": "description"}): metadata["description"] = description.get("content", None) if html := soup.find("html"): metadata["language"] = html.get("lang", None) return metadata class RecursiveUrlLoader(BaseLoader): """Load all child links from a URL page. **Security Note**: This loader is a crawler that will start crawling at a given URL and then expand to crawl child links recursively. Web crawlers should generally NOT be deployed with network access to any internal servers. Control access to who can submit crawling requests and what network access the crawler has. While crawling, the crawler may encounter malicious URLs that would lead to a server-side request forgery (SSRF) attack. To mitigate risks, the crawler by default will only load URLs from the same domain as the start URL (controlled via prevent_outside named argument). This will mitigate the risk of SSRF attacks, but will not eliminate it. For example, if crawling a host which hosts several sites: https://some_host/alice_site/ https://some_host/bob_site/ A malicious URL on Alice's site could cause the crawler to make a malicious GET request to an endpoint on Bob's site. Both sites are hosted on the same host, so such a request would not be prevented by default. See https://python.langchain.com/docs/security """ def __init__( self, url: str, max_depth: Optional[int] = 2, use_async: Optional[bool] = None, extractor: Optional[Callable[[str], str]] = None, metadata_extractor: Optional[Callable[[str, str], str]] = None, exclude_dirs: Optional[Sequence[str]] = (), timeout: Optional[int] = 10, prevent_outside: bool = True, link_regex: Union[str, re.Pattern, None] = None, headers: Optional[dict] = None, check_response_status: bool = False, ) -> None: """Initialize with URL to crawl and any subdirectories to exclude. Args: url: The URL to crawl. max_depth: The max depth of the recursive loading. use_async: Whether to use asynchronous loading. If True, this function will not be lazy, but it will still work in the expected way, just not lazy. extractor: A function to extract document contents from raw html. When extract function returns an empty string, the document is ignored. metadata_extractor: A function to extract metadata from raw html and the source url (args in that order). Default extractor will attempt to use BeautifulSoup4 to extract the title, description and language of the page. exclude_dirs: A list of subdirectories to exclude. timeout: The timeout for the requests, in the unit of seconds. If None then connection will not timeout. 
prevent_outside: If True, prevent loading from urls which are not children of the root url. link_regex: Regex for extracting sub-links from the raw html of a web page. check_response_status: If True, check HTTP response status and skip URLs with error responses (400-599). """ self.url = url self.max_depth = max_depth if max_depth is not None else 2 self.use_async = use_async if use_async is not None else False self.extractor = extractor if extractor is not None else lambda x: x self.metadata_extractor = ( metadata_extractor if metadata_extractor is not None else _metadata_extractor ) self.exclude_dirs = exclude_dirs if exclude_dirs is not None else () if any(url.startswith(exclude_dir) for exclude_dir in self.exclude_dirs): raise ValueError( f"Base url is included in exclude_dirs. Received base_url: {url} and " f"exclude_dirs: {self.exclude_dirs}" ) self.timeout = timeout self.prevent_outside = prevent_outside if prevent_outside is not None else True self.link_regex = link_regex self._lock = asyncio.Lock() if self.use_async else None self.headers = headers self.check_response_status = check_response_status def _get_child_links_recursive( self, url: str, visited: Set[str], *, depth: int = 0 ) -> Iterator[Document]: """Recursively get all child links starting with the path of the input URL. Args: url: The URL to crawl. visited: A set of visited URLs. depth: Current depth of recursion. Stop when depth >= max_depth. """ if depth >= self.max_depth: return # Get all links that can be accessed from the current URL visited.add(url) try: response = requests.get(url, timeout=self.timeout, headers=self.headers) if self.check_response_status and 400 <= response.status_code <= 599: raise ValueError(f"Received HTTP status {response.status_code}") except Exception as e: logger.warning( f"Unable to load from {url}. Received error {e} of type " f"{e.__class__.__name__}" ) return content = self.extractor(response.text) if content: yield Document( page_content=content, metadata=self.metadata_extractor(response.text, url), ) # Store the visited links and recursively visit the children sub_links = extract_sub_links( response.text, url, base_url=self.url, pattern=self.link_regex, prevent_outside=self.prevent_outside, exclude_prefixes=self.exclude_dirs, ) for link in sub_links: # Check all unvisited links if link not in visited: yield from self._get_child_links_recursive( link, visited, depth=depth + 1 ) async def _async_get_child_links_recursive( self, url: str, visited: Set[str], *, session: Optional[aiohttp.ClientSession] = None, depth: int = 0, ) -> List[Document]: """Recursively get all child links starting with the path of the input URL. Args: url: The URL to crawl. visited: A set of visited URLs. depth: To reach the current url, how many pages have been visited. """ try: import aiohttp except ImportError: raise ImportError( "The aiohttp package is required for the RecursiveUrlLoader. " "Please install it with `pip install aiohttp`." ) if depth >= self.max_depth: return [] # Disable SSL verification because websites may have invalid SSL certificates, # but won't cause any security issues for us. 
close_session = session is None session = ( session if session is not None else aiohttp.ClientSession( connector=aiohttp.TCPConnector(ssl=False), timeout=aiohttp.ClientTimeout(total=self.timeout), headers=self.headers, ) ) async with self._lock: # type: ignore visited.add(url) try: async with session.get(url) as response: text = await response.text() if self.check_response_status and 400 <= response.status <= 599: raise ValueError(f"Received HTTP status {response.status}") except (aiohttp.client_exceptions.InvalidURL, Exception) as e: logger.warning( f"Unable to load {url}. Received error {e} of type " f"{e.__class__.__name__}" ) if close_session: await session.close() return [] results = [] content = self.extractor(text) if content: results.append( Document( page_content=content, metadata=self.metadata_extractor(text, url), ) ) if depth < self.max_depth - 1: sub_links = extract_sub_links( text, url, base_url=self.url, pattern=self.link_regex, prevent_outside=self.prevent_outside, exclude_prefixes=self.exclude_dirs, ) # Recursively call the function to get the children of the children sub_tasks = [] async with self._lock: # type: ignore to_visit = set(sub_links).difference(visited) for link in to_visit: sub_tasks.append( self._async_get_child_links_recursive( link, visited, session=session, depth=depth + 1 ) ) next_results = await asyncio.gather(*sub_tasks) for sub_result in next_results: if isinstance(sub_result, Exception) or sub_result is None: # We don't want to stop the whole process, so just ignore it # Not standard html format or invalid url or 404 may cause this. continue # locking not fully working, temporary hack to ensure deduplication results += [r for r in sub_result if r not in results] if close_session: await session.close() return results def lazy_load(self) -> Iterator[Document]: """Lazy load web pages. When use_async is True, this function will not be lazy, but it will still work in the expected way, just not lazy.""" visited: Set[str] = set() if self.use_async: results = asyncio.run( self._async_get_child_links_recursive(self.url, visited) ) return iter(results or []) else: return self._get_child_links_recursive(self.url, visited) def load(self) -> List[Document]: """Load web pages.""" return list(self.lazy_load())
[]
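A hedged usage sketch for the RecursiveUrlLoader above; the URL is a placeholder and the loader is assumed to be re-exported from langchain_community.document_loaders. BeautifulSoup is used only to turn raw HTML into readable page_content.

from bs4 import BeautifulSoup
from langchain_community.document_loaders import RecursiveUrlLoader

loader = RecursiveUrlLoader(
    url="https://docs.python.org/3.9/",
    max_depth=2,
    # Strip markup so page_content holds plain text instead of raw HTML.
    extractor=lambda html: BeautifulSoup(html, "html.parser").text,
)
docs = loader.load()
print(len(docs), docs[0].metadata.get("title"))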
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~agents~test_agent_async.py
"""Unit tests for agents.""" from typing import Any, Dict, List, Optional from libs.core.langchain_core.agents import AgentAction, AgentStep from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.messages import AIMessage, HumanMessage from libs.core.langchain_core.tools import Tool from langchain.agents import AgentExecutor, AgentType, initialize_agent from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.schema.runnable.utils import add from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler class FakeListLLM(LLM): """Fake LLM for testing that outputs elements of a list.""" responses: List[str] i: int = -1 def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Increment counter, and then return response in that index.""" self.i += 1 print(f"=== Mock Response #{self.i} ===") print(self.responses[self.i]) return self.responses[self.i] def get_num_tokens(self, text: str) -> int: """Return number of tokens in text.""" return len(text.split()) async def _acall(self, *args: Any, **kwargs: Any) -> str: return self._call(*args, **kwargs) @property def _identifying_params(self) -> Dict[str, Any]: return {} @property def _llm_type(self) -> str: """Return type of llm.""" return "fake_list" def _get_agent(**kwargs: Any) -> AgentExecutor: """Get agent for testing.""" bad_action_name = "BadAction" responses = [ f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment", "Oh well\nFinal Answer: curses foiled again", ] fake_llm = FakeListLLM(cache=False, responses=responses) tools = [ Tool( name="Search", func=lambda x: x, description="Useful for searching", ), Tool( name="Lookup", func=lambda x: x, description="Useful for looking up things in a table", ), ] agent = initialize_agent( tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, **kwargs, ) return agent async def test_agent_bad_action() -> None: """Test react chain when bad action given.""" agent = _get_agent() output = await agent.arun("when was langchain made") assert output == "curses foiled again" async def test_agent_stopped_early() -> None: """Test react chain when max iterations or max execution time is exceeded.""" # iteration limit agent = _get_agent(max_iterations=0) output = await agent.arun("when was langchain made") assert output == "Agent stopped due to iteration limit or time limit." # execution time limit agent = _get_agent(max_execution_time=0.0) output = await agent.arun("when was langchain made") assert output == "Agent stopped due to iteration limit or time limit." 
async def test_agent_with_callbacks() -> None: """Test react chain with callbacks by setting verbose globally.""" handler1 = FakeCallbackHandler() handler2 = FakeCallbackHandler() tool = "Search" responses = [ f"FooBarBaz\nAction: {tool}\nAction Input: misalignment", "Oh well\nFinal Answer: curses foiled again", ] # Only fake LLM gets callbacks for handler2 fake_llm = FakeListLLM(responses=responses, callbacks=[handler2]) tools = [ Tool( name="Search", func=lambda x: x, description="Useful for searching", ), ] agent = initialize_agent( tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, ) output = await agent.arun("when was langchain made", callbacks=[handler1]) assert output == "curses foiled again" # 1 top level chain run runs, 2 LLMChain runs, 2 LLM runs, 1 tool run assert handler1.chain_starts == handler1.chain_ends == 3 assert handler1.llm_starts == handler1.llm_ends == 2 assert handler1.tool_starts == 1 assert handler1.tool_ends == 1 # 1 extra agent action assert handler1.starts == 7 # 1 extra agent end assert handler1.ends == 7 assert handler1.errors == 0 # during LLMChain assert handler1.text == 2 assert handler2.llm_starts == 2 assert handler2.llm_ends == 2 assert ( handler2.chain_starts == handler2.tool_starts == handler2.tool_ends == handler2.chain_ends == 0 ) async def test_agent_stream() -> None: """Test react chain with callbacks by setting verbose globally.""" tool = "Search" responses = [ f"FooBarBaz\nAction: {tool}\nAction Input: misalignment", f"FooBarBaz\nAction: {tool}\nAction Input: something else", "Oh well\nFinal Answer: curses foiled again", ] # Only fake LLM gets callbacks for handler2 fake_llm = FakeListLLM(responses=responses) tools = [ Tool( name="Search", func=lambda x: f"Results for: {x}", description="Useful for searching", ), ] agent = initialize_agent( tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, ) output = [a async for a in agent.astream("when was langchain made")] assert output == [ { "actions": [ AgentAction( tool="Search", tool_input="misalignment", log="FooBarBaz\nAction: Search\nAction Input: misalignment", ) ], "messages": [ AIMessage( content="FooBarBaz\nAction: Search\nAction Input: misalignment" ) ], }, { "steps": [ AgentStep( action=AgentAction( tool="Search", tool_input="misalignment", log="FooBarBaz\nAction: Search\nAction Input: misalignment", ), observation="Results for: misalignment", ) ], "messages": [HumanMessage(content="Results for: misalignment")], }, { "actions": [ AgentAction( tool="Search", tool_input="something else", log="FooBarBaz\nAction: Search\nAction Input: something else", ) ], "messages": [ AIMessage( content="FooBarBaz\nAction: Search\nAction Input: something else" ) ], }, { "steps": [ AgentStep( action=AgentAction( tool="Search", tool_input="something else", log="FooBarBaz\nAction: Search\nAction Input: something else", ), observation="Results for: something else", ) ], "messages": [HumanMessage(content="Results for: something else")], }, { "output": "curses foiled again", "messages": [ AIMessage(content="Oh well\nFinal Answer: curses foiled again") ], }, ] assert add(output) == { "actions": [ AgentAction( tool="Search", tool_input="misalignment", log="FooBarBaz\nAction: Search\nAction Input: misalignment", ), AgentAction( tool="Search", tool_input="something else", log="FooBarBaz\nAction: Search\nAction Input: something else", ), ], "steps": [ AgentStep( action=AgentAction( tool="Search", tool_input="misalignment", log="FooBarBaz\nAction: Search\nAction Input: misalignment", ), 
observation="Results for: misalignment", ), AgentStep( action=AgentAction( tool="Search", tool_input="something else", log="FooBarBaz\nAction: Search\nAction Input: something else", ), observation="Results for: something else", ), ], "messages": [ AIMessage(content="FooBarBaz\nAction: Search\nAction Input: misalignment"), HumanMessage(content="Results for: misalignment"), AIMessage( content="FooBarBaz\nAction: Search\nAction Input: something else" ), HumanMessage(content="Results for: something else"), AIMessage(content="Oh well\nFinal Answer: curses foiled again"), ], "output": "curses foiled again", } async def test_agent_tool_return_direct() -> None: """Test agent using tools that return directly.""" tool = "Search" responses = [ f"FooBarBaz\nAction: {tool}\nAction Input: misalignment", "Oh well\nFinal Answer: curses foiled again", ] fake_llm = FakeListLLM(responses=responses) tools = [ Tool( name="Search", func=lambda x: x, description="Useful for searching", return_direct=True, ), ] agent = initialize_agent( tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, ) output = await agent.arun("when was langchain made") assert output == "misalignment" async def test_agent_tool_return_direct_in_intermediate_steps() -> None: """Test agent using tools that return directly.""" tool = "Search" responses = [ f"FooBarBaz\nAction: {tool}\nAction Input: misalignment", "Oh well\nFinal Answer: curses foiled again", ] fake_llm = FakeListLLM(responses=responses) tools = [ Tool( name="Search", func=lambda x: x, description="Useful for searching", return_direct=True, ), ] agent = initialize_agent( tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, return_intermediate_steps=True, ) resp = await agent.acall("when was langchain made") assert isinstance(resp, dict) assert resp["output"] == "misalignment" assert len(resp["intermediate_steps"]) == 1 action, _action_intput = resp["intermediate_steps"][0] assert action.tool == "Search" async def test_agent_invalid_tool() -> None: """Test agent invalid tool and correct suggestions.""" fake_llm = FakeListLLM(responses=["FooBarBaz\nAction: Foo\nAction Input: Bar"]) tools = [ Tool( name="Search", func=lambda x: x, description="Useful for searching", return_direct=True, ), ] agent = initialize_agent( tools=tools, llm=fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, return_intermediate_steps=True, max_iterations=1, ) resp = await agent.acall("when was langchain made") resp["intermediate_steps"][0][1] == "Foo is not a valid tool, try one of [Search]."
[ "FooBarBaz\nAction: Search\nAction Input: something else", "Results for: misalignment", "FooBarBaz\nAction: Search\nAction Input: misalignment", "Results for: something else", "Oh well\nFinal Answer: curses foiled again" ]
2024-01-10
mth93/langchain
libs~experimental~langchain_experimental~comprehend_moderation~base_moderation.py
import uuid from typing import Any, Callable, Optional, cast from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.schema import AIMessage, HumanMessage from libs.core.langchain_core.prompt_values import ChatPromptValue, StringPromptValue from langchain_experimental.comprehend_moderation.pii import ComprehendPII from langchain_experimental.comprehend_moderation.prompt_safety import ( ComprehendPromptSafety, ) from langchain_experimental.comprehend_moderation.toxicity import ComprehendToxicity class BaseModeration: def __init__( self, client: Any, config: Optional[Any] = None, moderation_callback: Optional[Any] = None, unique_id: Optional[str] = None, run_manager: Optional[CallbackManagerForChainRun] = None, ): self.client = client self.config = config self.moderation_callback = moderation_callback self.unique_id = unique_id self.chat_message_index = 0 self.run_manager = run_manager self.chain_id = str(uuid.uuid4()) def _convert_prompt_to_text(self, prompt: Any) -> str: input_text = str() if isinstance(prompt, StringPromptValue): input_text = prompt.text elif isinstance(prompt, str): input_text = prompt elif isinstance(prompt, ChatPromptValue): """ We will just check the last message in the message Chain of a ChatPromptTemplate. The typical chronology is SystemMessage > HumanMessage > AIMessage and so on. However assuming that with every chat the chain is invoked we will only check the last message. This is assuming that all previous messages have been checked already. Only HumanMessage and AIMessage will be checked. We can perhaps loop through and take advantage of the additional_kwargs property in the HumanMessage and AIMessage schema to mark messages that have been moderated. However that means that this class could generate multiple text chunks and moderate() logics would need to be updated. This also means some complexity in re-constructing the prompt while keeping the messages in sequence. """ message = prompt.messages[-1] self.chat_message_index = len(prompt.messages) - 1 if isinstance(message, HumanMessage): input_text = cast(str, message.content) if isinstance(message, AIMessage): input_text = cast(str, message.content) else: raise ValueError( f"Invalid input type {type(input_text)}. " "Must be a PromptValue, str, or list of BaseMessages." ) return input_text def _convert_text_to_prompt(self, prompt: Any, text: str) -> Any: if isinstance(prompt, StringPromptValue): return StringPromptValue(text=text) elif isinstance(prompt, str): return text elif isinstance(prompt, ChatPromptValue): # Copy the messages because we may need to mutate them. # We don't want to mutate data we don't own. messages = list(prompt.messages) message = messages[self.chat_message_index] if isinstance(message, HumanMessage): messages[self.chat_message_index] = HumanMessage( content=text, example=message.example, additional_kwargs=message.additional_kwargs, ) if isinstance(message, AIMessage): messages[self.chat_message_index] = AIMessage( content=text, example=message.example, additional_kwargs=message.additional_kwargs, ) return ChatPromptValue(messages=messages) else: raise ValueError( f"Invalid input type {type(input)}. " "Must be a PromptValue, str, or list of BaseMessages." 
) def _moderation_class(self, moderation_class: Any) -> Callable: return moderation_class( client=self.client, callback=self.moderation_callback, unique_id=self.unique_id, chain_id=self.chain_id, ).validate def _log_message_for_verbose(self, message: str) -> None: if self.run_manager: self.run_manager.on_text(message) def moderate(self, prompt: Any) -> str: from langchain_experimental.comprehend_moderation.base_moderation_config import ( # noqa: E501 ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig, ) from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( # noqa: E501 ModerationPiiError, ModerationPromptSafetyError, ModerationToxicityError, ) try: # convert prompt to text input_text = self._convert_prompt_to_text(prompt=prompt) output_text = str() # perform moderation filter_functions = { "pii": ComprehendPII, "toxicity": ComprehendToxicity, "prompt_safety": ComprehendPromptSafety, } filters = self.config.filters # type: ignore for _filter in filters: filter_name = ( "pii" if isinstance(_filter, ModerationPiiConfig) else ( "toxicity" if isinstance(_filter, ModerationToxicityConfig) else ( "prompt_safety" if isinstance(_filter, ModerationPromptSafetyConfig) else None ) ) ) if filter_name in filter_functions: self._log_message_for_verbose( f"Running {filter_name} Validation...\n" ) validation_fn = self._moderation_class( moderation_class=filter_functions[filter_name] ) input_text = input_text if not output_text else output_text output_text = validation_fn( prompt_value=input_text, config=_filter.dict(), ) # convert text to prompt and return return self._convert_text_to_prompt(prompt=prompt, text=output_text) except ModerationPiiError as e: self._log_message_for_verbose(f"Found PII content..stopping..\n{str(e)}\n") raise e except ModerationToxicityError as e: self._log_message_for_verbose( f"Found Toxic content..stopping..\n{str(e)}\n" ) raise e except ModerationPromptSafetyError as e: self._log_message_for_verbose( f"Found Harmful intention..stopping..\n{str(e)}\n" ) raise e except Exception as e: raise e
[]
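In normal use the BaseModeration class above is driven by AmazonComprehendModerationChain, but a direct, hedged sketch looks roughly like this. It assumes AWS credentials with Comprehend access, and that BaseModerationConfig in the sibling base_moderation_config module exposes the `filters` list that moderate() reads; those names are assumptions here.

import boto3
from langchain_experimental.comprehend_moderation.base_moderation import BaseModeration
from langchain_experimental.comprehend_moderation.base_moderation_config import (
    BaseModerationConfig,  # assumed to provide a `filters` list
    ModerationPiiConfig,
)

comprehend_client = boto3.client("comprehend", region_name="us-east-1")
config = BaseModerationConfig(filters=[ModerationPiiConfig()])

moderation = BaseModeration(client=comprehend_client, config=config)
# Returns the (possibly redacted) prompt, or raises ModerationPiiError when the
# configured PII policy is violated.
checked = moderation.moderate("My name is John Doe and my SSN is 123-45-6789.")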
2024-01-10
mth93/langchain
libs~community~langchain_community~agent_toolkits~openapi~planner.py
"""Agent that interacts with OpenAPI APIs via a hierarchical planning approach.""" import json import re from functools import partial from typing import Any, Callable, Dict, List, Optional import yaml from libs.core.langchain_core.callbacks import BaseCallbackManager from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.prompts import BasePromptTemplate, PromptTemplate from libs.core.langchain_core.pydantic_v1 import Field from libs.core.langchain_core.tools import BaseTool, Tool from langchain_community.agent_toolkits.openapi.planner_prompt import ( API_CONTROLLER_PROMPT, API_CONTROLLER_TOOL_DESCRIPTION, API_CONTROLLER_TOOL_NAME, API_ORCHESTRATOR_PROMPT, API_PLANNER_PROMPT, API_PLANNER_TOOL_DESCRIPTION, API_PLANNER_TOOL_NAME, PARSING_DELETE_PROMPT, PARSING_GET_PROMPT, PARSING_PATCH_PROMPT, PARSING_POST_PROMPT, PARSING_PUT_PROMPT, REQUESTS_DELETE_TOOL_DESCRIPTION, REQUESTS_GET_TOOL_DESCRIPTION, REQUESTS_PATCH_TOOL_DESCRIPTION, REQUESTS_POST_TOOL_DESCRIPTION, REQUESTS_PUT_TOOL_DESCRIPTION, ) from langchain_community.agent_toolkits.openapi.spec import ReducedOpenAPISpec from langchain_community.llms import OpenAI from langchain_community.tools.requests.tool import BaseRequestsTool from langchain_community.utilities.requests import RequestsWrapper # # Requests tools with LLM-instructed extraction of truncated responses. # # Of course, truncating so bluntly may lose a lot of valuable # information in the response. # However, the goal for now is to have only a single inference step. MAX_RESPONSE_LENGTH = 5000 """Maximum length of the response to be returned.""" def _get_default_llm_chain(prompt: BasePromptTemplate) -> Any: from langchain.chains.llm import LLMChain return LLMChain( llm=OpenAI(), prompt=prompt, ) def _get_default_llm_chain_factory( prompt: BasePromptTemplate, ) -> Callable[[], Any]: """Returns a default LLMChain factory.""" return partial(_get_default_llm_chain, prompt) class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool): """Requests GET tool with LLM-instructed extraction of truncated responses.""" name: str = "requests_get" """Tool name.""" description = REQUESTS_GET_TOOL_DESCRIPTION """Tool description.""" response_length: Optional[int] = MAX_RESPONSE_LENGTH """Maximum length of the response to be returned.""" llm_chain: Any = Field( default_factory=_get_default_llm_chain_factory(PARSING_GET_PROMPT) ) """LLMChain used to extract the response.""" def _run(self, text: str) -> str: from langchain.output_parsers.json import parse_json_markdown try: data = parse_json_markdown(text) except json.JSONDecodeError as e: raise e data_params = data.get("params") response = self.requests_wrapper.get(data["url"], params=data_params) response = response[: self.response_length] return self.llm_chain.predict( response=response, instructions=data["output_instructions"] ).strip() async def _arun(self, text: str) -> str: raise NotImplementedError() class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool): """Requests POST tool with LLM-instructed extraction of truncated responses.""" name: str = "requests_post" """Tool name.""" description = REQUESTS_POST_TOOL_DESCRIPTION """Tool description.""" response_length: Optional[int] = MAX_RESPONSE_LENGTH """Maximum length of the response to be returned.""" llm_chain: Any = Field( default_factory=_get_default_llm_chain_factory(PARSING_POST_PROMPT) ) """LLMChain used to extract the response.""" def _run(self, text: str) -> str: from langchain.output_parsers.json import parse_json_markdown try: 
data = parse_json_markdown(text) except json.JSONDecodeError as e: raise e response = self.requests_wrapper.post(data["url"], data["data"]) response = response[: self.response_length] return self.llm_chain.predict( response=response, instructions=data["output_instructions"] ).strip() async def _arun(self, text: str) -> str: raise NotImplementedError() class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool): """Requests PATCH tool with LLM-instructed extraction of truncated responses.""" name: str = "requests_patch" """Tool name.""" description = REQUESTS_PATCH_TOOL_DESCRIPTION """Tool description.""" response_length: Optional[int] = MAX_RESPONSE_LENGTH """Maximum length of the response to be returned.""" llm_chain: Any = Field( default_factory=_get_default_llm_chain_factory(PARSING_PATCH_PROMPT) ) """LLMChain used to extract the response.""" def _run(self, text: str) -> str: from langchain.output_parsers.json import parse_json_markdown try: data = parse_json_markdown(text) except json.JSONDecodeError as e: raise e response = self.requests_wrapper.patch(data["url"], data["data"]) response = response[: self.response_length] return self.llm_chain.predict( response=response, instructions=data["output_instructions"] ).strip() async def _arun(self, text: str) -> str: raise NotImplementedError() class RequestsPutToolWithParsing(BaseRequestsTool, BaseTool): """Requests PUT tool with LLM-instructed extraction of truncated responses.""" name: str = "requests_put" """Tool name.""" description = REQUESTS_PUT_TOOL_DESCRIPTION """Tool description.""" response_length: Optional[int] = MAX_RESPONSE_LENGTH """Maximum length of the response to be returned.""" llm_chain: Any = Field( default_factory=_get_default_llm_chain_factory(PARSING_PUT_PROMPT) ) """LLMChain used to extract the response.""" def _run(self, text: str) -> str: from langchain.output_parsers.json import parse_json_markdown try: data = parse_json_markdown(text) except json.JSONDecodeError as e: raise e response = self.requests_wrapper.put(data["url"], data["data"]) response = response[: self.response_length] return self.llm_chain.predict( response=response, instructions=data["output_instructions"] ).strip() async def _arun(self, text: str) -> str: raise NotImplementedError() class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool): """A tool that sends a DELETE request and parses the response.""" name: str = "requests_delete" """The name of the tool.""" description = REQUESTS_DELETE_TOOL_DESCRIPTION """The description of the tool.""" response_length: Optional[int] = MAX_RESPONSE_LENGTH """The maximum length of the response.""" llm_chain: Any = Field( default_factory=_get_default_llm_chain_factory(PARSING_DELETE_PROMPT) ) """The LLM chain used to parse the response.""" def _run(self, text: str) -> str: from langchain.output_parsers.json import parse_json_markdown try: data = parse_json_markdown(text) except json.JSONDecodeError as e: raise e response = self.requests_wrapper.delete(data["url"]) response = response[: self.response_length] return self.llm_chain.predict( response=response, instructions=data["output_instructions"] ).strip() async def _arun(self, text: str) -> str: raise NotImplementedError() # # Orchestrator, planner, controller. 
# def _create_api_planner_tool( api_spec: ReducedOpenAPISpec, llm: BaseLanguageModel ) -> Tool: from langchain.chains.llm import LLMChain endpoint_descriptions = [ f"{name} {description}" for name, description, _ in api_spec.endpoints ] prompt = PromptTemplate( template=API_PLANNER_PROMPT, input_variables=["query"], partial_variables={"endpoints": "- " + "- ".join(endpoint_descriptions)}, ) chain = LLMChain(llm=llm, prompt=prompt) tool = Tool( name=API_PLANNER_TOOL_NAME, description=API_PLANNER_TOOL_DESCRIPTION, func=chain.run, ) return tool def _create_api_controller_agent( api_url: str, api_docs: str, requests_wrapper: RequestsWrapper, llm: BaseLanguageModel, ) -> Any: from langchain.agents.agent import AgentExecutor from langchain.agents.mrkl.base import ZeroShotAgent from langchain.chains.llm import LLMChain get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT) post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT) tools: List[BaseTool] = [ RequestsGetToolWithParsing( requests_wrapper=requests_wrapper, llm_chain=get_llm_chain ), RequestsPostToolWithParsing( requests_wrapper=requests_wrapper, llm_chain=post_llm_chain ), ] prompt = PromptTemplate( template=API_CONTROLLER_PROMPT, input_variables=["input", "agent_scratchpad"], partial_variables={ "api_url": api_url, "api_docs": api_docs, "tool_names": ", ".join([tool.name for tool in tools]), "tool_descriptions": "\n".join( [f"{tool.name}: {tool.description}" for tool in tools] ), }, ) agent = ZeroShotAgent( llm_chain=LLMChain(llm=llm, prompt=prompt), allowed_tools=[tool.name for tool in tools], ) return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True) def _create_api_controller_tool( api_spec: ReducedOpenAPISpec, requests_wrapper: RequestsWrapper, llm: BaseLanguageModel, ) -> Tool: """Expose controller as a tool. The tool is invoked with a plan from the planner, and dynamically creates a controller agent with relevant documentation only to constrain the context. """ base_url = api_spec.servers[0]["url"] # TODO: do better. def _create_and_run_api_controller_agent(plan_str: str) -> str: pattern = r"\b(GET|POST|PATCH|DELETE)\s+(/\S+)*" matches = re.findall(pattern, plan_str) endpoint_names = [ "{method} {route}".format(method=method, route=route.split("?")[0]) for method, route in matches ] docs_str = "" for endpoint_name in endpoint_names: found_match = False for name, _, docs in api_spec.endpoints: regex_name = re.compile(re.sub("\{.*?\}", ".*", name)) if regex_name.match(endpoint_name): found_match = True docs_str += f"== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n" if not found_match: raise ValueError(f"{endpoint_name} endpoint does not exist.") agent = _create_api_controller_agent(base_url, docs_str, requests_wrapper, llm) return agent.run(plan_str) return Tool( name=API_CONTROLLER_TOOL_NAME, func=_create_and_run_api_controller_agent, description=API_CONTROLLER_TOOL_DESCRIPTION, ) def create_openapi_agent( api_spec: ReducedOpenAPISpec, requests_wrapper: RequestsWrapper, llm: BaseLanguageModel, shared_memory: Optional[Any] = None, callback_manager: Optional[BaseCallbackManager] = None, verbose: bool = True, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: """Instantiate OpenAI API planner and controller for a given spec. Inject credentials via requests_wrapper. We use a top-level "orchestrator" agent to invoke the planner and controller, rather than a top-level planner that invokes a controller with its plan. This is to keep the planner simple. 
""" from langchain.agents.agent import AgentExecutor from langchain.agents.mrkl.base import ZeroShotAgent from langchain.chains.llm import LLMChain tools = [ _create_api_planner_tool(api_spec, llm), _create_api_controller_tool(api_spec, requests_wrapper, llm), ] prompt = PromptTemplate( template=API_ORCHESTRATOR_PROMPT, input_variables=["input", "agent_scratchpad"], partial_variables={ "tool_names": ", ".join([tool.name for tool in tools]), "tool_descriptions": "\n".join( [f"{tool.name}: {tool.description}" for tool in tools] ), }, ) agent = ZeroShotAgent( llm_chain=LLMChain(llm=llm, prompt=prompt, memory=shared_memory), allowed_tools=[tool.name for tool in tools], **kwargs, ) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, **(agent_executor_kwargs or {}), )
[ "tool_descriptions", "\n", "tool_names", "agent_scratchpad", "- ", "input", ", ", "endpoints" ]
2024-01-10
mth93/langchain
libs~partners~google_genai~langchain_google_genai~llms.py
from __future__ import annotations from typing import Any, Callable, Dict, Iterator, List, Optional, Union import google.api_core import google.generativeai as genai # type: ignore[import] from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models import LanguageModelInput from libs.core.langchain_core.language_models.llms import BaseLLM, create_base_retry_decorator from libs.core.langchain_core.outputs import Generation, GenerationChunk, LLMResult from libs.core.langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env def _create_retry_decorator( llm: BaseLLM, *, max_retries: int = 1, run_manager: Optional[ Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] ] = None, ) -> Callable[[Any], Any]: """Creates a retry decorator for Vertex / Palm LLMs.""" errors = [ google.api_core.exceptions.ResourceExhausted, google.api_core.exceptions.ServiceUnavailable, google.api_core.exceptions.Aborted, google.api_core.exceptions.DeadlineExceeded, google.api_core.exceptions.GoogleAPIError, ] decorator = create_base_retry_decorator( error_types=errors, max_retries=max_retries, run_manager=run_manager ) return decorator def _completion_with_retry( llm: GoogleGenerativeAI, prompt: LanguageModelInput, is_gemini: bool = False, stream: bool = False, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator( llm, max_retries=llm.max_retries, run_manager=run_manager ) @retry_decorator def _completion_with_retry( prompt: LanguageModelInput, is_gemini: bool, stream: bool, **kwargs: Any ) -> Any: generation_config = kwargs.get("generation_config", {}) if is_gemini: return llm.client.generate_content( contents=prompt, stream=stream, generation_config=generation_config ) return llm.client.generate_text(prompt=prompt, **kwargs) return _completion_with_retry( prompt=prompt, is_gemini=is_gemini, stream=stream, **kwargs ) def _is_gemini_model(model_name: str) -> bool: return "gemini" in model_name def _strip_erroneous_leading_spaces(text: str) -> str: """Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space. """ has_leading_space = all(not line or line[0] == " " for line in text.split("\n")[1:]) if has_leading_space: return text.replace("\n ", "\n") else: return text class GoogleGenerativeAI(BaseLLM, BaseModel): """Google GenerativeAI models. Example: .. code-block:: python from langchain_google_genai import GoogleGenerativeAI llm = GoogleGenerativeAI(model="gemini-pro") """ client: Any #: :meta private: model: str = Field( ..., description="""The name of the model to use. Supported examples: - gemini-pro - models/text-bison-001""", ) """Model name to use.""" google_api_key: Optional[SecretStr] = None temperature: float = 0.7 """Run inference with this temperature. Must by in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0].""" top_k: Optional[int] = None """Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.""" max_output_tokens: Optional[int] = None """Maximum number of tokens to include in a candidate. 
Must be greater than zero. If unset, will default to 64.""" n: int = 1 """Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.""" max_retries: int = 6 """The maximum number of retries to make when generating.""" @property def is_gemini(self) -> bool: """Returns whether a model is belongs to a Gemini family or not.""" return _is_gemini_model(self.model) @property def lc_secrets(self) -> Dict[str, str]: return {"google_api_key": "GOOGLE_API_KEY"} @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists.""" google_api_key = get_from_dict_or_env( values, "google_api_key", "GOOGLE_API_KEY" ) model_name = values["model"] if isinstance(google_api_key, SecretStr): google_api_key = google_api_key.get_secret_value() genai.configure(api_key=google_api_key) if _is_gemini_model(model_name): values["client"] = genai.GenerativeModel(model_name=model_name) else: values["client"] = genai if values["temperature"] is not None and not 0 <= values["temperature"] <= 1: raise ValueError("temperature must be in the range [0.0, 1.0]") if values["top_p"] is not None and not 0 <= values["top_p"] <= 1: raise ValueError("top_p must be in the range [0.0, 1.0]") if values["top_k"] is not None and values["top_k"] <= 0: raise ValueError("top_k must be positive") if values["max_output_tokens"] is not None and values["max_output_tokens"] <= 0: raise ValueError("max_output_tokens must be greater than zero") return values def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: generations: List[List[Generation]] = [] generation_config = { "stop_sequences": stop, "temperature": self.temperature, "top_p": self.top_p, "top_k": self.top_k, "max_output_tokens": self.max_output_tokens, "candidate_count": self.n, } for prompt in prompts: if self.is_gemini: res = _completion_with_retry( self, prompt=prompt, stream=False, is_gemini=True, run_manager=run_manager, generation_config=generation_config, ) candidates = [ "".join([p.text for p in c.content.parts]) for c in res.candidates ] generations.append([Generation(text=c) for c in candidates]) else: res = _completion_with_retry( self, model=self.model, prompt=prompt, stream=False, is_gemini=False, run_manager=run_manager, **generation_config, ) prompt_generations = [] for candidate in res.candidates: raw_text = candidate["output"] stripped_text = _strip_erroneous_leading_spaces(raw_text) prompt_generations.append(Generation(text=stripped_text)) generations.append(prompt_generations) return LLMResult(generations=generations) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: generation_config = kwargs.get("generation_config", {}) if stop: generation_config["stop_sequences"] = stop for stream_resp in _completion_with_retry( self, prompt, stream=True, is_gemini=True, run_manager=run_manager, generation_config=generation_config, **kwargs, ): chunk = GenerationChunk(text=stream_resp.text) yield chunk if run_manager: run_manager.on_llm_new_token( stream_resp.text, chunk=chunk, verbose=self.verbose, ) @property def _llm_type(self) -> str: """Return type of llm.""" return "google_palm" def get_num_tokens(self, text: str) -> int: """Get the number of tokens present in the text. 
Useful for checking if an input will fit in a model's context window. Args: text: The string input to tokenize. Returns: The integer number of tokens in the text. """ if self.is_gemini: raise ValueError("Counting tokens is not yet supported!") result = self.client.count_text_tokens(model=self.model, prompt=text) return result["token_count"]
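A short usage sketch for the LLM defined above; the API key value is a placeholder for a valid Google Generative AI key.

.. code-block:: python

    from langchain_google_genai import GoogleGenerativeAI

    llm = GoogleGenerativeAI(model="gemini-pro", google_api_key="<api-key>", temperature=0.2)

    # Single completion via the standard Runnable interface.
    print(llm.invoke("Explain retries with exponential backoff in one sentence."))

    # Token-by-token streaming (Gemini models, per the _stream method above).
    for chunk in llm.stream("Write a haiku about rate limits."):
        print(chunk, end="")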
[ "[]" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~utilities~searx_search.py
"""Utility for using SearxNG meta search API. SearxNG is a privacy-friendly free metasearch engine that aggregates results from `multiple search engines <https://docs.searxng.org/admin/engines/configured_engines.html>`_ and databases and supports the `OpenSearch <https://github.com/dewitt/opensearch/blob/master/opensearch-1-1-draft-6.md>`_ specification. More details on the installation instructions `here. <../../integrations/searx.html>`_ For the search API refer to https://docs.searxng.org/dev/search_api.html Quick Start ----------- In order to use this utility you need to provide the searx host. This can be done by passing the named parameter :attr:`searx_host <SearxSearchWrapper.searx_host>` or exporting the environment variable SEARX_HOST. Note: this is the only required parameter. Then create a searx search instance like this: .. code-block:: python from langchain_community.utilities import SearxSearchWrapper # when the host starts with `http` SSL is disabled and the connection # is assumed to be on a private network searx_host='http://self.hosted' search = SearxSearchWrapper(searx_host=searx_host) You can now use the ``search`` instance to query the searx API. Searching --------- Use the :meth:`run() <SearxSearchWrapper.run>` and :meth:`results() <SearxSearchWrapper.results>` methods to query the searx API. Other methods are available for convenience. :class:`SearxResults` is a convenience wrapper around the raw json result. Example usage of the ``run`` method to make a search: .. code-block:: python s.run(query="what is the best search engine?") Engine Parameters ----------------- You can pass any `accepted searx search API <https://docs.searxng.org/dev/search_api.html>`_ parameters to the :py:class:`SearxSearchWrapper` instance. In the following example we are using the :attr:`engines <SearxSearchWrapper.engines>` and the ``language`` parameters: .. code-block:: python # assuming the searx host is set as above or exported as an env variable s = SearxSearchWrapper(engines=['google', 'bing'], language='es') Search Tips ----------- Searx offers a special `search syntax <https://docs.searxng.org/user/index.html#search-syntax>`_ that can also be used instead of passing engine parameters. For example the following query: .. code-block:: python s = SearxSearchWrapper("langchain library", engines=['github']) # can also be written as: s = SearxSearchWrapper("langchain library !github") # or even: s = SearxSearchWrapper("langchain library !gh") In some situations you might want to pass an extra string to the search query. For example when the `run()` method is called by an agent. The search suffix can also be used as a way to pass extra parameters to searx or the underlying search engines. .. code-block:: python # select the github engine and pass the search suffix s = SearchWrapper("langchain library", query_suffix="!gh") s = SearchWrapper("langchain library") # select github the conventional google search syntax s.run("large language models", query_suffix="site:github.com") *NOTE*: A search suffix can be defined on both the instance and the method level. The resulting query will be the concatenation of the two with the former taking precedence. See `SearxNG Configured Engines <https://docs.searxng.org/admin/engines/configured_engines.html>`_ and `SearxNG Search Syntax <https://docs.searxng.org/user/index.html#id1>`_ for more details. 
Notes ----- This wrapper is based on the SearxNG fork https://github.com/searxng/searxng which is better maintained than the original Searx project and offers more features. Public searxNG instances often use a rate limiter for API usage, so you might want to use a self hosted instance and disable the rate limiter. If you are self-hosting an instance you can customize the rate limiter for your own network as described `here <https://docs.searxng.org/src/searx.botdetection.html#limiter-src>`_. For a list of public SearxNG instances see https://searx.space/ """ import json from typing import Any, Dict, List, Optional import aiohttp import requests from libs.core.langchain_core.pydantic_v1 import ( BaseModel, Extra, Field, PrivateAttr, root_validator, validator, ) from libs.core.langchain_core.utils import get_from_dict_or_env def _get_default_params() -> dict: return {"language": "en", "format": "json"} class SearxResults(dict): """Dict like wrapper around search api results.""" _data: str = "" def __init__(self, data: str): """Take a raw result from Searx and make it into a dict like object.""" json_data = json.loads(data) super().__init__(json_data) self.__dict__ = self def __str__(self) -> str: """Text representation of searx result.""" return self._data @property def results(self) -> Any: """Silence mypy for accessing this field. :meta private: """ return self.get("results") @property def answers(self) -> Any: """Helper accessor on the json result.""" return self.get("answers") class SearxSearchWrapper(BaseModel): """Wrapper for Searx API. To use you need to provide the searx host by passing the named parameter ``searx_host`` or exporting the environment variable ``SEARX_HOST``. In some situations you might want to disable SSL verification, for example if you are running searx locally. You can do this by passing the named parameter ``unsecure``. You can also pass the host url scheme as ``http`` to disable SSL. Example: .. code-block:: python from langchain_community.utilities import SearxSearchWrapper searx = SearxSearchWrapper(searx_host="http://localhost:8888") Example with SSL disabled: .. code-block:: python from langchain_community.utilities import SearxSearchWrapper # note the unsecure parameter is not needed if you pass the url scheme as # http searx = SearxSearchWrapper(searx_host="http://localhost:8888", unsecure=True) """ _result: SearxResults = PrivateAttr() searx_host: str = "" unsecure: bool = False params: dict = Field(default_factory=_get_default_params) headers: Optional[dict] = None engines: Optional[List[str]] = [] categories: Optional[List[str]] = [] query_suffix: Optional[str] = "" k: int = 10 aiosession: Optional[Any] = None @validator("unsecure") def disable_ssl_warnings(cls, v: bool) -> bool: """Disable SSL warnings.""" if v: # requests.urllib3.disable_warnings() try: import urllib3 urllib3.disable_warnings() except ImportError as e: print(e) return v @root_validator() def validate_params(cls, values: Dict) -> Dict: """Validate that custom searx params are merged with default ones.""" user_params = values["params"] default = _get_default_params() values["params"] = {**default, **user_params} engines = values.get("engines") if engines: values["params"]["engines"] = ",".join(engines) categories = values.get("categories") if categories: values["params"]["categories"] = ",".join(categories) searx_host = get_from_dict_or_env(values, "searx_host", "SEARX_HOST") if not searx_host.startswith("http"): print( f"Warning: missing the url scheme on host \ ! 
assuming secure https://{searx_host} " ) searx_host = "https://" + searx_host elif searx_host.startswith("http://"): values["unsecure"] = True cls.disable_ssl_warnings(True) values["searx_host"] = searx_host return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def _searx_api_query(self, params: dict) -> SearxResults: """Actual request to searx API.""" raw_result = requests.get( self.searx_host, headers=self.headers, params=params, verify=not self.unsecure, ) # test if http result is ok if not raw_result.ok: raise ValueError("Searx API returned an error: ", raw_result.text) res = SearxResults(raw_result.text) self._result = res return res async def _asearx_api_query(self, params: dict) -> SearxResults: if not self.aiosession: async with aiohttp.ClientSession() as session: async with session.get( self.searx_host, headers=self.headers, params=params, ssl=(lambda: False if self.unsecure else None)(), ) as response: if not response.ok: raise ValueError("Searx API returned an error: ", response.text) result = SearxResults(await response.text()) self._result = result else: async with self.aiosession.get( self.searx_host, headers=self.headers, params=params, verify=not self.unsecure, ) as response: if not response.ok: raise ValueError("Searx API returned an error: ", response.text) result = SearxResults(await response.text()) self._result = result return result def run( self, query: str, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = "", **kwargs: Any, ) -> str: """Run query through Searx API and parse results. You can pass any other params to the searx query API. Args: query: The query to search for. query_suffix: Extra suffix appended to the query. engines: List of engines to use for the query. categories: List of categories to use for the query. **kwargs: extra parameters to pass to the searx API. Returns: str: The result of the query. Raises: ValueError: If an error occurred with the query. Example: This will make a query to the qwant engine: .. 
code-block:: python from langchain_community.utilities import SearxSearchWrapper searx = SearxSearchWrapper(searx_host="http://my.searx.host") searx.run("what is the weather in France ?", engine="qwant") # the same result can be achieved using the `!` syntax of searx # to select the engine using `query_suffix` searx.run("what is the weather in France ?", query_suffix="!qwant") """ _params = { "q": query, } params = {**self.params, **_params, **kwargs} if self.query_suffix and len(self.query_suffix) > 0: params["q"] += " " + self.query_suffix if isinstance(query_suffix, str) and len(query_suffix) > 0: params["q"] += " " + query_suffix if isinstance(engines, list) and len(engines) > 0: params["engines"] = ",".join(engines) if isinstance(categories, list) and len(categories) > 0: params["categories"] = ",".join(categories) res = self._searx_api_query(params) if len(res.answers) > 0: toret = res.answers[0] # only return the content of the results list elif len(res.results) > 0: toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]]) else: toret = "No good search result found" return toret async def arun( self, query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = "", **kwargs: Any, ) -> str: """Asynchronously version of `run`.""" _params = { "q": query, } params = {**self.params, **_params, **kwargs} if self.query_suffix and len(self.query_suffix) > 0: params["q"] += " " + self.query_suffix if isinstance(query_suffix, str) and len(query_suffix) > 0: params["q"] += " " + query_suffix if isinstance(engines, list) and len(engines) > 0: params["engines"] = ",".join(engines) res = await self._asearx_api_query(params) if len(res.answers) > 0: toret = res.answers[0] # only return the content of the results list elif len(res.results) > 0: toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]]) else: toret = "No good search result found" return toret def results( self, query: str, num_results: int, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = "", **kwargs: Any, ) -> List[Dict]: """Run query through Searx API and returns the results with metadata. Args: query: The query to search for. query_suffix: Extra suffix appended to the query. num_results: Limit the number of results to return. engines: List of engines to use for the query. categories: List of categories to use for the query. **kwargs: extra parameters to pass to the searx API. Returns: Dict with the following keys: { snippet: The description of the result. title: The title of the result. link: The link to the result. engines: The engines used for the result. category: Searx category of the result. 
} """ _params = { "q": query, } params = {**self.params, **_params, **kwargs} if self.query_suffix and len(self.query_suffix) > 0: params["q"] += " " + self.query_suffix if isinstance(query_suffix, str) and len(query_suffix) > 0: params["q"] += " " + query_suffix if isinstance(engines, list) and len(engines) > 0: params["engines"] = ",".join(engines) if isinstance(categories, list) and len(categories) > 0: params["categories"] = ",".join(categories) results = self._searx_api_query(params).results[:num_results] if len(results) == 0: return [{"Result": "No good Search Result was found"}] return [ { "snippet": result.get("content", ""), "title": result["title"], "link": result["url"], "engines": result["engines"], "category": result["category"], } for result in results ] async def aresults( self, query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = "", **kwargs: Any, ) -> List[Dict]: """Asynchronously query with json results. Uses aiohttp. See `results` for more info. """ _params = { "q": query, } params = {**self.params, **_params, **kwargs} if self.query_suffix and len(self.query_suffix) > 0: params["q"] += " " + self.query_suffix if isinstance(query_suffix, str) and len(query_suffix) > 0: params["q"] += " " + query_suffix if isinstance(engines, list) and len(engines) > 0: params["engines"] = ",".join(engines) results = (await self._asearx_api_query(params)).results[:num_results] if len(results) == 0: return [{"Result": "No good Search Result was found"}] return [ { "snippet": result.get("content", ""), "title": result["title"], "link": result["url"], "engines": result["engines"], "category": result["category"], } for result in results ]
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~retrievers~time_weighted_retriever.py
import datetime from copy import deepcopy from typing import Any, Dict, List, Optional, Tuple from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import Field from libs.core.langchain_core.retrievers import BaseRetriever from libs.core.langchain_core.vectorstores import VectorStore from langchain.callbacks.manager import CallbackManagerForRetrieverRun def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float: """Get the hours passed between two datetimes.""" return (time - ref_time).total_seconds() / 3600 class TimeWeightedVectorStoreRetriever(BaseRetriever): """Retriever that combines embedding similarity with recency in retrieving values.""" vectorstore: VectorStore """The vectorstore to store documents and determine salience.""" search_kwargs: dict = Field(default_factory=lambda: dict(k=100)) """Keyword arguments to pass to the vectorstore similarity search.""" # TODO: abstract as a queue memory_stream: List[Document] = Field(default_factory=list) """The memory_stream of documents to search through.""" decay_rate: float = Field(default=0.01) """The exponential decay factor used as (1.0-decay_rate)**(hrs_passed).""" k: int = 4 """The maximum number of documents to retrieve in a given call.""" other_score_keys: List[str] = [] """Other keys in the metadata to factor into the score, e.g. 'importance'.""" default_salience: Optional[float] = None """The salience to assign memories not retrieved from the vector store. None assigns no salience to documents not fetched from the vector store. """ class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def _document_get_date(self, field: str, document: Document) -> datetime.datetime: """Return the value of the date field of a document.""" if field in document.metadata: if isinstance(document.metadata[field], float): return datetime.datetime.fromtimestamp(document.metadata[field]) return document.metadata[field] return datetime.datetime.now() def _get_combined_score( self, document: Document, vector_relevance: Optional[float], current_time: datetime.datetime, ) -> float: """Return the combined score for a document.""" hours_passed = _get_hours_passed( current_time, self._document_get_date("last_accessed_at", document), ) score = (1.0 - self.decay_rate) ** hours_passed for key in self.other_score_keys: if key in document.metadata: score += document.metadata[key] if vector_relevance is not None: score += vector_relevance return score def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]: """Return documents that are salient to the query.""" docs_and_scores: List[Tuple[Document, float]] docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores( query, **self.search_kwargs ) results = {} for fetched_doc, relevance in docs_and_scores: if "buffer_idx" in fetched_doc.metadata: buffer_idx = fetched_doc.metadata["buffer_idx"] doc = self.memory_stream[buffer_idx] results[buffer_idx] = (doc, relevance) return results def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """Return documents that are relevant to the query.""" current_time = datetime.datetime.now() docs_and_scores = { doc.metadata["buffer_idx"]: (doc, self.default_salience) for doc in self.memory_stream[-self.k :] } # If a doc is considered salient, update the salience score docs_and_scores.update(self.get_salient_docs(query)) rescored_docs = [ (doc, self._get_combined_score(doc, relevance, 
current_time)) for doc, relevance in docs_and_scores.values() ] rescored_docs.sort(key=lambda x: x[1], reverse=True) result = [] # Ensure frequently accessed memories aren't forgotten for doc, _ in rescored_docs[: self.k]: # TODO: Update vector store doc once `update` method is exposed. buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]] buffered_doc.metadata["last_accessed_at"] = current_time result.append(buffered_doc) return result def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Add documents to vectorstore.""" current_time = kwargs.get("current_time") if current_time is None: current_time = datetime.datetime.now() # Avoid mutating input documents dup_docs = [deepcopy(d) for d in documents] for i, doc in enumerate(dup_docs): if "last_accessed_at" not in doc.metadata: doc.metadata["last_accessed_at"] = current_time if "created_at" not in doc.metadata: doc.metadata["created_at"] = current_time doc.metadata["buffer_idx"] = len(self.memory_stream) + i self.memory_stream.extend(dup_docs) return self.vectorstore.add_documents(dup_docs, **kwargs) async def aadd_documents( self, documents: List[Document], **kwargs: Any ) -> List[str]: """Add documents to vectorstore.""" current_time = kwargs.get("current_time") if current_time is None: current_time = datetime.datetime.now() # Avoid mutating input documents dup_docs = [deepcopy(d) for d in documents] for i, doc in enumerate(dup_docs): if "last_accessed_at" not in doc.metadata: doc.metadata["last_accessed_at"] = current_time if "created_at" not in doc.metadata: doc.metadata["created_at"] = current_time doc.metadata["buffer_idx"] = len(self.memory_stream) + i self.memory_stream.extend(dup_docs) return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
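A minimal sketch of the retriever above. FAISS (``pip install faiss-cpu``) and the fake embeddings utility are assumptions used only to keep the example runnable; any `VectorStore` that supports relevance scores would work.

.. code-block:: python

    import faiss

    from langchain_community.docstore import InMemoryDocstore
    from langchain_community.embeddings import FakeEmbeddings
    from langchain_community.vectorstores import FAISS
    from libs.core.langchain_core.documents import Document

    embeddings = FakeEmbeddings(size=64)
    vectorstore = FAISS(embeddings, faiss.IndexFlatL2(64), InMemoryDocstore({}), {})

    retriever = TimeWeightedVectorStoreRetriever(
        vectorstore=vectorstore, decay_rate=0.01, k=1
    )
    retriever.add_documents([Document(page_content="hello world")])

    # Recently accessed documents score higher; `last_accessed_at` is refreshed on retrieval.
    print(retriever.get_relevant_documents("hello"))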
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~parsers~docai.py
"""Module contains a PDF parser based on Document AI from Google Cloud. You need to install two libraries to use this parser: pip install google-cloud-documentai pip install google-cloud-documentai-toolbox """ import logging import re import time from dataclasses import dataclass from typing import TYPE_CHECKING, Iterator, List, Optional, Sequence from libs.core.langchain_core.documents import Document from libs.core.langchain_core.utils.iter import batch_iterate from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob from langchain_community.utilities.vertexai import get_client_info if TYPE_CHECKING: from google.api_core.operation import Operation from google.cloud.documentai import DocumentProcessorServiceClient logger = logging.getLogger(__name__) @dataclass class DocAIParsingResults: """A dataclass to store Document AI parsing results.""" source_path: str parsed_path: str class DocAIParser(BaseBlobParser): """`Google Cloud Document AI` parser. For a detailed explanation of Document AI, refer to the product documentation. https://cloud.google.com/document-ai/docs/overview """ def __init__( self, *, client: Optional["DocumentProcessorServiceClient"] = None, location: Optional[str] = None, gcs_output_path: Optional[str] = None, processor_name: Optional[str] = None, ): """Initializes the parser. Args: client: a DocumentProcessorServiceClient to use location: a Google Cloud location where a Document AI processor is located gcs_output_path: a path on Google Cloud Storage to store parsing results processor_name: full resource name of a Document AI processor or processor version You should provide either a client or location (and then a client would be instantiated). """ if bool(client) == bool(location): raise ValueError( "You must specify either a client or a location to instantiate " "a client." ) pattern = r"projects\/[0-9]+\/locations\/[a-z\-0-9]+\/processors\/[a-z0-9]+" if processor_name and not re.fullmatch(pattern, processor_name): raise ValueError( f"Processor name {processor_name} has the wrong format. If your " "prediction endpoint looks like https://us-documentai.googleapis.com" "/v1/projects/PROJECT_ID/locations/us/processors/PROCESSOR_ID:process," " use only projects/PROJECT_ID/locations/us/processors/PROCESSOR_ID " "part." ) self._gcs_output_path = gcs_output_path self._processor_name = processor_name if client: self._client = client else: try: from google.api_core.client_options import ClientOptions from google.cloud.documentai import DocumentProcessorServiceClient except ImportError as exc: raise ImportError( "documentai package not found, please install it with" " `pip install google-cloud-documentai`" ) from exc options = ClientOptions( api_endpoint=f"{location}-documentai.googleapis.com" ) self._client = DocumentProcessorServiceClient( client_options=options, client_info=get_client_info(module="document-ai"), ) def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Parses a blob lazily. Args: blobs: a Blob to parse This is a long-running operation. A recommended way is to batch documents together and use the `batch_parse()` method. """ yield from self.batch_parse([blob], gcs_output_path=self._gcs_output_path) def online_process( self, blob: Blob, enable_native_pdf_parsing: bool = True, field_mask: Optional[str] = None, page_range: Optional[List[int]] = None, ) -> Iterator[Document]: """Parses a blob lazily using online processing. Args: blob: a blob to parse. 
enable_native_pdf_parsing: enable pdf embedded text extraction field_mask: a comma-separated list of which fields to include in the Document AI response. suggested: "text,pages.pageNumber,pages.layout" page_range: list of page numbers to parse. If `None`, entire document will be parsed. """ try: from google.cloud import documentai from google.cloud.documentai_v1.types import ( IndividualPageSelector, OcrConfig, ProcessOptions, ) except ImportError as exc: raise ImportError( "documentai package not found, please install it with" " `pip install google-cloud-documentai`" ) from exc try: from google.cloud.documentai_toolbox.wrappers.page import _text_from_layout except ImportError as exc: raise ImportError( "documentai_toolbox package not found, please install it with" " `pip install google-cloud-documentai-toolbox`" ) from exc ocr_config = ( OcrConfig(enable_native_pdf_parsing=enable_native_pdf_parsing) if enable_native_pdf_parsing else None ) individual_page_selector = ( IndividualPageSelector(pages=page_range) if page_range else None ) response = self._client.process_document( documentai.ProcessRequest( name=self._processor_name, gcs_document=documentai.GcsDocument( gcs_uri=blob.path, mime_type=blob.mimetype or "application/pdf", ), process_options=ProcessOptions( ocr_config=ocr_config, individual_page_selector=individual_page_selector, ), skip_human_review=True, field_mask=field_mask, ) ) yield from ( Document( page_content=_text_from_layout(page.layout, response.document.text), metadata={ "page": page.page_number, "source": blob.path, }, ) for page in response.document.pages ) def batch_parse( self, blobs: Sequence[Blob], gcs_output_path: Optional[str] = None, timeout_sec: int = 3600, check_in_interval_sec: int = 60, ) -> Iterator[Document]: """Parses a list of blobs lazily. Args: blobs: a list of blobs to parse. gcs_output_path: a path on Google Cloud Storage to store parsing results. timeout_sec: a timeout to wait for Document AI to complete, in seconds. check_in_interval_sec: an interval to wait until next check whether parsing operations have been completed, in seconds This is a long-running operation. A recommended way is to decouple parsing from creating LangChain Documents: >>> operations = parser.docai_parse(blobs, gcs_path) >>> parser.is_running(operations) You can get operations names and save them: >>> names = [op.operation.name for op in operations] And when all operations are finished, you can use their results: >>> operations = parser.operations_from_names(operation_names) >>> results = parser.get_results(operations) >>> docs = parser.parse_from_results(results) """ output_path = gcs_output_path or self._gcs_output_path if not output_path: raise ValueError( "An output path on Google Cloud Storage should be provided." ) operations = self.docai_parse(blobs, gcs_output_path=output_path) operation_names = [op.operation.name for op in operations] logger.debug( "Started parsing with Document AI, submitted operations %s", operation_names ) time_elapsed = 0 while self.is_running(operations): time.sleep(check_in_interval_sec) time_elapsed += check_in_interval_sec if time_elapsed > timeout_sec: raise TimeoutError( "Timeout exceeded! Check operations " f"{operation_names} later!" 
) logger.debug(".") results = self.get_results(operations=operations) yield from self.parse_from_results(results) def parse_from_results( self, results: List[DocAIParsingResults] ) -> Iterator[Document]: try: from google.cloud.documentai_toolbox.utilities.gcs_utilities import ( split_gcs_uri, ) from google.cloud.documentai_toolbox.wrappers.document import _get_shards from google.cloud.documentai_toolbox.wrappers.page import _text_from_layout except ImportError as exc: raise ImportError( "documentai_toolbox package not found, please install it with" " `pip install google-cloud-documentai-toolbox`" ) from exc for result in results: gcs_bucket_name, gcs_prefix = split_gcs_uri(result.parsed_path) shards = _get_shards(gcs_bucket_name, gcs_prefix) yield from ( Document( page_content=_text_from_layout(page.layout, shard.text), metadata={"page": page.page_number, "source": result.source_path}, ) for shard in shards for page in shard.pages ) def operations_from_names(self, operation_names: List[str]) -> List["Operation"]: """Initializes Long-Running Operations from their names.""" try: from google.longrunning.operations_pb2 import ( GetOperationRequest, # type: ignore ) except ImportError as exc: raise ImportError( "long running operations package not found, please install it with" " `pip install gapic-google-longrunning`" ) from exc return [ self._client.get_operation(request=GetOperationRequest(name=name)) for name in operation_names ] def is_running(self, operations: List["Operation"]) -> bool: return any(not op.done() for op in operations) def docai_parse( self, blobs: Sequence[Blob], *, gcs_output_path: Optional[str] = None, processor_name: Optional[str] = None, batch_size: int = 1000, enable_native_pdf_parsing: bool = True, field_mask: Optional[str] = None, ) -> List["Operation"]: """Runs Google Document AI PDF Batch Processing on a list of blobs. Args: blobs: a list of blobs to be parsed gcs_output_path: a path (folder) on GCS to store results processor_name: name of a Document AI processor. batch_size: amount of documents per batch enable_native_pdf_parsing: a config option for the parser field_mask: a comma-separated list of which fields to include in the Document AI response. suggested: "text,pages.pageNumber,pages.layout" Document AI has a 1000 file limit per batch, so batches larger than that need to be split into multiple requests. Batch processing is an async long-running operation and results are stored in a output GCS bucket. """ try: from google.cloud import documentai from google.cloud.documentai_v1.types import OcrConfig, ProcessOptions except ImportError as exc: raise ImportError( "documentai package not found, please install it with" " `pip install google-cloud-documentai`" ) from exc output_path = gcs_output_path or self._gcs_output_path if output_path is None: raise ValueError( "An output path on Google Cloud Storage should be provided." 
) processor_name = processor_name or self._processor_name if processor_name is None: raise ValueError("A Document AI processor name should be provided.") operations = [] for batch in batch_iterate(size=batch_size, iterable=blobs): input_config = documentai.BatchDocumentsInputConfig( gcs_documents=documentai.GcsDocuments( documents=[ documentai.GcsDocument( gcs_uri=blob.path, mime_type=blob.mimetype or "application/pdf", ) for blob in batch ] ) ) output_config = documentai.DocumentOutputConfig( gcs_output_config=documentai.DocumentOutputConfig.GcsOutputConfig( gcs_uri=output_path, field_mask=field_mask ) ) process_options = ( ProcessOptions( ocr_config=OcrConfig( enable_native_pdf_parsing=enable_native_pdf_parsing ) ) if enable_native_pdf_parsing else None ) operations.append( self._client.batch_process_documents( documentai.BatchProcessRequest( name=processor_name, input_documents=input_config, document_output_config=output_config, process_options=process_options, skip_human_review=True, ) ) ) return operations def get_results(self, operations: List["Operation"]) -> List[DocAIParsingResults]: try: from google.cloud.documentai_v1 import BatchProcessMetadata except ImportError as exc: raise ImportError( "documentai package not found, please install it with" " `pip install google-cloud-documentai`" ) from exc return [ DocAIParsingResults( source_path=status.input_gcs_source, parsed_path=status.output_gcs_destination, ) for op in operations for status in ( op.metadata.individual_process_statuses if isinstance(op.metadata, BatchProcessMetadata) else BatchProcessMetadata.deserialize( op.metadata.value ).individual_process_statuses ) ]
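A hedged sketch of batch parsing with the parser above; the processor name, location, and GCS paths are placeholders that must point at real Google Cloud resources (the processor id must match the format validated in ``__init__``).

.. code-block:: python

    from langchain_community.document_loaders.blob_loaders import Blob

    parser = DocAIParser(
        location="us",
        processor_name="projects/1234567890/locations/us/processors/abc123def456",
        gcs_output_path="gs://my-bucket/docai-output/",
    )

    # Blobs point at PDFs already stored on GCS; batch_parse blocks until Document AI finishes.
    blobs = [Blob(path="gs://my-bucket/invoices/invoice-001.pdf")]
    docs = list(parser.batch_parse(blobs))
    print(docs[0].metadata)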
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~chat_models~test_konko.py
"""Evaluate ChatKonko Interface.""" from typing import Any import pytest from libs.core.langchain_core.callbacks import CallbackManager from libs.core.langchain_core.messages import BaseMessage, HumanMessage, SystemMessage from libs.core.langchain_core.outputs import ChatGeneration, ChatResult, LLMResult from langchain_community.chat_models.konko import ChatKonko from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_konko_chat_test() -> None: """Evaluate basic ChatKonko functionality.""" chat_instance = ChatKonko(max_tokens=10) msg = HumanMessage(content="Hi") chat_response = chat_instance([msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str) def test_konko_chat_test_openai() -> None: """Evaluate basic ChatKonko functionality.""" chat_instance = ChatKonko(max_tokens=10, model="gpt-3.5-turbo") msg = HumanMessage(content="Hi") chat_response = chat_instance([msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str) def test_konko_model_test() -> None: """Check how ChatKonko manages model_name.""" chat_instance = ChatKonko(model="alpha") assert chat_instance.model == "alpha" chat_instance = ChatKonko(model="beta") assert chat_instance.model == "beta" def test_konko_available_model_test() -> None: """Check how ChatKonko manages model_name.""" chat_instance = ChatKonko(max_tokens=10, n=2) res = chat_instance.get_available_models() assert isinstance(res, set) def test_konko_system_msg_test() -> None: """Evaluate ChatKonko's handling of system messages.""" chat_instance = ChatKonko(max_tokens=10) sys_msg = SystemMessage(content="Initiate user chat.") user_msg = HumanMessage(content="Hi there") chat_response = chat_instance([sys_msg, user_msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str) def test_konko_generation_test() -> None: """Check ChatKonko's generation ability.""" chat_instance = ChatKonko(max_tokens=10, n=2) msg = HumanMessage(content="Hi") gen_response = chat_instance.generate([[msg], [msg]]) assert isinstance(gen_response, LLMResult) assert len(gen_response.generations) == 2 for gen_list in gen_response.generations: assert len(gen_list) == 2 for gen in gen_list: assert isinstance(gen, ChatGeneration) assert isinstance(gen.text, str) assert gen.text == gen.message.content def test_konko_multiple_outputs_test() -> None: """Test multiple completions with ChatKonko.""" chat_instance = ChatKonko(max_tokens=10, n=5) msg = HumanMessage(content="Hi") gen_response = chat_instance._generate([msg]) assert isinstance(gen_response, ChatResult) assert len(gen_response.generations) == 5 for gen in gen_response.generations: assert isinstance(gen.message, BaseMessage) assert isinstance(gen.message.content, str) def test_konko_streaming_callback_test() -> None: """Evaluate streaming's token callback functionality.""" callback_instance = FakeCallbackHandler() callback_mgr = CallbackManager([callback_instance]) chat_instance = ChatKonko( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_mgr, verbose=True, ) msg = HumanMessage(content="Hi") chat_response = chat_instance([msg]) assert callback_instance.llm_streams > 0 assert isinstance(chat_response, BaseMessage) def test_konko_streaming_info_test() -> None: """Ensure generation details are retained during streaming.""" class TestCallback(FakeCallbackHandler): data_store: dict = {} def on_llm_end(self, *args: Any, **kwargs: Any) -> Any: self.data_store["generation"] = 
args[0] callback_instance = TestCallback() callback_mgr = CallbackManager([callback_instance]) chat_instance = ChatKonko( max_tokens=2, temperature=0, callback_manager=callback_mgr, ) list(chat_instance.stream("hey")) gen_data = callback_instance.data_store["generation"] assert gen_data.generations[0][0].text == " Hey" def test_konko_llm_model_name_test() -> None: """Check if llm_output has model info.""" chat_instance = ChatKonko(max_tokens=10) msg = HumanMessage(content="Hi") llm_data = chat_instance.generate([[msg]]) assert llm_data.llm_output is not None assert llm_data.llm_output["model_name"] == chat_instance.model def test_konko_streaming_model_name_test() -> None: """Check model info during streaming.""" chat_instance = ChatKonko(max_tokens=10, streaming=True) msg = HumanMessage(content="Hi") llm_data = chat_instance.generate([[msg]]) assert llm_data.llm_output is not None assert llm_data.llm_output["model_name"] == chat_instance.model def test_konko_streaming_param_validation_test() -> None: """Ensure correct token callback during streaming.""" with pytest.raises(ValueError): ChatKonko( max_tokens=10, streaming=True, temperature=0, n=5, ) def test_konko_additional_args_test() -> None: """Evaluate extra arguments for ChatKonko.""" chat_instance = ChatKonko(extra=3, max_tokens=10) assert chat_instance.max_tokens == 10 assert chat_instance.model_kwargs == {"extra": 3} chat_instance = ChatKonko(extra=3, model_kwargs={"addition": 2}) assert chat_instance.model_kwargs == {"extra": 3, "addition": 2} with pytest.raises(ValueError): ChatKonko(extra=3, model_kwargs={"extra": 2}) with pytest.raises(ValueError): ChatKonko(model_kwargs={"temperature": 0.2}) with pytest.raises(ValueError): ChatKonko(model_kwargs={"model": "gpt-3.5-turbo-instruct"}) def test_konko_token_streaming_test() -> None: """Check token streaming for ChatKonko.""" chat_instance = ChatKonko(max_tokens=10) for token in chat_instance.stream("Just a test"): assert isinstance(token.content, str)
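The tests above assume a configured Konko account; a minimal interactive equivalent of the basic test looks like the sketch below (credentials are assumed to come from the environment, e.g. a Konko API key).

.. code-block:: python

    from langchain_community.chat_models.konko import ChatKonko
    from libs.core.langchain_core.messages import HumanMessage

    chat = ChatKonko(max_tokens=10)  # assumes Konko credentials and model access are configured
    print(chat([HumanMessage(content="Hi")]).content)

    # Token streaming, mirroring test_konko_token_streaming_test above.
    for token in chat.stream("Just a test"):
        print(token.content, end="")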
[ "Hi there", "Hi", "Initiate user chat." ]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~playwright~navigate.py
from __future__ import annotations from typing import Optional, Type from urllib.parse import urlparse from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from libs.core.langchain_core.pydantic_v1 import BaseModel, Field, validator from langchain_community.tools.playwright.base import BaseBrowserTool from langchain_community.tools.playwright.utils import ( aget_current_page, get_current_page, ) class NavigateToolInput(BaseModel): """Input for NavigateToolInput.""" url: str = Field(..., description="url to navigate to") @validator("url") def validate_url_scheme(cls, url: str) -> str: """Check that the URL scheme is valid.""" parsed_url = urlparse(url) if parsed_url.scheme not in ("http", "https"): raise ValueError("URL scheme must be 'http' or 'https'") return url class NavigateTool(BaseBrowserTool): """Tool for navigating a browser to a URL. **Security Note**: This tool provides code to control web-browser navigation. This tool can navigate to any URL, including internal network URLs, and URLs exposed on the server itself. However, if exposing this tool to end-users, consider limiting network access to the server that hosts the agent. By default, the URL scheme has been limited to 'http' and 'https' to prevent navigation to local file system URLs (or other schemes). If access to the local file system is required, consider creating a custom tool or providing a custom args_schema that allows the desired URL schemes. See https://python.langchain.com/docs/security for more information. """ name: str = "navigate_browser" description: str = "Navigate a browser to the specified URL" args_schema: Type[BaseModel] = NavigateToolInput def _run( self, url: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) response = page.goto(url) status = response.status if response else "unknown" return f"Navigating to {url} returned status code {status}" async def _arun( self, url: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) response = await page.goto(url) status = response.status if response else "unknown" return f"Navigating to {url} returned status code {status}"
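A short sketch of the tool above with a synchronous Playwright browser; the browser helper import is an assumption from the same playwright tool package, and the target URL is a placeholder.

.. code-block:: python

    from langchain_community.tools.playwright.utils import create_sync_playwright_browser

    sync_browser = create_sync_playwright_browser()  # requires `pip install playwright` + browser binaries
    tool = NavigateTool.from_browser(sync_browser=sync_browser)

    # Only http/https URLs pass the NavigateToolInput validator.
    print(tool.run({"url": "https://example.com"}))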
[ "Navigate a browser to the specified URL" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~rss.py
import logging from typing import Any, Iterator, List, Optional, Sequence from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader from langchain_community.document_loaders.news import NewsURLLoader logger = logging.getLogger(__name__) class RSSFeedLoader(BaseLoader): """Load news articles from `RSS` feeds using `Unstructured`. Args: urls: URLs for RSS feeds to load. Each articles in the feed is loaded into its own document. opml: OPML file to load feed urls from. Only one of urls or opml should be provided. The value can be a URL string, or OPML markup contents as byte or string. continue_on_failure: If True, continue loading documents even if loading fails for a particular URL. show_progress_bar: If True, use tqdm to show a loading progress bar. Requires tqdm to be installed, ``pip install tqdm``. **newsloader_kwargs: Any additional named arguments to pass to NewsURLLoader. Example: .. code-block:: python from langchain_community.document_loaders import RSSFeedLoader loader = RSSFeedLoader( urls=["<url-1>", "<url-2>"], ) docs = loader.load() The loader uses feedparser to parse RSS feeds. The feedparser library is not installed by default so you should install it if using this loader: https://pythonhosted.org/feedparser/ If you use OPML, you should also install listparser: https://pythonhosted.org/listparser/ Finally, newspaper is used to process each article: https://newspaper.readthedocs.io/en/latest/ """ # noqa: E501 def __init__( self, urls: Optional[Sequence[str]] = None, opml: Optional[str] = None, continue_on_failure: bool = True, show_progress_bar: bool = False, **newsloader_kwargs: Any, ) -> None: """Initialize with urls or OPML.""" if (urls is None) == ( opml is None ): # This is True if both are None or neither is None raise ValueError( "Provide either the urls or the opml argument, but not both." ) self.urls = urls self.opml = opml self.continue_on_failure = continue_on_failure self.show_progress_bar = show_progress_bar self.newsloader_kwargs = newsloader_kwargs def load(self) -> List[Document]: iter = self.lazy_load() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. " "Please install with 'pip install tqdm' or set " "show_progress_bar=False." ) from e iter = tqdm(iter) return list(iter) @property def _get_urls(self) -> Sequence[str]: if self.urls: return self.urls try: import listparser except ImportError as e: raise ImportError( "Package listparser must be installed if the opml arg is used. " "Please install with 'pip install listparser' or use the " "urls arg instead." 
) from e rss = listparser.parse(self.opml) return [feed.url for feed in rss.feeds] def lazy_load(self) -> Iterator[Document]: try: import feedparser # noqa:F401 except ImportError: raise ImportError( "feedparser package not found, please install it with " "`pip install feedparser`" ) for url in self._get_urls: try: feed = feedparser.parse(url) if getattr(feed, "bozo", False): raise ValueError( f"Error fetching {url}, exception: {feed.bozo_exception}" ) except Exception as e: if self.continue_on_failure: logger.error(f"Error fetching {url}, exception: {e}") continue else: raise e try: for entry in feed.entries: loader = NewsURLLoader( urls=[entry.link], **self.newsloader_kwargs, ) article = loader.load()[0] article.metadata["feed"] = url yield article except Exception as e: if self.continue_on_failure: logger.error(f"Error processing entry {entry.link}, exception: {e}") continue else: raise e
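A brief usage sketch for the loader above; the feed URL is illustrative, and ``feedparser`` plus the newspaper dependency must be installed as noted in the docstring.

.. code-block:: python

    from langchain_community.document_loaders import RSSFeedLoader

    loader = RSSFeedLoader(
        urls=["https://news.ycombinator.com/rss"],  # placeholder feed
        show_progress_bar=False,
    )
    docs = loader.load()
    print(len(docs), docs[0].metadata.get("feed"))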
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~agent_toolkits~sql~toolkit.py
"""Toolkit for interacting with an SQL database.""" from typing import List from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.pydantic_v1 import Field from langchain_community.agent_toolkits.base import BaseToolkit from langchain_community.tools import BaseTool from langchain_community.tools.sql_database.tool import ( InfoSQLDatabaseTool, ListSQLDatabaseTool, QuerySQLCheckerTool, QuerySQLDataBaseTool, ) from langchain_community.utilities.sql_database import SQLDatabase class SQLDatabaseToolkit(BaseToolkit): """Toolkit for interacting with SQL databases.""" db: SQLDatabase = Field(exclude=True) llm: BaseLanguageModel = Field(exclude=True) @property def dialect(self) -> str: """Return string representation of SQL dialect to use.""" return self.db.dialect class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" list_sql_database_tool = ListSQLDatabaseTool(db=self.db) info_sql_database_tool_description = ( "Input to this tool is a comma-separated list of tables, output is the " "schema and sample rows for those tables. " "Be sure that the tables actually exist by calling " f"{list_sql_database_tool.name} first! " "Example Input: table1, table2, table3" ) info_sql_database_tool = InfoSQLDatabaseTool( db=self.db, description=info_sql_database_tool_description ) query_sql_database_tool_description = ( "Input to this tool is a detailed and correct SQL query, output is a " "result from the database. If the query is not correct, an error message " "will be returned. If an error is returned, rewrite the query, check the " "query, and try again. If you encounter an issue with Unknown column " f"'xxxx' in 'field list', use {info_sql_database_tool.name} " "to query the correct table fields." ) query_sql_database_tool = QuerySQLDataBaseTool( db=self.db, description=query_sql_database_tool_description ) query_sql_checker_tool_description = ( "Use this tool to double check if your query is correct before executing " "it. Always use this tool before executing a query with " f"{query_sql_database_tool.name}!" ) query_sql_checker_tool = QuerySQLCheckerTool( db=self.db, llm=self.llm, description=query_sql_checker_tool_description ) return [ query_sql_database_tool, info_sql_database_tool, list_sql_database_tool, query_sql_checker_tool, ]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~airbyte_json.py
import json from typing import List from libs.core.langchain_core.documents import Document from libs.core.langchain_core.utils import stringify_dict from langchain_community.document_loaders.base import BaseLoader class AirbyteJSONLoader(BaseLoader): """Load local `Airbyte` json files.""" def __init__(self, file_path: str): """Initialize with a file path. This should start with '/tmp/airbyte_local/'.""" self.file_path = file_path """Path to the local Airbyte JSON file to load.""" def load(self) -> List[Document]: text = "" for line in open(self.file_path, "r"): data = json.loads(line)["_airbyte_data"] text += stringify_dict(data) metadata = {"source": self.file_path} return [Document(page_content=text, metadata=metadata)]
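A short sketch for the loader above; the file path is a placeholder under the conventional local Airbyte output directory.

.. code-block:: python

    from langchain_community.document_loaders.airbyte_json import AirbyteJSONLoader

    loader = AirbyteJSONLoader("/tmp/airbyte_local/json_data/_airbyte_raw_users.jsonl")
    docs = loader.load()
    print(docs[0].page_content[:200])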
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~retrievers~merger_retriever.py
import asyncio from typing import List from libs.core.langchain_core.documents import Document from libs.core.langchain_core.retrievers import BaseRetriever from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) class MergerRetriever(BaseRetriever): """Retriever that merges the results of multiple retrievers.""" retrievers: List[BaseRetriever] """A list of retrievers to merge.""" def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, ) -> List[Document]: """ Get the relevant documents for a given query. Args: query: The query to search for. Returns: A list of relevant documents. """ # Merge the results of the retrievers. merged_documents = self.merge_documents(query, run_manager) return merged_documents async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, ) -> List[Document]: """ Asynchronously get the relevant documents for a given query. Args: query: The query to search for. Returns: A list of relevant documents. """ # Merge the results of the retrievers. merged_documents = await self.amerge_documents(query, run_manager) return merged_documents def merge_documents( self, query: str, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """ Merge the results of the retrievers. Args: query: The query to search for. Returns: A list of merged documents. """ # Get the results of all retrievers. retriever_docs = [ retriever.get_relevant_documents( query, callbacks=run_manager.get_child("retriever_{}".format(i + 1)) ) for i, retriever in enumerate(self.retrievers) ] # Merge the results of the retrievers. merged_documents = [] max_docs = max(len(docs) for docs in retriever_docs) for i in range(max_docs): for retriever, doc in zip(self.retrievers, retriever_docs): if i < len(doc): merged_documents.append(doc[i]) return merged_documents async def amerge_documents( self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun ) -> List[Document]: """ Asynchronously merge the results of the retrievers. Args: query: The query to search for. Returns: A list of merged documents. """ # Get the results of all retrievers. retriever_docs = await asyncio.gather( *( retriever.aget_relevant_documents( query, callbacks=run_manager.get_child("retriever_{}".format(i + 1)) ) for i, retriever in enumerate(self.retrievers) ) ) # Merge the results of the retrievers. merged_documents = [] max_docs = max(len(docs) for docs in retriever_docs) for i in range(max_docs): for retriever, doc in zip(self.retrievers, retriever_docs): if i < len(doc): merged_documents.append(doc[i]) return merged_documents
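A self-contained sketch that interleaves results from two small FAISS retrievers; FAISS and the fake embeddings utility are assumptions used only to keep the example runnable.

.. code-block:: python

    from langchain_community.embeddings import FakeEmbeddings
    from langchain_community.vectorstores import FAISS

    emb = FakeEmbeddings(size=64)
    retriever_a = FAISS.from_texts(["paris is in france"], emb).as_retriever()
    retriever_b = FAISS.from_texts(["berlin is in germany"], emb).as_retriever()

    merged = MergerRetriever(retrievers=[retriever_a, retriever_b])
    # Results are interleaved: first doc of each retriever, then second of each, and so on.
    print(merged.get_relevant_documents("european capitals"))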
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~indexes~_api.py
"""Module contains logic for indexing documents into vector stores.""" from __future__ import annotations import hashlib import json import uuid from itertools import islice from typing import ( Any, AsyncIterable, AsyncIterator, Callable, Dict, Iterable, Iterator, List, Literal, Optional, Sequence, Set, TypedDict, TypeVar, Union, cast, ) from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import root_validator from libs.core.langchain_core.vectorstores import VectorStore from langchain.document_loaders.base import BaseLoader from langchain.indexes.base import NAMESPACE_UUID, RecordManager T = TypeVar("T") def _hash_string_to_uuid(input_string: str) -> uuid.UUID: """Hashes a string and returns the corresponding UUID.""" hash_value = hashlib.sha1(input_string.encode("utf-8")).hexdigest() return uuid.uuid5(NAMESPACE_UUID, hash_value) def _hash_nested_dict_to_uuid(data: dict[Any, Any]) -> uuid.UUID: """Hashes a nested dictionary and returns the corresponding UUID.""" serialized_data = json.dumps(data, sort_keys=True) hash_value = hashlib.sha1(serialized_data.encode("utf-8")).hexdigest() return uuid.uuid5(NAMESPACE_UUID, hash_value) class _HashedDocument(Document): """A hashed document with a unique ID.""" uid: str hash_: str """The hash of the document including content and metadata.""" content_hash: str """The hash of the document content.""" metadata_hash: str """The hash of the document metadata.""" @classmethod def is_lc_serializable(cls) -> bool: return False @root_validator(pre=True) def calculate_hashes(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Root validator to calculate content and metadata hash.""" content = values.get("page_content", "") metadata = values.get("metadata", {}) forbidden_keys = ("hash_", "content_hash", "metadata_hash") for key in forbidden_keys: if key in metadata: raise ValueError( f"Metadata cannot contain key {key} as it " f"is reserved for internal use." ) content_hash = str(_hash_string_to_uuid(content)) try: metadata_hash = str(_hash_nested_dict_to_uuid(metadata)) except Exception as e: raise ValueError( f"Failed to hash metadata: {e}. " f"Please use a dict that can be serialized using json." 
) values["content_hash"] = content_hash values["metadata_hash"] = metadata_hash values["hash_"] = str(_hash_string_to_uuid(content_hash + metadata_hash)) _uid = values.get("uid", None) if _uid is None: values["uid"] = values["hash_"] return values def to_document(self) -> Document: """Return a Document object.""" return Document( page_content=self.page_content, metadata=self.metadata, ) @classmethod def from_document( cls, document: Document, *, uid: Optional[str] = None ) -> _HashedDocument: """Create a HashedDocument from a Document.""" return cls( uid=uid, page_content=document.page_content, metadata=document.metadata, ) def _batch(size: int, iterable: Iterable[T]) -> Iterator[List[T]]: """Utility batching function.""" it = iter(iterable) while True: chunk = list(islice(it, size)) if not chunk: return yield chunk async def _abatch(size: int, iterable: AsyncIterable[T]) -> AsyncIterator[List[T]]: """Utility batching function.""" batch: List[T] = [] async for element in iterable: if len(batch) < size: batch.append(element) if len(batch) >= size: yield batch batch = [] if batch: yield batch def _get_source_id_assigner( source_id_key: Union[str, Callable[[Document], str], None], ) -> Callable[[Document], Union[str, None]]: """Get the source id from the document.""" if source_id_key is None: return lambda doc: None elif isinstance(source_id_key, str): return lambda doc: doc.metadata[source_id_key] elif callable(source_id_key): return source_id_key else: raise ValueError( f"source_id_key should be either None, a string or a callable. " f"Got {source_id_key} of type {type(source_id_key)}." ) def _deduplicate_in_order( hashed_documents: Iterable[_HashedDocument], ) -> Iterator[_HashedDocument]: """Deduplicate a list of hashed documents while preserving order.""" seen: Set[str] = set() for hashed_doc in hashed_documents: if hashed_doc.hash_ not in seen: seen.add(hashed_doc.hash_) yield hashed_doc # PUBLIC API class IndexingResult(TypedDict): """Return a detailed a breakdown of the result of the indexing operation.""" num_added: int """Number of added documents.""" num_updated: int """Number of updated documents because they were not up to date.""" num_deleted: int """Number of deleted documents.""" num_skipped: int """Number of skipped documents because they were already up to date.""" def index( docs_source: Union[BaseLoader, Iterable[Document]], record_manager: RecordManager, vector_store: VectorStore, *, batch_size: int = 100, cleanup: Literal["incremental", "full", None] = None, source_id_key: Union[str, Callable[[Document], str], None] = None, cleanup_batch_size: int = 1_000, force_update: bool = False, ) -> IndexingResult: """Index data from the loader into the vector store. Indexing functionality uses a manager to keep track of which documents are in the vector store. This allows us to keep track of which documents were updated, and which documents were deleted, which documents should be skipped. For the time being, documents are indexed using their hashes, and users are not able to specify the uid of the document. IMPORTANT: if auto_cleanup is set to True, the loader should be returning the entire dataset, and not just a subset of the dataset. Otherwise, the auto_cleanup will remove documents that it is not supposed to. Args: docs_source: Data loader or iterable of documents to index. record_manager: Timestamped set to keep track of which documents were updated. vector_store: Vector store to index the documents into. batch_size: Batch size to use when indexing. 
cleanup: How to handle clean up of documents. - Incremental: Cleans up all documents that haven't been updated AND that are associated with source ids that were seen during indexing. Clean up is done continuously during indexing helping to minimize the probability of users seeing duplicated content. - Full: Delete all documents that haven to been returned by the loader. Clean up runs after all documents have been indexed. This means that users may see duplicated content during indexing. - None: Do not delete any documents. source_id_key: Optional key that helps identify the original source of the document. cleanup_batch_size: Batch size to use when cleaning up documents. force_update: Force update documents even if they are present in the record manager. Useful if you are re-indexing with updated embeddings. Returns: Indexing result which contains information about how many documents were added, updated, deleted, or skipped. """ if cleanup not in {"incremental", "full", None}: raise ValueError( f"cleanup should be one of 'incremental', 'full' or None. " f"Got {cleanup}." ) if cleanup == "incremental" and source_id_key is None: raise ValueError("Source id key is required when cleanup mode is incremental.") # Check that the Vectorstore has required methods implemented methods = ["delete", "add_documents"] for method in methods: if not hasattr(vector_store, method): raise ValueError( f"Vectorstore {vector_store} does not have required method {method}" ) if type(vector_store).delete == VectorStore.delete: # Checking if the vectorstore has overridden the default delete method # implementation which just raises a NotImplementedError raise ValueError("Vectorstore has not implemented the delete method") if isinstance(docs_source, BaseLoader): try: doc_iterator = docs_source.lazy_load() except NotImplementedError: doc_iterator = iter(docs_source.load()) else: doc_iterator = iter(docs_source) source_id_assigner = _get_source_id_assigner(source_id_key) # Mark when the update started. index_start_dt = record_manager.get_time() num_added = 0 num_skipped = 0 num_updated = 0 num_deleted = 0 for doc_batch in _batch(batch_size, doc_iterator): hashed_docs = list( _deduplicate_in_order( [_HashedDocument.from_document(doc) for doc in doc_batch] ) ) source_ids: Sequence[Optional[str]] = [ source_id_assigner(doc) for doc in hashed_docs ] if cleanup == "incremental": # If the cleanup mode is incremental, source ids are required. for source_id, hashed_doc in zip(source_ids, hashed_docs): if source_id is None: raise ValueError( "Source ids are required when cleanup mode is incremental. " f"Document that starts with " f"content: {hashed_doc.page_content[:100]} was not assigned " f"as source id." ) # source ids cannot be None after for loop above. source_ids = cast(Sequence[str], source_ids) # type: ignore[assignment] exists_batch = record_manager.exists([doc.uid for doc in hashed_docs]) # Filter out documents that already exist in the record store. uids = [] docs_to_index = [] uids_to_refresh = [] seen_docs: Set[str] = set() for hashed_doc, doc_exists in zip(hashed_docs, exists_batch): if doc_exists: if force_update: seen_docs.add(hashed_doc.uid) else: uids_to_refresh.append(hashed_doc.uid) continue uids.append(hashed_doc.uid) docs_to_index.append(hashed_doc.to_document()) # Update refresh timestamp if uids_to_refresh: record_manager.update(uids_to_refresh, time_at_least=index_start_dt) num_skipped += len(uids_to_refresh) # Be pessimistic and assume that all vector store write will fail. 
# First write to vector store if docs_to_index: vector_store.add_documents(docs_to_index, ids=uids) num_added += len(docs_to_index) - len(seen_docs) num_updated += len(seen_docs) # And only then update the record store. # Update ALL records, even if they already exist since we want to refresh # their timestamp. record_manager.update( [doc.uid for doc in hashed_docs], group_ids=source_ids, time_at_least=index_start_dt, ) # If source IDs are provided, we can do the deletion incrementally! if cleanup == "incremental": # Get the uids of the documents that were not returned by the loader. # mypy isn't good enough to determine that source ids cannot be None # here due to a check that's happening above, so we check again. for source_id in source_ids: if source_id is None: raise AssertionError("Source ids cannot be None here.") _source_ids = cast(Sequence[str], source_ids) uids_to_delete = record_manager.list_keys( group_ids=_source_ids, before=index_start_dt ) if uids_to_delete: # Then delete from vector store. vector_store.delete(uids_to_delete) # First delete from record store. record_manager.delete_keys(uids_to_delete) num_deleted += len(uids_to_delete) if cleanup == "full": while uids_to_delete := record_manager.list_keys( before=index_start_dt, limit=cleanup_batch_size ): # First delete from record store. vector_store.delete(uids_to_delete) # Then delete from record manager. record_manager.delete_keys(uids_to_delete) num_deleted += len(uids_to_delete) return { "num_added": num_added, "num_updated": num_updated, "num_skipped": num_skipped, "num_deleted": num_deleted, } # Define an asynchronous generator function async def _to_async_iterator(iterator: Iterable[T]) -> AsyncIterator[T]: """Convert an iterable to an async iterator.""" for item in iterator: yield item async def aindex( docs_source: Union[Iterable[Document], AsyncIterator[Document]], record_manager: RecordManager, vector_store: VectorStore, *, batch_size: int = 100, cleanup: Literal["incremental", "full", None] = None, source_id_key: Union[str, Callable[[Document], str], None] = None, cleanup_batch_size: int = 1_000, force_update: bool = False, ) -> IndexingResult: """Index data from the loader into the vector store. Indexing functionality uses a manager to keep track of which documents are in the vector store. This allows us to keep track of which documents were updated, and which documents were deleted, which documents should be skipped. For the time being, documents are indexed using their hashes, and users are not able to specify the uid of the document. IMPORTANT: if auto_cleanup is set to True, the loader should be returning the entire dataset, and not just a subset of the dataset. Otherwise, the auto_cleanup will remove documents that it is not supposed to. Args: docs_source: Data loader or iterable of documents to index. record_manager: Timestamped set to keep track of which documents were updated. vector_store: Vector store to index the documents into. batch_size: Batch size to use when indexing. cleanup: How to handle clean up of documents. - Incremental: Cleans up all documents that haven't been updated AND that are associated with source ids that were seen during indexing. Clean up is done continuously during indexing helping to minimize the probability of users seeing duplicated content. - Full: Delete all documents that haven to been returned by the loader. Clean up runs after all documents have been indexed. This means that users may see duplicated content during indexing. - None: Do not delete any documents. 
source_id_key: Optional key that helps identify the original source of the document. cleanup_batch_size: Batch size to use when cleaning up documents. force_update: Force update documents even if they are present in the record manager. Useful if you are re-indexing with updated embeddings. Returns: Indexing result which contains information about how many documents were added, updated, deleted, or skipped. """ if cleanup not in {"incremental", "full", None}: raise ValueError( f"cleanup should be one of 'incremental', 'full' or None. " f"Got {cleanup}." ) if cleanup == "incremental" and source_id_key is None: raise ValueError("Source id key is required when cleanup mode is incremental.") # Check that the Vectorstore has required methods implemented methods = ["adelete", "aadd_documents"] for method in methods: if not hasattr(vector_store, method): raise ValueError( f"Vectorstore {vector_store} does not have required method {method}" ) if type(vector_store).adelete == VectorStore.adelete: # Checking if the vectorstore has overridden the default delete method # implementation which just raises a NotImplementedError raise ValueError("Vectorstore has not implemented the delete method") if isinstance(docs_source, BaseLoader): raise NotImplementedError( "Not supported yet. Please pass an async iterator of documents." ) async_doc_iterator: AsyncIterator[Document] if hasattr(docs_source, "__aiter__"): async_doc_iterator = docs_source # type: ignore[assignment] else: async_doc_iterator = _to_async_iterator(docs_source) source_id_assigner = _get_source_id_assigner(source_id_key) # Mark when the update started. index_start_dt = await record_manager.aget_time() num_added = 0 num_skipped = 0 num_updated = 0 num_deleted = 0 async for doc_batch in _abatch(batch_size, async_doc_iterator): hashed_docs = list( _deduplicate_in_order( [_HashedDocument.from_document(doc) for doc in doc_batch] ) ) source_ids: Sequence[Optional[str]] = [ source_id_assigner(doc) for doc in hashed_docs ] if cleanup == "incremental": # If the cleanup mode is incremental, source ids are required. for source_id, hashed_doc in zip(source_ids, hashed_docs): if source_id is None: raise ValueError( "Source ids are required when cleanup mode is incremental. " f"Document that starts with " f"content: {hashed_doc.page_content[:100]} was not assigned " f"as source id." ) # source ids cannot be None after for loop above. source_ids = cast(Sequence[str], source_ids) exists_batch = await record_manager.aexists([doc.uid for doc in hashed_docs]) # Filter out documents that already exist in the record store. uids: list[str] = [] docs_to_index: list[Document] = [] uids_to_refresh = [] seen_docs: Set[str] = set() for hashed_doc, doc_exists in zip(hashed_docs, exists_batch): if doc_exists: if force_update: seen_docs.add(hashed_doc.uid) else: uids_to_refresh.append(hashed_doc.uid) continue uids.append(hashed_doc.uid) docs_to_index.append(hashed_doc.to_document()) if uids_to_refresh: # Must be updated to refresh timestamp. await record_manager.aupdate(uids_to_refresh, time_at_least=index_start_dt) num_skipped += len(uids_to_refresh) # Be pessimistic and assume that all vector store write will fail. # First write to vector store if docs_to_index: await vector_store.aadd_documents(docs_to_index, ids=uids) num_added += len(docs_to_index) - len(seen_docs) num_updated += len(seen_docs) # And only then update the record store. # Update ALL records, even if they already exist since we want to refresh # their timestamp. 
await record_manager.aupdate( [doc.uid for doc in hashed_docs], group_ids=source_ids, time_at_least=index_start_dt, ) # If source IDs are provided, we can do the deletion incrementally! if cleanup == "incremental": # Get the uids of the documents that were not returned by the loader. # mypy isn't good enough to determine that source ids cannot be None # here due to a check that's happening above, so we check again. for source_id in source_ids: if source_id is None: raise AssertionError("Source ids cannot be None here.") _source_ids = cast(Sequence[str], source_ids) uids_to_delete = await record_manager.alist_keys( group_ids=_source_ids, before=index_start_dt ) if uids_to_delete: # Then delete from vector store. await vector_store.adelete(uids_to_delete) # First delete from record store. await record_manager.adelete_keys(uids_to_delete) num_deleted += len(uids_to_delete) if cleanup == "full": while uids_to_delete := await record_manager.alist_keys( before=index_start_dt, limit=cleanup_batch_size ): # First delete from record store. await vector_store.adelete(uids_to_delete) # Then delete from record manager. await record_manager.adelete_keys(uids_to_delete) num_deleted += len(uids_to_delete) return { "num_added": num_added, "num_updated": num_updated, "num_skipped": num_skipped, "num_deleted": num_deleted, }
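A hedged sketch of the index() API above, following the usual SQLRecordManager plus vector store setup. Chroma and FakeEmbeddings are stand-ins here (chromadb must be installed, and any Embeddings implementation works); the SQLite URL is a placeholder:

from langchain.indexes import SQLRecordManager, index
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Chroma
from libs.core.langchain_core.documents import Document

collection_name = "indexing_demo"
embedding = FakeEmbeddings(size=128)  # stand-in for a real embedding model
vectorstore = Chroma(collection_name=collection_name, embedding_function=embedding)

# The record manager stores document hashes and timestamps (here in SQLite).
record_manager = SQLRecordManager(
    f"chroma/{collection_name}", db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()

docs = [
    Document(page_content="kitty", metadata={"source": "kitty.txt"}),
    Document(page_content="doggy", metadata={"source": "doggy.txt"}),
]

# First run adds both documents, e.g. {'num_added': 2, 'num_updated': 0, ...}.
print(index(docs, record_manager, vectorstore, cleanup="incremental", source_id_key="source"))
# A second run skips them because their content/metadata hashes are unchanged.
print(index(docs, record_manager, vectorstore, cleanup="incremental", source_id_key="source"))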
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~ainetwork~owner.py
import builtins import json from typing import List, Optional, Type, Union from libs.core.langchain_core.callbacks import AsyncCallbackManagerForToolRun from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType class RuleSchema(BaseModel): """Schema for owner operations.""" type: OperationType = Field(...) path: str = Field(..., description="Blockchain reference path") address: Optional[Union[str, List[str]]] = Field( None, description="A single address or a list of addresses" ) write_owner: Optional[bool] = Field( False, description="Authority to edit the `owner` property of the path" ) write_rule: Optional[bool] = Field( False, description="Authority to edit `write rule` for the path" ) write_function: Optional[bool] = Field( False, description="Authority to `set function` for the path" ) branch_owner: Optional[bool] = Field( False, description="Authority to initialize `owner` of sub-paths" ) class AINOwnerOps(AINBaseTool): """Tool for owner operations.""" name: str = "AINownerOps" description: str = """ Rules for `owner` in AINetwork Blockchain database. An address set as `owner` can modify permissions according to its granted authorities ## Path Rule - (/[a-zA-Z_0-9]+)+ - Permission checks ascend from the most specific (child) path to broader (parent) paths until an `owner` is located. ## Address Rules - 0x[0-9a-fA-F]{40}: 40-digit hexadecimal address - *: All addresses permitted - Defaults to the current session's address ## SET - `SET` alters permissions for specific addresses, while other addresses remain unaffected. - When removing an address of `owner`, set all authorities for that address to false. - message `write_owner permission evaluated false` if fail ### Example - type: SET - path: /apps/langchain - address: [<address 1>, <address 2>] - write_owner: True - write_rule: True - write_function: True - branch_owner: True ## GET - Provides all addresses with `owner` permissions and their authorities in the path. ### Example - type: GET - path: /apps/langchain """ # noqa: E501 args_schema: Type[BaseModel] = RuleSchema async def _arun( self, type: OperationType, path: str, address: Optional[Union[str, List[str]]] = None, write_owner: Optional[bool] = None, write_rule: Optional[bool] = None, write_function: Optional[bool] = None, branch_owner: Optional[bool] = None, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: from ain.types import ValueOnlyTransactionInput try: if type is OperationType.SET: if address is None: address = self.interface.wallet.defaultAccount.address if isinstance(address, str): address = [address] res = await self.interface.db.ref(path).setOwner( transactionInput=ValueOnlyTransactionInput( value={ ".owner": { "owners": { address: { "write_owner": write_owner or False, "write_rule": write_rule or False, "write_function": write_function or False, "branch_owner": branch_owner or False, } for address in address } } } ) ) elif type is OperationType.GET: res = await self.interface.db.ref(path).getOwner() else: raise ValueError(f"Unsupported 'type': {type}.") return json.dumps(res, ensure_ascii=False) except Exception as e: return f"{builtins.type(e).__name__}: {str(e)}"
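A heavily hedged sketch of invoking the tool above. It assumes the `ain-py` package is installed and that AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY holds a valid AINetwork key; the value shown is only a placeholder:

import os

from langchain_community.tools.ainetwork.owner import AINOwnerOps

# Placeholder key; the AINetwork base tool authenticates with this
# environment variable when the tool is constructed.
os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"] = "<YOUR_PRIVATE_KEY>"

tool = AINOwnerOps()

# Read the owner configuration of a path (GET requires no write authorities).
print(tool.run({"type": "GET", "path": "/apps/langchain"}))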
[ "\nRules for `owner` in AINetwork Blockchain database.\nAn address set as `owner` can modify permissions according to its granted authorities\n\n## Path Rule\n- (/[a-zA-Z_0-9]+)+\n- Permission checks ascend from the most specific (child) path to broader (parent) paths until an `owner` is located.\n\n## Address Rules\n- 0x[0-9a-fA-F]{40}: 40-digit hexadecimal address\n- *: All addresses permitted\n- Defaults to the current session's address\n\n## SET\n- `SET` alters permissions for specific addresses, while other addresses remain unaffected.\n- When removing an address of `owner`, set all authorities for that address to false.\n- message `write_owner permission evaluated false` if fail\n\n### Example\n- type: SET\n- path: /apps/langchain\n- address: [<address 1>, <address 2>]\n- write_owner: True\n- write_rule: True\n- write_function: True\n- branch_owner: True\n\n## GET\n- Provides all addresses with `owner` permissions and their authorities in the path.\n\n### Example\n- type: GET\n- path: /apps/langchain\n" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~singlestoredb.py
from __future__ import annotations import json import re from typing import ( Any, Callable, Iterable, List, Optional, Tuple, Type, ) from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore, VectorStoreRetriever from sqlalchemy.pool import QueuePool from langchain_community.vectorstores.utils import DistanceStrategy DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.DOT_PRODUCT ORDERING_DIRECTIVE: dict = { DistanceStrategy.EUCLIDEAN_DISTANCE: "", DistanceStrategy.DOT_PRODUCT: "DESC", } class SingleStoreDB(VectorStore): """`SingleStore DB` vector store. The prerequisite for using this class is the installation of the ``singlestoredb`` Python package. The SingleStoreDB vectorstore can be created by providing an embedding function and the relevant parameters for the database connection, connection pool, and optionally, the names of the table and the fields to use. """ def _get_connection(self: SingleStoreDB) -> Any: try: import singlestoredb as s2 except ImportError: raise ImportError( "Could not import singlestoredb python package. " "Please install it with `pip install singlestoredb`." ) return s2.connect(**self.connection_kwargs) def __init__( self, embedding: Embeddings, *, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, table_name: str = "embeddings", content_field: str = "content", metadata_field: str = "metadata", vector_field: str = "vector", pool_size: int = 5, max_overflow: int = 10, timeout: float = 30, **kwargs: Any, ): """Initialize with necessary components. Args: embedding (Embeddings): A text embedding model. distance_strategy (DistanceStrategy, optional): Determines the strategy employed for calculating the distance between vectors in the embedding space. Defaults to DOT_PRODUCT. Available options are: - DOT_PRODUCT: Computes the scalar product of two vectors. This is the default behavior - EUCLIDEAN_DISTANCE: Computes the Euclidean distance between two vectors. This metric considers the geometric distance in the vector space, and might be more suitable for embeddings that rely on spatial relationships. table_name (str, optional): Specifies the name of the table in use. Defaults to "embeddings". content_field (str, optional): Specifies the field to store the content. Defaults to "content". metadata_field (str, optional): Specifies the field to store metadata. Defaults to "metadata". vector_field (str, optional): Specifies the field to store the vector. Defaults to "vector". Following arguments pertain to the connection pool: pool_size (int, optional): Determines the number of active connections in the pool. Defaults to 5. max_overflow (int, optional): Determines the maximum number of connections allowed beyond the pool_size. Defaults to 10. timeout (float, optional): Specifies the maximum wait time in seconds for establishing a connection. Defaults to 30. Following arguments pertain to the database connection: host (str, optional): Specifies the hostname, IP address, or URL for the database connection. The default scheme is "mysql". user (str, optional): Database username. password (str, optional): Database password. port (int, optional): Database port. Defaults to 3306 for non-HTTP connections, 80 for HTTP connections, and 443 for HTTPS connections. database (str, optional): Database name. Additional optional arguments provide further customization over the database connection: pure_python (bool, optional): Toggles the connector mode. 
If True, operates in pure Python mode. local_infile (bool, optional): Allows local file uploads. charset (str, optional): Specifies the character set for string values. ssl_key (str, optional): Specifies the path of the file containing the SSL key. ssl_cert (str, optional): Specifies the path of the file containing the SSL certificate. ssl_ca (str, optional): Specifies the path of the file containing the SSL certificate authority. ssl_cipher (str, optional): Sets the SSL cipher list. ssl_disabled (bool, optional): Disables SSL usage. ssl_verify_cert (bool, optional): Verifies the server's certificate. Automatically enabled if ``ssl_ca`` is specified. ssl_verify_identity (bool, optional): Verifies the server's identity. conv (dict[int, Callable], optional): A dictionary of data conversion functions. credential_type (str, optional): Specifies the type of authentication to use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO. autocommit (bool, optional): Enables autocommits. results_type (str, optional): Determines the structure of the query results: tuples, namedtuples, dicts. results_format (str, optional): Deprecated. This option has been renamed to results_type. Examples: Basic Usage: .. code-block:: python from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import SingleStoreDB vectorstore = SingleStoreDB( OpenAIEmbeddings(), host="https://user:[email protected]:3306/database" ) Advanced Usage: .. code-block:: python from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import SingleStoreDB vectorstore = SingleStoreDB( OpenAIEmbeddings(), distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, host="127.0.0.1", port=3306, user="user", password="password", database="db", table_name="my_custom_table", pool_size=10, timeout=60, ) Using environment variables: .. code-block:: python from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import SingleStoreDB os.environ['SINGLESTOREDB_URL'] = 'me:[email protected]/my_db' vectorstore = SingleStoreDB(OpenAIEmbeddings()) """ self.embedding = embedding self.distance_strategy = distance_strategy self.table_name = self._sanitize_input(table_name) self.content_field = self._sanitize_input(content_field) self.metadata_field = self._sanitize_input(metadata_field) self.vector_field = self._sanitize_input(vector_field) # Pass the rest of the kwargs to the connection. self.connection_kwargs = kwargs # Add program name and version to connection attributes. if "conn_attrs" not in self.connection_kwargs: self.connection_kwargs["conn_attrs"] = dict() self.connection_kwargs["conn_attrs"]["_connector_name"] = "langchain python sdk" self.connection_kwargs["conn_attrs"]["_connector_version"] = "1.0.1" # Create connection pool. 
self.connection_pool = QueuePool( self._get_connection, max_overflow=max_overflow, pool_size=pool_size, timeout=timeout, ) self._create_table() @property def embeddings(self) -> Embeddings: return self.embedding def _sanitize_input(self, input_str: str) -> str: # Remove characters that are not alphanumeric or underscores return re.sub(r"[^a-zA-Z0-9_]", "", input_str) def _select_relevance_score_fn(self) -> Callable[[float], float]: return self._max_inner_product_relevance_score_fn def _create_table(self: SingleStoreDB) -> None: """Create table if it doesn't exist.""" conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute( """CREATE TABLE IF NOT EXISTS {} ({} TEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, {} BLOB, {} JSON);""".format( self.table_name, self.content_field, self.vector_field, self.metadata_field, ), ) finally: cur.close() finally: conn.close() def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, embeddings: Optional[List[List[float]]] = None, **kwargs: Any, ) -> List[str]: """Add more texts to the vectorstore. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. Defaults to None. embeddings (Optional[List[List[float]]], optional): Optional pre-generated embeddings. Defaults to None. Returns: List[str]: empty list """ conn = self.connection_pool.connect() try: cur = conn.cursor() try: # Write data to singlestore db for i, text in enumerate(texts): # Use provided values by default or fallback metadata = metadatas[i] if metadatas else {} embedding = ( embeddings[i] if embeddings else self.embedding.embed_documents([text])[0] ) cur.execute( "INSERT INTO {} VALUES (%s, JSON_ARRAY_PACK(%s), %s)".format( self.table_name ), ( text, "[{}]".format(",".join(map(str, embedding))), json.dumps(metadata), ), ) finally: cur.close() finally: conn.close() return [] def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Document]: """Returns the most similar indexed documents to the query text. Uses cosine similarity. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. filter (dict): A dictionary of metadata fields and values to filter by. Returns: List[Document]: A list of documents that are most similar to the query text. Examples: .. code-block:: python from langchain_community.vectorstores import SingleStoreDB from langchain_community.embeddings import OpenAIEmbeddings s2 = SingleStoreDB.from_documents( docs, OpenAIEmbeddings(), host="username:password@localhost:3306/database" ) s2.similarity_search("query text", 1, {"metadata_field": "metadata_value"}) """ docs_and_scores = self.similarity_search_with_score( query=query, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Uses cosine similarity. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: A dictionary of metadata fields and values to filter by. Defaults to None. 
Returns: List of Documents most similar to the query and score for each """ # Creates embedding vector from user query embedding = self.embedding.embed_query(query) conn = self.connection_pool.connect() result = [] where_clause: str = "" where_clause_values: List[Any] = [] if filter: where_clause = "WHERE " arguments = [] def build_where_clause( where_clause_values: List[Any], sub_filter: dict, prefix_args: Optional[List[str]] = None, ) -> None: prefix_args = prefix_args or [] for key in sub_filter.keys(): if isinstance(sub_filter[key], dict): build_where_clause( where_clause_values, sub_filter[key], prefix_args + [key] ) else: arguments.append( "JSON_EXTRACT_JSON({}, {}) = %s".format( self.metadata_field, ", ".join(["%s"] * (len(prefix_args) + 1)), ) ) where_clause_values += prefix_args + [key] where_clause_values.append(json.dumps(sub_filter[key])) build_where_clause(where_clause_values, filter) where_clause += " AND ".join(arguments) try: cur = conn.cursor() try: cur.execute( """SELECT {}, {}, {}({}, JSON_ARRAY_PACK(%s)) as __score FROM {} {} ORDER BY __score {} LIMIT %s""".format( self.content_field, self.metadata_field, self.distance_strategy.name if isinstance(self.distance_strategy, DistanceStrategy) else self.distance_strategy, self.vector_field, self.table_name, where_clause, ORDERING_DIRECTIVE[self.distance_strategy], ), ("[{}]".format(",".join(map(str, embedding))),) + tuple(where_clause_values) + (k,), ) for row in cur.fetchall(): doc = Document(page_content=row[0], metadata=row[1]) result.append((doc, float(row[2]))) finally: cur.close() finally: conn.close() return result @classmethod def from_texts( cls: Type[SingleStoreDB], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, table_name: str = "embeddings", content_field: str = "content", metadata_field: str = "metadata", vector_field: str = "vector", pool_size: int = 5, max_overflow: int = 10, timeout: float = 30, **kwargs: Any, ) -> SingleStoreDB: """Create a SingleStoreDB vectorstore from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new table for the embeddings in SingleStoreDB. 3. Adds the documents to the newly created table. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import SingleStoreDB from langchain_community.embeddings import OpenAIEmbeddings s2 = SingleStoreDB.from_texts( texts, OpenAIEmbeddings(), host="username:password@localhost:3306/database" ) """ instance = cls( embedding, distance_strategy=distance_strategy, table_name=table_name, content_field=content_field, metadata_field=metadata_field, vector_field=vector_field, pool_size=pool_size, max_overflow=max_overflow, timeout=timeout, **kwargs, ) instance.add_texts(texts, metadatas, embedding.embed_documents(texts), **kwargs) return instance # SingleStoreDBRetriever is not needed, but we keep it for backwards compatibility SingleStoreDBRetriever = VectorStoreRetriever
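A hedged sketch of the vector store above. The SINGLESTOREDB_URL value is a placeholder, the `singlestoredb` package must be installed, and OpenAIEmbeddings (which needs OPENAI_API_KEY) merely stands in for any Embeddings implementation:

import os

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import SingleStoreDB

os.environ["SINGLESTOREDB_URL"] = "admin:password@localhost:3306/demo_db"  # placeholder

vectorstore = SingleStoreDB.from_texts(
    [
        "SingleStoreDB stores vectors in a BLOB column.",
        "Similarity search orders rows by DOT_PRODUCT by default.",
    ],
    OpenAIEmbeddings(),
    table_name="langchain_demo",
)

docs = vectorstore.similarity_search("How are vectors stored?", k=1)
print(docs[0].page_content)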
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~ollama.py
import json from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional import aiohttp import requests from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.language_models.llms import BaseLLM from libs.core.langchain_core.outputs import GenerationChunk, LLMResult from libs.core.langchain_core.pydantic_v1 import Extra def _stream_response_to_generation_chunk( stream_response: str, ) -> GenerationChunk: """Convert a stream response to a generation chunk.""" parsed_response = json.loads(stream_response) generation_info = parsed_response if parsed_response.get("done") is True else None return GenerationChunk( text=parsed_response.get("response", ""), generation_info=generation_info ) class OllamaEndpointNotFoundError(Exception): """Raised when the Ollama endpoint is not found.""" class _OllamaCommon(BaseLanguageModel): base_url: str = "http://localhost:11434" """Base url the model is hosted under.""" model: str = "llama2" """Model name to use.""" mirostat: Optional[int] = None """Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)""" mirostat_eta: Optional[float] = None """Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)""" mirostat_tau: Optional[float] = None """Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)""" num_ctx: Optional[int] = None """Sets the size of the context window used to generate the next token. (Default: 2048) """ num_gpu: Optional[int] = None """The number of GPUs to use. On macOS it defaults to 1 to enable metal support, 0 to disable.""" num_thread: Optional[int] = None """Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores).""" repeat_last_n: Optional[int] = None """Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)""" repeat_penalty: Optional[float] = None """Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)""" temperature: Optional[float] = None """The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)""" stop: Optional[List[str]] = None """Sets the stop tokens to use.""" tfs_z: Optional[float] = None """Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)""" top_k: Optional[int] = None """Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)""" top_p: Optional[int] = None """Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. 
(Default: 0.9)""" system: Optional[str] = None """system prompt (overrides what is defined in the Modelfile)""" template: Optional[str] = None """full prompt or prompt template (overrides what is defined in the Modelfile)""" format: Optional[str] = None """Specify the format of the output (e.g., json)""" timeout: Optional[int] = None """Timeout for the request stream""" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Ollama.""" return { "model": self.model, "format": self.format, "options": { "mirostat": self.mirostat, "mirostat_eta": self.mirostat_eta, "mirostat_tau": self.mirostat_tau, "num_ctx": self.num_ctx, "num_gpu": self.num_gpu, "num_thread": self.num_thread, "repeat_last_n": self.repeat_last_n, "repeat_penalty": self.repeat_penalty, "temperature": self.temperature, "stop": self.stop, "tfs_z": self.tfs_z, "top_k": self.top_k, "top_p": self.top_p, }, "system": self.system, "template": self.template, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model, "format": self.format}, **self._default_params} def _create_generate_stream( self, prompt: str, stop: Optional[List[str]] = None, images: Optional[List[str]] = None, **kwargs: Any, ) -> Iterator[str]: payload = {"prompt": prompt, "images": images} yield from self._create_stream( payload=payload, stop=stop, api_url=f"{self.base_url}/api/generate/", **kwargs, ) async def _acreate_generate_stream( self, prompt: str, stop: Optional[List[str]] = None, images: Optional[List[str]] = None, **kwargs: Any, ) -> AsyncIterator[str]: payload = {"prompt": prompt, "images": images} async for item in self._acreate_stream( payload=payload, stop=stop, api_url=f"{self.base_url}/api/generate/", **kwargs, ): yield item def _create_stream( self, api_url: str, payload: Any, stop: Optional[List[str]] = None, **kwargs: Any, ) -> Iterator[str]: if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: stop = self.stop elif stop is None: stop = [] params = self._default_params if "model" in kwargs: params["model"] = kwargs["model"] if "options" in kwargs: params["options"] = kwargs["options"] else: params["options"] = { **params["options"], "stop": stop, **kwargs, } if payload.get("messages"): request_payload = {"messages": payload.get("messages", []), **params} else: request_payload = { "prompt": payload.get("prompt"), "images": payload.get("images", []), **params, } response = requests.post( url=api_url, headers={"Content-Type": "application/json"}, json=request_payload, stream=True, timeout=self.timeout, ) response.encoding = "utf-8" if response.status_code != 200: if response.status_code == 404: raise OllamaEndpointNotFoundError( "Ollama call failed with status code 404. " "Maybe your model is not found " f"and you should pull the model with `ollama pull {self.model}`." ) else: optional_detail = response.json().get("error") raise ValueError( f"Ollama call failed with status code {response.status_code}." 
f" Details: {optional_detail}" ) return response.iter_lines(decode_unicode=True) async def _acreate_stream( self, api_url: str, payload: Any, stop: Optional[List[str]] = None, **kwargs: Any, ) -> AsyncIterator[str]: if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: stop = self.stop elif stop is None: stop = [] params = self._default_params if "model" in kwargs: params["model"] = kwargs["model"] if "options" in kwargs: params["options"] = kwargs["options"] else: params["options"] = { **params["options"], "stop": stop, **kwargs, } if payload.get("messages"): request_payload = {"messages": payload.get("messages", []), **params} else: request_payload = { "prompt": payload.get("prompt"), "images": payload.get("images", []), **params, } async with aiohttp.ClientSession() as session: async with session.post( url=api_url, headers={"Content-Type": "application/json"}, json=request_payload, timeout=self.timeout, ) as response: if response.status != 200: if response.status == 404: raise OllamaEndpointNotFoundError( "Ollama call failed with status code 404." ) else: optional_detail = await response.json().get("error") raise ValueError( f"Ollama call failed with status code {response.status}." f" Details: {optional_detail}" ) async for line in response.content: yield line.decode("utf-8") def _stream_with_aggregation( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, verbose: bool = False, **kwargs: Any, ) -> GenerationChunk: final_chunk: Optional[GenerationChunk] = None for stream_resp in self._create_generate_stream(prompt, stop, **kwargs): if stream_resp: chunk = _stream_response_to_generation_chunk(stream_resp) if final_chunk is None: final_chunk = chunk else: final_chunk += chunk if run_manager: run_manager.on_llm_new_token( chunk.text, verbose=verbose, ) if final_chunk is None: raise ValueError("No data received from Ollama stream.") return final_chunk async def _astream_with_aggregation( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, verbose: bool = False, **kwargs: Any, ) -> GenerationChunk: final_chunk: Optional[GenerationChunk] = None async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs): if stream_resp: chunk = _stream_response_to_generation_chunk(stream_resp) if final_chunk is None: final_chunk = chunk else: final_chunk += chunk if run_manager: await run_manager.on_llm_new_token( chunk.text, verbose=verbose, ) if final_chunk is None: raise ValueError("No data received from Ollama stream.") return final_chunk class Ollama(BaseLLM, _OllamaCommon): """Ollama locally runs large language models. To use, follow the instructions at https://ollama.ai/. Example: .. code-block:: python from langchain_community.llms import Ollama ollama = Ollama(model="llama2") """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @property def _llm_type(self) -> str: """Return type of llm.""" return "ollama-llm" def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, images: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Call out to Ollama's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. 
code-block:: python response = ollama("Tell me a joke.") """ # TODO: add caching here. generations = [] for prompt in prompts: final_chunk = super()._stream_with_aggregation( prompt, stop=stop, images=images, run_manager=run_manager, verbose=self.verbose, **kwargs, ) generations.append([final_chunk]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, images: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Call out to Ollama's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ollama("Tell me a joke.") """ # TODO: add caching here. generations = [] for prompt in prompts: final_chunk = await super()._astream_with_aggregation( prompt, stop=stop, images=images, run_manager=run_manager, verbose=self.verbose, **kwargs, ) generations.append([final_chunk]) return LLMResult(generations=generations) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: for stream_resp in self._create_stream(prompt, stop, **kwargs): if stream_resp: chunk = _stream_response_to_generation_chunk(stream_resp) yield chunk if run_manager: run_manager.on_llm_new_token( chunk.text, verbose=self.verbose, ) async def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: async for stream_resp in self._acreate_stream(prompt, stop, **kwargs): if stream_resp: chunk = _stream_response_to_generation_chunk(stream_resp) yield chunk if run_manager: await run_manager.on_llm_new_token( chunk.text, verbose=self.verbose, )
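A short sketch of the wrapper above, assuming a local Ollama server at the default http://localhost:11434 and that the llama2 model has been pulled (`ollama pull llama2`):

from langchain_community.llms import Ollama

llm = Ollama(model="llama2", temperature=0.1)

# Single blocking call against the /api/generate endpoint.
print(llm.invoke("Tell me a joke about vector databases."))

# Streaming yields text chunks as they arrive from the server.
for chunk in llm.stream("Write a haiku about local LLMs."):
    print(chunk, end="", flush=True)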
[ "None" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~vectara.py
from __future__ import annotations import json import logging import os from dataclasses import dataclass, field from hashlib import md5 from typing import Any, Iterable, List, Optional, Tuple, Type import requests from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import Field from libs.core.langchain_core.vectorstores import VectorStore, VectorStoreRetriever logger = logging.getLogger(__name__) @dataclass class SummaryConfig: """ is_enabled: True if summary is enabled, False otherwise max_results: maximum number of results to summarize response_lang: requested language for the summary """ is_enabled: bool = False max_results: int = 7 response_lang: str = "eng" @dataclass class MMRConfig: """ is_enabled: True if MMR is enabled, False otherwise mmr_k: number of results to fetch for MMR, defaults to 50 diversity_bias: number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to minimum diversity and 1 to maximum diversity. Defaults to 0.3. Note: diversity_bias is equivalent 1-lambda_mult where lambda_mult is the value often used in max_marginal_relevance_search() We chose to use that since we believe it's more intuitive to the user. """ is_enabled: bool = False mmr_k: int = 50 diversity_bias: float = 0.3 @dataclass class VectaraQueryConfig: """ k: Number of Documents to return. Defaults to 10. lambda_val: lexical match parameter for hybrid search. filter Dictionary of argument(s) to filter on metadata. For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. score_threshold: minimal score threshold for the result. If defined, results with score less than this value will be filtered out. n_sentence_context: number of sentences before/after the matching segment to add, defaults to 2 mmr_config: MMRConfig configuration dataclass summary_config: SummaryConfig configuration dataclass """ k: int = 10 lambda_val: float = 0.0 filter: str = "" score_threshold: Optional[float] = None n_sentence_context: int = 2 mmr_config: MMRConfig = field(default_factory=MMRConfig) summary_config: SummaryConfig = field(default_factory=SummaryConfig) class Vectara(VectorStore): """`Vectara API` vector store. See (https://vectara.com). Example: .. code-block:: python from langchain.vectorstores import Vectara vectorstore = Vectara( vectara_customer_id=vectara_customer_id, vectara_corpus_id=vectara_corpus_id, vectara_api_key=vectara_api_key ) """ def __init__( self, vectara_customer_id: Optional[str] = None, vectara_corpus_id: Optional[str] = None, vectara_api_key: Optional[str] = None, vectara_api_timeout: int = 120, source: str = "langchain", ): """Initialize with Vectara API.""" self._vectara_customer_id = vectara_customer_id or os.environ.get( "VECTARA_CUSTOMER_ID" ) self._vectara_corpus_id = vectara_corpus_id or os.environ.get( "VECTARA_CORPUS_ID" ) self._vectara_api_key = vectara_api_key or os.environ.get("VECTARA_API_KEY") if ( self._vectara_customer_id is None or self._vectara_corpus_id is None or self._vectara_api_key is None ): logger.warning( "Can't find Vectara credentials, customer_id or corpus_id in " "environment." 
) else: logger.debug(f"Using corpus id {self._vectara_corpus_id}") self._source = source self._session = requests.Session() # to reuse connections adapter = requests.adapters.HTTPAdapter(max_retries=3) self._session.mount("http://", adapter) self.vectara_api_timeout = vectara_api_timeout @property def embeddings(self) -> Optional[Embeddings]: return None def _get_post_headers(self) -> dict: """Returns headers that should be attached to each post request.""" return { "x-api-key": self._vectara_api_key, "customer-id": self._vectara_customer_id, "Content-Type": "application/json", "X-Source": self._source, } def _delete_doc(self, doc_id: str) -> bool: """ Delete a document from the Vectara corpus. Args: url (str): URL of the page to delete. doc_id (str): ID of the document to delete. Returns: bool: True if deletion was successful, False otherwise. """ body = { "customer_id": self._vectara_customer_id, "corpus_id": self._vectara_corpus_id, "document_id": doc_id, } response = self._session.post( "https://api.vectara.io/v1/delete-doc", data=json.dumps(body), verify=True, headers=self._get_post_headers(), timeout=self.vectara_api_timeout, ) if response.status_code != 200: logger.error( f"Delete request failed for doc_id = {doc_id} with status code " f"{response.status_code}, reason {response.reason}, text " f"{response.text}" ) return False return True def _index_doc(self, doc: dict, use_core_api: bool = False) -> str: request: dict[str, Any] = {} request["customer_id"] = self._vectara_customer_id request["corpus_id"] = self._vectara_corpus_id request["document"] = doc api_endpoint = ( "https://api.vectara.io/v1/core/index" if use_core_api else "https://api.vectara.io/v1/index" ) response = self._session.post( headers=self._get_post_headers(), url=api_endpoint, data=json.dumps(request), timeout=self.vectara_api_timeout, verify=True, ) status_code = response.status_code result = response.json() status_str = result["status"]["code"] if "status" in result else None if status_code == 409 or status_str and (status_str == "ALREADY_EXISTS"): return "E_ALREADY_EXISTS" elif status_str and (status_str == "FORBIDDEN"): return "E_NO_PERMISSIONS" else: return "E_SUCCEEDED" def add_files( self, files_list: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """ Vectara provides a way to add documents directly via our API where pre-processing and chunking occurs internally in an optimal way This method provides a way to use that API in LangChain Args: files_list: Iterable of strings, each representing a local file path. Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc. 
see API docs for full list metadatas: Optional list of metadatas associated with each file Returns: List of ids associated with each of the files indexed """ doc_ids = [] for inx, file in enumerate(files_list): if not os.path.exists(file): logger.error(f"File {file} does not exist, skipping") continue md = metadatas[inx] if metadatas else {} files: dict = { "file": (file, open(file, "rb")), "doc_metadata": json.dumps(md), } headers = self._get_post_headers() headers.pop("Content-Type") response = self._session.post( f"https://api.vectara.io/upload?c={self._vectara_customer_id}&o={self._vectara_corpus_id}&d=True", files=files, verify=True, headers=headers, timeout=self.vectara_api_timeout, ) if response.status_code == 409: doc_id = response.json()["document"]["documentId"] logger.info( f"File {file} already exists on Vectara (doc_id={doc_id}), skipping" ) elif response.status_code == 200: doc_id = response.json()["document"]["documentId"] doc_ids.append(doc_id) else: logger.info(f"Error indexing file {file}: {response.json()}") return doc_ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, doc_metadata: Optional[dict] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. doc_metadata: optional metadata for the document This function indexes all the input text strings in the Vectara corpus as a single Vectara document, where each input text is considered a "section" and the metadata are associated with each section. if 'doc_metadata' is provided, it is associated with the Vectara document. Returns: document ID of the document added """ doc_hash = md5() for t in texts: doc_hash.update(t.encode()) doc_id = doc_hash.hexdigest() if metadatas is None: metadatas = [{} for _ in texts] if doc_metadata: doc_metadata["source"] = "langchain" else: doc_metadata = {"source": "langchain"} use_core_api = kwargs.get("use_core_api", False) section_key = "parts" if use_core_api else "section" doc = { "document_id": doc_id, "metadataJson": json.dumps(doc_metadata), section_key: [ {"text": text, "metadataJson": json.dumps(md)} for text, md in zip(texts, metadatas) ], } success_str = self._index_doc(doc, use_core_api=use_core_api) if success_str == "E_ALREADY_EXISTS": self._delete_doc(doc_id) self._index_doc(doc) elif success_str == "E_NO_PERMISSIONS": print( """No permissions to add document to Vectara. Check your corpus ID, customer ID and API key""" ) return [doc_id] def vectara_query( self, query: str, config: VectaraQueryConfig, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run a Vectara query Args: query: Text to look up documents similar to. 
config: VectaraQueryConfig object Returns: A list of k Documents matching the given query If summary is enabled, last document is the summary text with 'summary'=True """ if isinstance(config.mmr_config, dict): config.mmr_config = MMRConfig(**config.mmr_config) if isinstance(config.summary_config, dict): config.summary_config = SummaryConfig(**config.summary_config) data = { "query": [ { "query": query, "start": 0, "numResults": config.mmr_config.mmr_k if config.mmr_config.is_enabled else config.k, "contextConfig": { "sentencesBefore": config.n_sentence_context, "sentencesAfter": config.n_sentence_context, }, "corpusKey": [ { "customerId": self._vectara_customer_id, "corpusId": self._vectara_corpus_id, "metadataFilter": config.filter, "lexicalInterpolationConfig": {"lambda": config.lambda_val}, } ], } ] } if config.mmr_config.is_enabled: data["query"][0]["rerankingConfig"] = { "rerankerId": 272725718, "mmrConfig": {"diversityBias": config.mmr_config.diversity_bias}, } if config.summary_config.is_enabled: data["query"][0]["summary"] = [ { "maxSummarizedResults": config.summary_config.max_results, "responseLang": config.summary_config.response_lang, } ] response = self._session.post( headers=self._get_post_headers(), url="https://api.vectara.io/v1/query", data=json.dumps(data), timeout=self.vectara_api_timeout, ) if response.status_code != 200: logger.error( "Query failed %s", f"(code {response.status_code}, reason {response.reason}, details " f"{response.text})", ) return [], "" result = response.json() if config.score_threshold: responses = [ r for r in result["responseSet"][0]["response"] if r["score"] > config.score_threshold ] else: responses = result["responseSet"][0]["response"] documents = result["responseSet"][0]["document"] metadatas = [] for x in responses: md = {m["name"]: m["value"] for m in x["metadata"]} doc_num = x["documentIndex"] doc_md = {m["name"]: m["value"] for m in documents[doc_num]["metadata"]} if "source" not in doc_md: doc_md["source"] = "vectara" md.update(doc_md) metadatas.append(md) res = [ ( Document( page_content=x["text"], metadata=md, ), x["score"], ) for x, md in zip(responses, metadatas) ] if config.mmr_config.is_enabled: res = res[: config.k] if config.summary_config.is_enabled: summary = result["responseSet"][0]["summary"][0]["text"] res.append( (Document(page_content=summary, metadata={"summary": True}), 0.0) ) return res def similarity_search_with_score( self, query: str, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. any other querying variable in VectaraQueryConfig like: - lambda_val: lexical match parameter for hybrid search. - filter: filter string - score_threshold: minimal score threshold for the result. - n_sentence_context: number of sentences before/after the matching segment - mmr_config: optional configuration for MMR (see MMRConfig dataclass) - summary_config: optional configuration for summary (see SummaryConfig dataclass) Returns: List of Documents most similar to the query and score for each. """ config = VectaraQueryConfig(**kwargs) docs = self.vectara_query(query, config) return docs def similarity_search( self, query: str, **kwargs: Any, ) -> List[Document]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. 
any other querying variable in VectaraQueryConfig Returns: List of Documents most similar to the query """ docs_and_scores = self.similarity_search_with_score( query, **kwargs, ) return [doc for doc, _ in docs_and_scores] def max_marginal_relevance_search( self, query: str, fetch_k: int = 50, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 50 lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. kwargs: any other querying variable in VectaraQueryConfig Returns: List of Documents selected by maximal marginal relevance. """ kwargs["mmr_config"] = MMRConfig( is_enabled=True, mmr_k=fetch_k, diversity_bias=1 - lambda_mult ) return self.similarity_search(query, **kwargs) @classmethod def from_texts( cls: Type[Vectara], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Vectara: """Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Vectara vectara = Vectara.from_texts( texts, vectara_customer_id=customer_id, vectara_corpus_id=corpus_id, vectara_api_key=api_key, ) """ # Notes: # * Vectara generates its own embeddings, so we ignore the provided # embeddings (required by interface) # * when metadatas[] are provided they are associated with each "part" # in Vectara. doc_metadata can be used to provide additional metadata # for the document itself (applies to all "texts" in this call) doc_metadata = kwargs.pop("doc_metadata", {}) vectara = cls(**kwargs) vectara.add_texts(texts, metadatas, doc_metadata=doc_metadata, **kwargs) return vectara @classmethod def from_files( cls: Type[Vectara], files: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Vectara: """Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Vectara vectara = Vectara.from_files( files_list, vectara_customer_id=customer_id, vectara_corpus_id=corpus_id, vectara_api_key=api_key, ) """ # Note: Vectara generates its own embeddings, so we ignore the provided # embeddings (required by interface) vectara = cls(**kwargs) vectara.add_files(files, metadatas) return vectara class VectaraRetriever(VectorStoreRetriever): """Retriever class for `Vectara`.""" vectorstore: Vectara """Vectara vectorstore.""" search_kwargs: dict = Field( default_factory=lambda: { "lambda_val": 0.0, "k": 5, "filter": "", "n_sentence_context": "2", } ) """Search params. k: Number of Documents to return. Defaults to 5. lambda_val: lexical match parameter for hybrid search. filter: Dictionary of argument(s) to filter on metadata. For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. 
n_sentence_context: number of sentences before/after the matching segment to add """ def add_texts( self, texts: List[str], metadatas: Optional[List[dict]] = None, doc_metadata: Optional[dict] = None, ) -> None: """Add text to the Vectara vectorstore. Args: texts (List[str]): The text metadatas (List[dict]): Metadata dicts, must line up with existing store """ self.vectorstore.add_texts(texts, metadatas, doc_metadata or {})
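

# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module.
# It assumes a provisioned Vectara corpus; the customer id, corpus id and API
# key below are placeholders, and the query keyword arguments are forwarded
# into VectaraQueryConfig as described in the docstrings above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    vectara = Vectara.from_texts(
        ["Vectara is a managed retrieval platform."],
        vectara_customer_id="<CUSTOMER_ID>",  # placeholder
        vectara_corpus_id="<CORPUS_ID>",  # placeholder
        vectara_api_key="<API_KEY>",  # placeholder
    )
    # k, lambda_val, filter, n_sentence_context, mmr_config and summary_config
    # are all accepted here because they are VectaraQueryConfig fields.
    for doc, score in vectara.similarity_search_with_score(
        "What is Vectara?", k=3, lambda_val=0.025
    ):
        print(score, doc.page_content)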
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~document_loaders~test_pyspark_dataframe_loader.py
import random import string from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.pyspark_dataframe import ( PySparkDataFrameLoader, ) def test_pyspark_loader_load_valid_data() -> None: from pyspark.sql import SparkSession # Requires a session to be set up spark = SparkSession.builder.getOrCreate() data = [ (random.choice(string.ascii_letters), random.randint(0, 1)) for _ in range(3) ] df = spark.createDataFrame(data, ["text", "label"]) expected_docs = [ Document( page_content=data[0][0], metadata={"label": data[0][1]}, ), Document( page_content=data[1][0], metadata={"label": data[1][1]}, ), Document( page_content=data[2][0], metadata={"label": data[2][1]}, ), ] loader = PySparkDataFrameLoader( spark_session=spark, df=df, page_content_column="text" ) result = loader.load() assert result == expected_docs
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~azure_openai.py
"""Azure OpenAI embeddings wrapper.""" from __future__ import annotations import os import warnings from typing import Callable, Dict, Optional, Union from libs.core.langchain_core.pydantic_v1 import Field, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env from langchain_community.embeddings.openai import OpenAIEmbeddings from langchain_community.utils.openai import is_openai_v1 class AzureOpenAIEmbeddings(OpenAIEmbeddings): """`Azure OpenAI` Embeddings API.""" azure_endpoint: Union[str, None] = None """Your Azure endpoint, including the resource. Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided. Example: `https://example-resource.azure.openai.com/` """ deployment: Optional[str] = Field(default=None, alias="azure_deployment") """A model deployment. If given sets the base client URL to include `/deployments/{azure_deployment}`. Note: this means you won't be able to use non-deployment endpoints. """ openai_api_key: Union[str, None] = Field(default=None, alias="api_key") """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided.""" azure_ad_token: Union[str, None] = None """Your Azure Active Directory token. Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided. For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. """ # noqa: E501 azure_ad_token_provider: Union[Callable[[], str], None] = None """A function that returns an Azure Active Directory token. Will be invoked on every request. """ openai_api_version: Optional[str] = Field(default=None, alias="api_version") """Automatically inferred from env var `OPENAI_API_VERSION` if not provided.""" validate_base_url: bool = True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" # Check OPENAI_KEY for backwards compatibility. # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using # other forms of azure credentials. values["openai_api_key"] = ( values["openai_api_key"] or os.getenv("AZURE_OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY") ) values["openai_api_base"] = values["openai_api_base"] or os.getenv( "OPENAI_API_BASE" ) values["openai_api_version"] = values["openai_api_version"] or os.getenv( "OPENAI_API_VERSION", default="2023-05-15" ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", default="azure" ) values["openai_organization"] = ( values["openai_organization"] or os.getenv("OPENAI_ORG_ID") or os.getenv("OPENAI_ORGANIZATION") ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) values["azure_endpoint"] = values["azure_endpoint"] or os.getenv( "AZURE_OPENAI_ENDPOINT" ) values["azure_ad_token"] = values["azure_ad_token"] or os.getenv( "AZURE_OPENAI_AD_TOKEN" ) # Azure OpenAI embedding models allow a maximum of 16 texts # at a time in each batch # See: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings values["chunk_size"] = min(values["chunk_size"], 16) try: import openai except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) if is_openai_v1(): # For backwards compatibility. Before openai v1, no distinction was made # between azure_endpoint and base_url (openai_api_base). 
openai_api_base = values["openai_api_base"] if openai_api_base and values["validate_base_url"]: if "/openai" not in openai_api_base: values["openai_api_base"] += "/openai" warnings.warn( "As of openai>=1.0.0, Azure endpoints should be specified via " f"the `azure_endpoint` param not `openai_api_base` " f"(or alias `base_url`). Updating `openai_api_base` from " f"{openai_api_base} to {values['openai_api_base']}." ) if values["deployment"]: warnings.warn( "As of openai>=1.0.0, if `deployment` (or alias " "`azure_deployment`) is specified then " "`openai_api_base` (or alias `base_url`) should not be. " "Instead use `deployment` (or alias `azure_deployment`) " "and `azure_endpoint`." ) if values["deployment"] not in values["openai_api_base"]: warnings.warn( "As of openai>=1.0.0, if `openai_api_base` " "(or alias `base_url`) is specified it is expected to be " "of the form " "https://example-resource.azure.openai.com/openai/deployments/example-deployment. " # noqa: E501 f"Updating {openai_api_base} to " f"{values['openai_api_base']}." ) values["openai_api_base"] += ( "/deployments/" + values["deployment"] ) values["deployment"] = None client_params = { "api_version": values["openai_api_version"], "azure_endpoint": values["azure_endpoint"], "azure_deployment": values["deployment"], "api_key": values["openai_api_key"], "azure_ad_token": values["azure_ad_token"], "azure_ad_token_provider": values["azure_ad_token_provider"], "organization": values["openai_organization"], "base_url": values["openai_api_base"], "timeout": values["request_timeout"], "max_retries": values["max_retries"], "default_headers": values["default_headers"], "default_query": values["default_query"], "http_client": values["http_client"], } values["client"] = openai.AzureOpenAI(**client_params).embeddings values["async_client"] = openai.AsyncAzureOpenAI(**client_params).embeddings else: values["client"] = openai.Embedding return values @property def _llm_type(self) -> str: return "azure-openai-chat"
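

# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module.
# It assumes AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT are set in the
# environment and that an embedding model is deployed under the (placeholder)
# deployment name below.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    embeddings = AzureOpenAIEmbeddings(
        azure_deployment="<your-embedding-deployment>",  # placeholder
        openai_api_version="2023-05-15",
    )
    vector = embeddings.embed_query("hello world")
    print(f"embedding dimension: {len(vector)}")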
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~opensearch_vector_search.py
from __future__ import annotations import uuid import warnings from typing import Any, Dict, Iterable, List, Optional, Tuple import numpy as np from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.utils import get_from_dict_or_env from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance IMPORT_OPENSEARCH_PY_ERROR = ( "Could not import OpenSearch. Please install it with `pip install opensearch-py`." ) SCRIPT_SCORING_SEARCH = "script_scoring" PAINLESS_SCRIPTING_SEARCH = "painless_scripting" MATCH_ALL_QUERY = {"match_all": {}} # type: Dict def _import_opensearch() -> Any: """Import OpenSearch if available, otherwise raise error.""" try: from opensearchpy import OpenSearch except ImportError: raise ImportError(IMPORT_OPENSEARCH_PY_ERROR) return OpenSearch def _import_bulk() -> Any: """Import bulk if available, otherwise raise error.""" try: from opensearchpy.helpers import bulk except ImportError: raise ImportError(IMPORT_OPENSEARCH_PY_ERROR) return bulk def _import_not_found_error() -> Any: """Import not found error if available, otherwise raise error.""" try: from opensearchpy.exceptions import NotFoundError except ImportError: raise ImportError(IMPORT_OPENSEARCH_PY_ERROR) return NotFoundError def _get_opensearch_client(opensearch_url: str, **kwargs: Any) -> Any: """Get OpenSearch client from the opensearch_url, otherwise raise error.""" try: opensearch = _import_opensearch() client = opensearch(opensearch_url, **kwargs) except ValueError as e: raise ImportError( f"OpenSearch client string provided is not in proper format. " f"Got error: {e} " ) return client def _validate_embeddings_and_bulk_size(embeddings_length: int, bulk_size: int) -> None: """Validate Embeddings Length and Bulk Size.""" if embeddings_length == 0: raise RuntimeError("Embeddings size is zero") if bulk_size < embeddings_length: raise RuntimeError( f"The embeddings count, {embeddings_length} is more than the " f"[bulk_size], {bulk_size}. Increase the value of [bulk_size]." 
        )


def _validate_aoss_with_engines(is_aoss: bool, engine: str) -> None:
    """Validate AOSS with the engine."""
    if is_aoss and engine != "nmslib" and engine != "faiss":
        raise ValueError(
            "Amazon OpenSearch Service Serverless only "
            "supports `nmslib` or `faiss` engines"
        )


def _is_aoss_enabled(http_auth: Any) -> bool:
    """Check whether http_auth is configured for the `aoss` (Amazon OpenSearch
    Serverless) service."""
    if (
        http_auth is not None
        and hasattr(http_auth, "service")
        and http_auth.service == "aoss"
    ):
        return True
    return False


def _bulk_ingest_embeddings(
    client: Any,
    index_name: str,
    embeddings: List[List[float]],
    texts: Iterable[str],
    metadatas: Optional[List[dict]] = None,
    ids: Optional[List[str]] = None,
    vector_field: str = "vector_field",
    text_field: str = "text",
    mapping: Optional[Dict] = None,
    max_chunk_bytes: Optional[int] = 1 * 1024 * 1024,
    is_aoss: bool = False,
) -> List[str]:
    """Bulk Ingest Embeddings into given index."""
    if not mapping:
        mapping = dict()

    bulk = _import_bulk()
    not_found_error = _import_not_found_error()
    requests = []
    return_ids = []

    try:
        client.indices.get(index=index_name)
    except not_found_error:
        client.indices.create(index=index_name, body=mapping)

    for i, text in enumerate(texts):
        metadata = metadatas[i] if metadatas else {}
        _id = ids[i] if ids else str(uuid.uuid4())
        request = {
            "_op_type": "index",
            "_index": index_name,
            vector_field: embeddings[i],
            text_field: text,
            "metadata": metadata,
        }
        if is_aoss:
            request["id"] = _id
        else:
            request["_id"] = _id
        requests.append(request)
        return_ids.append(_id)
    bulk(client, requests, max_chunk_bytes=max_chunk_bytes)
    if not is_aoss:
        client.indices.refresh(index=index_name)
    return return_ids


def _default_scripting_text_mapping(
    dim: int,
    vector_field: str = "vector_field",
) -> Dict:
    """For Painless Scripting or Script Scoring, the default mapping to create index."""
    return {
        "mappings": {
            "properties": {
                vector_field: {"type": "knn_vector", "dimension": dim},
            }
        }
    }


def _default_text_mapping(
    dim: int,
    engine: str = "nmslib",
    space_type: str = "l2",
    ef_search: int = 512,
    ef_construction: int = 512,
    m: int = 16,
    vector_field: str = "vector_field",
) -> Dict:
    """For Approximate k-NN Search, this is the default mapping to create index."""
    return {
        "settings": {"index": {"knn": True, "knn.algo_param.ef_search": ef_search}},
        "mappings": {
            "properties": {
                vector_field: {
                    "type": "knn_vector",
                    "dimension": dim,
                    "method": {
                        "name": "hnsw",
                        "space_type": space_type,
                        "engine": engine,
                        "parameters": {"ef_construction": ef_construction, "m": m},
                    },
                }
            }
        },
    }


def _default_approximate_search_query(
    query_vector: List[float],
    k: int = 4,
    vector_field: str = "vector_field",
) -> Dict:
    """For Approximate k-NN Search, this is the default query."""
    return {
        "size": k,
        "query": {"knn": {vector_field: {"vector": query_vector, "k": k}}},
    }


def _approximate_search_query_with_boolean_filter(
    query_vector: List[float],
    boolean_filter: Dict,
    k: int = 4,
    vector_field: str = "vector_field",
    subquery_clause: str = "must",
) -> Dict:
    """For Approximate k-NN Search, with Boolean Filter."""
    return {
        "size": k,
        "query": {
            "bool": {
                "filter": boolean_filter,
                subquery_clause: [
                    {"knn": {vector_field: {"vector": query_vector, "k": k}}}
                ],
            }
        },
    }


def _approximate_search_query_with_efficient_filter(
    query_vector: List[float],
    efficient_filter: Dict,
    k: int = 4,
    vector_field: str = "vector_field",
) -> Dict:
    """For Approximate k-NN Search, with Efficient Filter for Lucene and
    Faiss Engines."""
    search_query = _default_approximate_search_query(
        query_vector, k=k, vector_field=vector_field
    )
search_query["query"]["knn"][vector_field]["filter"] = efficient_filter return search_query def _default_script_query( query_vector: List[float], k: int = 4, space_type: str = "l2", pre_filter: Optional[Dict] = None, vector_field: str = "vector_field", ) -> Dict: """For Script Scoring Search, this is the default query.""" if not pre_filter: pre_filter = MATCH_ALL_QUERY return { "size": k, "query": { "script_score": { "query": pre_filter, "script": { "source": "knn_score", "lang": "knn", "params": { "field": vector_field, "query_value": query_vector, "space_type": space_type, }, }, } }, } def __get_painless_scripting_source( space_type: str, vector_field: str = "vector_field" ) -> str: """For Painless Scripting, it returns the script source based on space type.""" source_value = ( "(1.0 + " + space_type + "(params.query_value, doc['" + vector_field + "']))" ) if space_type == "cosineSimilarity": return source_value else: return "1/" + source_value def _default_painless_scripting_query( query_vector: List[float], k: int = 4, space_type: str = "l2Squared", pre_filter: Optional[Dict] = None, vector_field: str = "vector_field", ) -> Dict: """For Painless Scripting Search, this is the default query.""" if not pre_filter: pre_filter = MATCH_ALL_QUERY source = __get_painless_scripting_source(space_type, vector_field=vector_field) return { "size": k, "query": { "script_score": { "query": pre_filter, "script": { "source": source, "params": { "field": vector_field, "query_value": query_vector, }, }, } }, } class OpenSearchVectorSearch(VectorStore): """`Amazon OpenSearch Vector Engine` vector store. Example: .. code-block:: python from langchain_community.vectorstores import OpenSearchVectorSearch opensearch_vector_search = OpenSearchVectorSearch( "http://localhost:9200", "embeddings", embedding_function ) """ def __init__( self, opensearch_url: str, index_name: str, embedding_function: Embeddings, **kwargs: Any, ): """Initialize with necessary components.""" self.embedding_function = embedding_function self.index_name = index_name http_auth = kwargs.get("http_auth") self.is_aoss = _is_aoss_enabled(http_auth=http_auth) self.client = _get_opensearch_client(opensearch_url, **kwargs) self.engine = kwargs.get("engine") @property def embeddings(self) -> Embeddings: return self.embedding_function def __add( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, bulk_size: int = 500, **kwargs: Any, ) -> List[str]: _validate_embeddings_and_bulk_size(len(embeddings), bulk_size) index_name = kwargs.get("index_name", self.index_name) text_field = kwargs.get("text_field", "text") dim = len(embeddings[0]) engine = kwargs.get("engine", "nmslib") space_type = kwargs.get("space_type", "l2") ef_search = kwargs.get("ef_search", 512) ef_construction = kwargs.get("ef_construction", 512) m = kwargs.get("m", 16) vector_field = kwargs.get("vector_field", "vector_field") max_chunk_bytes = kwargs.get("max_chunk_bytes", 1 * 1024 * 1024) _validate_aoss_with_engines(self.is_aoss, engine) mapping = _default_text_mapping( dim, engine, space_type, ef_search, ef_construction, m, vector_field ) return _bulk_ingest_embeddings( self.client, index_name, embeddings, texts, metadatas=metadatas, ids=ids, vector_field=vector_field, text_field=text_field, mapping=mapping, max_chunk_bytes=max_chunk_bytes, is_aoss=self.is_aoss, ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, bulk_size: int = 500, 
**kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. bulk_size: Bulk API request count; Default: 500 Returns: List of ids from adding the texts into the vectorstore. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". """ embeddings = self.embedding_function.embed_documents(list(texts)) return self.__add( texts, embeddings, metadatas=metadatas, ids=ids, bulk_size=bulk_size, **kwargs, ) def add_embeddings( self, text_embeddings: Iterable[Tuple[str, List[float]]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, bulk_size: int = 500, **kwargs: Any, ) -> List[str]: """Add the given texts and embeddings to the vectorstore. Args: text_embeddings: Iterable pairs of string and embedding to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. bulk_size: Bulk API request count; Default: 500 Returns: List of ids from adding the texts into the vectorstore. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". """ texts, embeddings = zip(*text_embeddings) return self.__add( list(texts), list(embeddings), metadatas=metadatas, ids=ids, bulk_size=bulk_size, **kwargs, ) def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. By default, supports Approximate Search. Also supports Script Scoring and Painless Scripting. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". metadata_field: Document field that metadata is stored in. Defaults to "metadata". Can be set to a special value "*" to include the entire document. Optional Args for Approximate Search: search_type: "approximate_search"; default: "approximate_search" boolean_filter: A Boolean filter is a post filter consists of a Boolean query that contains a k-NN query and a filter. subquery_clause: Query clause on the knn vector field; default: "must" lucene_filter: the Lucene algorithm decides whether to perform an exact k-NN search with pre-filtering or an approximate search with modified post-filtering. (deprecated, use `efficient_filter`) efficient_filter: the Lucene Engine or Faiss Engine decides whether to perform an exact k-NN search with pre-filtering or an approximate search with modified post-filtering. 
Optional Args for Script Scoring Search: search_type: "script_scoring"; default: "approximate_search" space_type: "l2", "l1", "linf", "cosinesimil", "innerproduct", "hammingbit"; default: "l2" pre_filter: script_score query to pre-filter documents before identifying nearest neighbors; default: {"match_all": {}} Optional Args for Painless Scripting Search: search_type: "painless_scripting"; default: "approximate_search" space_type: "l2Squared", "l1Norm", "cosineSimilarity"; default: "l2Squared" pre_filter: script_score query to pre-filter documents before identifying nearest neighbors; default: {"match_all": {}} """ docs_with_scores = self.similarity_search_with_score(query, k, **kwargs) return [doc[0] for doc in docs_with_scores] def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return docs and it's scores most similar to query. By default, supports Approximate Search. Also supports Script Scoring and Painless Scripting. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents along with its scores most similar to the query. Optional Args: same as `similarity_search` """ text_field = kwargs.get("text_field", "text") metadata_field = kwargs.get("metadata_field", "metadata") hits = self._raw_similarity_search_with_score(query=query, k=k, **kwargs) documents_with_scores = [ ( Document( page_content=hit["_source"][text_field], metadata=hit["_source"] if metadata_field == "*" or metadata_field not in hit["_source"] else hit["_source"][metadata_field], ), hit["_score"], ) for hit in hits ] return documents_with_scores def _raw_similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[dict]: """Return raw opensearch documents (dict) including vectors, scores most similar to query. By default, supports Approximate Search. Also supports Script Scoring and Painless Scripting. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of dict with its scores most similar to the query. Optional Args: same as `similarity_search` """ embedding = self.embedding_function.embed_query(query) search_type = kwargs.get("search_type", "approximate_search") vector_field = kwargs.get("vector_field", "vector_field") index_name = kwargs.get("index_name", self.index_name) filter = kwargs.get("filter", {}) if ( self.is_aoss and search_type != "approximate_search" and search_type != SCRIPT_SCORING_SEARCH ): raise ValueError( "Amazon OpenSearch Service Serverless only " "supports `approximate_search` and `script_scoring`" ) if search_type == "approximate_search": boolean_filter = kwargs.get("boolean_filter", {}) subquery_clause = kwargs.get("subquery_clause", "must") efficient_filter = kwargs.get("efficient_filter", {}) # `lucene_filter` is deprecated, added for Backwards Compatibility lucene_filter = kwargs.get("lucene_filter", {}) if boolean_filter != {} and efficient_filter != {}: raise ValueError( "Both `boolean_filter` and `efficient_filter` are provided which " "is invalid" ) if lucene_filter != {} and efficient_filter != {}: raise ValueError( "Both `lucene_filter` and `efficient_filter` are provided which " "is invalid. `lucene_filter` is deprecated" ) if lucene_filter != {} and boolean_filter != {}: raise ValueError( "Both `lucene_filter` and `boolean_filter` are provided which " "is invalid. 
`lucene_filter` is deprecated" ) if ( efficient_filter == {} and boolean_filter == {} and lucene_filter == {} and filter != {} ): if self.engine in ["faiss", "lucene"]: efficient_filter = filter else: boolean_filter = filter if boolean_filter != {}: search_query = _approximate_search_query_with_boolean_filter( embedding, boolean_filter, k=k, vector_field=vector_field, subquery_clause=subquery_clause, ) elif efficient_filter != {}: search_query = _approximate_search_query_with_efficient_filter( embedding, efficient_filter, k=k, vector_field=vector_field ) elif lucene_filter != {}: warnings.warn( "`lucene_filter` is deprecated. Please use the keyword argument" " `efficient_filter`" ) search_query = _approximate_search_query_with_efficient_filter( embedding, lucene_filter, k=k, vector_field=vector_field ) else: search_query = _default_approximate_search_query( embedding, k=k, vector_field=vector_field ) elif search_type == SCRIPT_SCORING_SEARCH: space_type = kwargs.get("space_type", "l2") pre_filter = kwargs.get("pre_filter", MATCH_ALL_QUERY) search_query = _default_script_query( embedding, k, space_type, pre_filter, vector_field ) elif search_type == PAINLESS_SCRIPTING_SEARCH: space_type = kwargs.get("space_type", "l2Squared") pre_filter = kwargs.get("pre_filter", MATCH_ALL_QUERY) search_query = _default_painless_scripting_query( embedding, k, space_type, pre_filter, vector_field ) else: raise ValueError("Invalid `search_type` provided as an argument") response = self.client.search(index=index_name, body=search_query) return [hit for hit in response["hits"]["hits"]] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> list[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ vector_field = kwargs.get("vector_field", "vector_field") text_field = kwargs.get("text_field", "text") metadata_field = kwargs.get("metadata_field", "metadata") # Get embedding of the user query embedding = self.embedding_function.embed_query(query) # Do ANN/KNN search to get top fetch_k results where fetch_k >= k results = self._raw_similarity_search_with_score(query, fetch_k, **kwargs) embeddings = [result["_source"][vector_field] for result in results] # Rerank top k results using MMR, (mmr_selected is a list of indices) mmr_selected = maximal_marginal_relevance( np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult ) return [ Document( page_content=results[i]["_source"][text_field], metadata=results[i]["_source"][metadata_field], ) for i in mmr_selected ] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, bulk_size: int = 500, ids: Optional[List[str]] = None, **kwargs: Any, ) -> OpenSearchVectorSearch: """Construct OpenSearchVectorSearch wrapper from raw texts. Example: .. 
code-block:: python from langchain_community.vectorstores import OpenSearchVectorSearch from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() opensearch_vector_search = OpenSearchVectorSearch.from_texts( texts, embeddings, opensearch_url="http://localhost:9200" ) OpenSearch by default supports Approximate Search powered by nmslib, faiss and lucene engines recommended for large datasets. Also supports brute force search through Script Scoring and Painless Scripting. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". Optional Keyword Args for Approximate Search: engine: "nmslib", "faiss", "lucene"; default: "nmslib" space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2" ef_search: Size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches; default: 512 ef_construction: Size of the dynamic list used during k-NN graph creation. Higher values lead to more accurate graph but slower indexing speed; default: 512 m: Number of bidirectional links created for each new element. Large impact on memory consumption. Between 2 and 100; default: 16 Keyword Args for Script Scoring or Painless Scripting: is_appx_search: False """ embeddings = embedding.embed_documents(texts) return cls.from_embeddings( embeddings, texts, embedding, metadatas=metadatas, bulk_size=bulk_size, ids=ids, **kwargs, ) @classmethod def from_embeddings( cls, embeddings: List[List[float]], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, bulk_size: int = 500, ids: Optional[List[str]] = None, **kwargs: Any, ) -> OpenSearchVectorSearch: """Construct OpenSearchVectorSearch wrapper from pre-vectorized embeddings. Example: .. code-block:: python from langchain_community.vectorstores import OpenSearchVectorSearch from langchain_community.embeddings import OpenAIEmbeddings embedder = OpenAIEmbeddings() embeddings = embedder.embed_documents(["foo", "bar"]) opensearch_vector_search = OpenSearchVectorSearch.from_embeddings( embeddings, texts, embedder, opensearch_url="http://localhost:9200" ) OpenSearch by default supports Approximate Search powered by nmslib, faiss and lucene engines recommended for large datasets. Also supports brute force search through Script Scoring and Painless Scripting. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". Optional Keyword Args for Approximate Search: engine: "nmslib", "faiss", "lucene"; default: "nmslib" space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2" ef_search: Size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches; default: 512 ef_construction: Size of the dynamic list used during k-NN graph creation. Higher values lead to more accurate graph but slower indexing speed; default: 512 m: Number of bidirectional links created for each new element. Large impact on memory consumption. 
Between 2 and 100; default: 16 Keyword Args for Script Scoring or Painless Scripting: is_appx_search: False """ opensearch_url = get_from_dict_or_env( kwargs, "opensearch_url", "OPENSEARCH_URL" ) # List of arguments that needs to be removed from kwargs # before passing kwargs to get opensearch client keys_list = [ "opensearch_url", "index_name", "is_appx_search", "vector_field", "text_field", "engine", "space_type", "ef_search", "ef_construction", "m", "max_chunk_bytes", "is_aoss", ] _validate_embeddings_and_bulk_size(len(embeddings), bulk_size) dim = len(embeddings[0]) # Get the index name from either from kwargs or ENV Variable # before falling back to random generation index_name = get_from_dict_or_env( kwargs, "index_name", "OPENSEARCH_INDEX_NAME", default=uuid.uuid4().hex ) is_appx_search = kwargs.get("is_appx_search", True) vector_field = kwargs.get("vector_field", "vector_field") text_field = kwargs.get("text_field", "text") max_chunk_bytes = kwargs.get("max_chunk_bytes", 1 * 1024 * 1024) http_auth = kwargs.get("http_auth") is_aoss = _is_aoss_enabled(http_auth=http_auth) engine = None if is_aoss and not is_appx_search: raise ValueError( "Amazon OpenSearch Service Serverless only " "supports `approximate_search`" ) if is_appx_search: engine = kwargs.get("engine", "nmslib") space_type = kwargs.get("space_type", "l2") ef_search = kwargs.get("ef_search", 512) ef_construction = kwargs.get("ef_construction", 512) m = kwargs.get("m", 16) _validate_aoss_with_engines(is_aoss, engine) mapping = _default_text_mapping( dim, engine, space_type, ef_search, ef_construction, m, vector_field ) else: mapping = _default_scripting_text_mapping(dim) [kwargs.pop(key, None) for key in keys_list] client = _get_opensearch_client(opensearch_url, **kwargs) _bulk_ingest_embeddings( client, index_name, embeddings, texts, ids=ids, metadatas=metadatas, vector_field=vector_field, text_field=text_field, mapping=mapping, max_chunk_bytes=max_chunk_bytes, is_aoss=is_aoss, ) kwargs["engine"] = engine return cls(opensearch_url, index_name, embedding, **kwargs)
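

# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module.
# It assumes an OpenSearch instance reachable at http://localhost:9200 and an
# Embeddings implementation (OpenAIEmbeddings here needs OPENAI_API_KEY); the
# `metadata.page` filter field is illustrative only, based on metadata being
# stored under the default "metadata" field.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from langchain_community.embeddings import OpenAIEmbeddings

    docsearch = OpenSearchVectorSearch.from_texts(
        ["foo", "bar", "baz"],
        OpenAIEmbeddings(),
        metadatas=[{"page": i} for i in range(3)],
        opensearch_url="http://localhost:9200",
    )
    # With the default nmslib engine a plain `filter` kwarg is applied as a
    # boolean post-filter; with faiss/lucene it becomes an efficient pre-filter.
    docs = docsearch.similarity_search(
        "foo", k=2, filter={"term": {"metadata.page": 0}}
    )
    print([d.page_content for d in docs])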
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~dashvector.py
from __future__ import annotations

import logging
import uuid
from typing import (
    Any,
    Iterable,
    List,
    Optional,
    Tuple,
)

import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.utils import get_from_env
from libs.core.langchain_core.vectorstores import VectorStore

from langchain_community.vectorstores.utils import maximal_marginal_relevance

logger = logging.getLogger(__name__)


class DashVector(VectorStore):
    """`DashVector` vector store.

    To use, you should have the ``dashvector`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import DashVector
            from langchain_community.embeddings.openai import OpenAIEmbeddings
            import dashvector

            client = dashvector.Client(api_key="***")
            client.create("langchain", dimension=1024)
            collection = client.get("langchain")
            embeddings = OpenAIEmbeddings()
            vectorstore = DashVector(collection, embeddings.embed_query, "text")
    """

    def __init__(
        self,
        collection: Any,
        embedding: Embeddings,
        text_field: str,
    ):
        """Initialize with DashVector collection."""

        try:
            import dashvector
        except ImportError:
            raise ValueError(
                "Could not import dashvector python package. "
                "Please install it with `pip install dashvector`."
            )

        if not isinstance(collection, dashvector.Collection):
            raise ValueError(
                f"collection should be an instance of dashvector.Collection, "
                f"but got {type(collection)}"
            )

        self._collection = collection
        self._embedding = embedding
        self._text_field = text_field

    def _similarity_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[str] = None,
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query vector, along with scores"""

        # query by vector
        ret = self._collection.query(embedding, topk=k, filter=filter)
        if not ret:
            raise ValueError(
                f"Fail to query docs by vector, error {self._collection.message}"
            )

        docs = []
        for doc in ret:
            metadata = doc.fields
            text = metadata.pop(self._text_field)
            score = doc.score
            docs.append((Document(page_content=text, metadata=metadata), score))
        return docs

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        batch_size: int = 25,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids associated with the texts.
            batch_size: Optional batch size to upsert docs.
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
""" ids = ids or [str(uuid.uuid4().hex) for _ in texts] text_list = list(texts) for i in range(0, len(text_list), batch_size): # batch end end = min(i + batch_size, len(text_list)) batch_texts = text_list[i:end] batch_ids = ids[i:end] batch_embeddings = self._embedding.embed_documents(list(batch_texts)) # batch metadatas if metadatas: batch_metadatas = metadatas[i:end] else: batch_metadatas = [{} for _ in range(i, end)] for metadata, text in zip(batch_metadatas, batch_texts): metadata[self._text_field] = text # batch upsert to collection docs = list(zip(batch_ids, batch_embeddings, batch_metadatas)) ret = self._collection.upsert(docs) if not ret: raise ValueError( f"Fail to upsert docs to dashvector vector database," f"Error: {ret.message}" ) return ids def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool: """Delete by vector ID. Args: ids: List of ids to delete. Returns: True if deletion is successful, False otherwise. """ return bool(self._collection.delete(ids)) def similarity_search( self, query: str, k: int = 4, filter: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to search documents similar to. k: Number of documents to return. Default to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents most similar to the query text. """ docs_and_scores = self.similarity_search_with_relevance_scores(query, k, filter) return [doc for doc, _ in docs_and_scores] def similarity_search_with_relevance_scores( self, query: str, k: int = 4, filter: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query text , alone with relevance scores. Less is more similar, more is more dissimilar. Args: query: input text k: Number of Documents to return. Defaults to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Tuples of (doc, similarity_score) """ embedding = self._embedding.embed_query(query) return self._similarity_search_with_score_by_vector( embedding, k=k, filter=filter ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self._similarity_search_with_score_by_vector( embedding, k, filter ) return [doc for doc, _ in docs_and_scores] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents selected by maximal marginal relevance. 
""" embedding = self._embedding.embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult, filter ) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents selected by maximal marginal relevance. """ # query by vector ret = self._collection.query( embedding, topk=fetch_k, filter=filter, include_vector=True ) if not ret: raise ValueError( f"Fail to query docs by vector, error {self._collection.message}" ) candidate_embeddings = [doc.vector for doc in ret] mmr_selected = maximal_marginal_relevance( np.array(embedding), candidate_embeddings, lambda_mult, k ) metadatas = [ret.output[i].fields for i in mmr_selected] return [ Document(page_content=metadata.pop(self._text_field), metadata=metadata) for metadata in metadatas ] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, dashvector_api_key: Optional[str] = None, dashvector_endpoint: Optional[str] = None, collection_name: str = "langchain", text_field: str = "text", batch_size: int = 25, ids: Optional[List[str]] = None, **kwargs: Any, ) -> DashVector: """Return DashVector VectorStore initialized from texts and embeddings. This is the quick way to get started with dashvector vector store. Example: .. code-block:: python from langchain_community.vectorstores import DashVector from langchain_community.embeddings import OpenAIEmbeddings import dashvector embeddings = OpenAIEmbeddings() dashvector = DashVector.from_documents( docs, embeddings, dashvector_api_key="{DASHVECTOR_API_KEY}" ) """ try: import dashvector except ImportError: raise ValueError( "Could not import dashvector python package. " "Please install it with `pip install dashvector`." ) dashvector_api_key = dashvector_api_key or get_from_env( "dashvector_api_key", "DASHVECTOR_API_KEY" ) dashvector_endpoint = dashvector_endpoint or get_from_env( "dashvector_endpoint", "DASHVECTOR_ENDPOINT", default="dashvector.cn-hangzhou.aliyuncs.com", ) dashvector_client = dashvector.Client( api_key=dashvector_api_key, endpoint=dashvector_endpoint ) dashvector_client.delete(collection_name) collection = dashvector_client.get(collection_name) if not collection: dim = len(embedding.embed_query(texts[0])) # create collection if not existed resp = dashvector_client.create(collection_name, dimension=dim) if resp: collection = dashvector_client.get(collection_name) else: raise ValueError( "Fail to create collection. " f"Error: {resp.message}." ) dashvector_vector_db = cls(collection, embedding, text_field) dashvector_vector_db.add_texts(texts, metadatas, ids, batch_size) return dashvector_vector_db
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~test_tair.py
"""Test tair functionality.""" from libs.core.langchain_core.documents import Document from langchain_community.vectorstores.tair import Tair from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings def test_tair() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch = Tair.from_texts( texts, FakeEmbeddings(), tair_url="redis://localhost:6379" ) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo")]
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~question_answering~map_reduce_prompt.py
# flake8: noqa from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model from libs.core.langchain_core.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from libs.core.langchain_core.prompts.prompt import PromptTemplate question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question. Return any relevant text verbatim. {context} Question: {question} Relevant text, if any:""" QUESTION_PROMPT = PromptTemplate( template=question_prompt_template, input_variables=["context", "question"] ) system_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question. Return any relevant text verbatim. ______________________ {context}""" messages = [ SystemMessagePromptTemplate.from_template(system_template), HumanMessagePromptTemplate.from_template("{question}"), ] CHAT_QUESTION_PROMPT = ChatPromptTemplate.from_messages(messages) QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector( default_prompt=QUESTION_PROMPT, conditionals=[(is_chat_model, CHAT_QUESTION_PROMPT)] ) combine_prompt_template = """Given the following extracted parts of a long document and a question, create a final answer. If you don't know the answer, just say that you don't know. Don't try to make up an answer. QUESTION: Which state/country's law governs the interpretation of the contract? ========= Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights. Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries. Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur, ========= FINAL ANSWER: This Agreement is governed by English law. QUESTION: What did the president say about Michael Jackson? ========= Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. 
Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay. Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. 
\n\nWell I know this nation. ========= FINAL ANSWER: The president did not mention Michael Jackson. QUESTION: {question} ========= {summaries} ========= FINAL ANSWER:""" COMBINE_PROMPT = PromptTemplate( template=combine_prompt_template, input_variables=["summaries", "question"] ) system_template = """Given the following extracted parts of a long document and a question, create a final answer. If you don't know the answer, just say that you don't know. Don't try to make up an answer. ______________________ {summaries}""" messages = [ SystemMessagePromptTemplate.from_template(system_template), HumanMessagePromptTemplate.from_template("{question}"), ] CHAT_COMBINE_PROMPT = ChatPromptTemplate.from_messages(messages) COMBINE_PROMPT_SELECTOR = ConditionalPromptSelector( default_prompt=COMBINE_PROMPT, conditionals=[(is_chat_model, CHAT_COMBINE_PROMPT)] )
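

# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module.
# The selectors return the chat-style prompt for chat models and the plain
# PromptTemplate otherwise; ChatOpenAI is used only as an illustrative chat
# model and requires OPENAI_API_KEY.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from langchain_community.chat_models import ChatOpenAI

    llm = ChatOpenAI()
    question_prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)  # CHAT_QUESTION_PROMPT
    combine_prompt = COMBINE_PROMPT_SELECTOR.get_prompt(llm)  # CHAT_COMBINE_PROMPT
    print(type(question_prompt).__name__, type(combine_prompt).__name__)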
[ "Given the following extracted parts of a long document and a question, create a final answer. \nIf you don't know the answer, just say that you don't know. Don't try to make up an answer.\n______________________\n{summaries}", "Given the following extracted parts of a long document and a question, create a final answer. \nIf you don't know the answer, just say that you don't know. Don't try to make up an answer.\n\nQUESTION: Which state/country's law governs the interpretation of the contract?\n=========\nContent: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.\n\nContent: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.\n\nContent: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,\n=========\nFINAL ANSWER: This Agreement is governed by English law.\n\nQUESTION: What did the president say about Michael Jackson?\n=========\nContent: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.\n\nContent: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. 
\n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.\n\nContent: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.\n\nContent: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.\n=========\nFINAL ANSWER: The president did not mention Michael Jackson.\n\nQUESTION: {question}\n=========\n{summaries}\n=========\nFINAL ANSWER:", "question", "context", "Use the following portion of a long document to see if any of the text is relevant to answer the question. \nReturn any relevant text verbatim.\n{context}\nQuestion: {question}\nRelevant text, if any:", "Use the following portion of a long document to see if any of the text is relevant to answer the question. \nReturn any relevant text verbatim.\n______________________\n{context}", "{question}" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~tensorflow_datasets.py
from typing import Callable, Dict, Iterator, List, Optional from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader from langchain_community.utilities.tensorflow_datasets import TensorflowDatasets class TensorflowDatasetLoader(BaseLoader): """Load from `TensorFlow Dataset`. Attributes: dataset_name: the name of the dataset to load split_name: the name of the split to load. load_max_docs: a limit to the number of loaded documents. Defaults to 100. sample_to_document_function: a function that converts a dataset sample into a Document Example: .. code-block:: python from langchain_community.document_loaders import TensorflowDatasetLoader def mlqaen_example_to_document(example: dict) -> Document: return Document( page_content=decode_to_str(example["context"]), metadata={ "id": decode_to_str(example["id"]), "title": decode_to_str(example["title"]), "question": decode_to_str(example["question"]), "answer": decode_to_str(example["answers"]["text"][0]), }, ) tsds_client = TensorflowDatasetLoader( dataset_name="mlqa/en", split_name="test", load_max_docs=100, sample_to_document_function=mlqaen_example_to_document, ) """ def __init__( self, dataset_name: str, split_name: str, load_max_docs: Optional[int] = 100, sample_to_document_function: Optional[Callable[[Dict], Document]] = None, ): """Initialize the TensorflowDatasetLoader. Args: dataset_name: the name of the dataset to load split_name: the name of the split to load. load_max_docs: a limit to the number of loaded documents. Defaults to 100. sample_to_document_function: a function that converts a dataset sample into a Document. """ self.dataset_name: str = dataset_name self.split_name: str = split_name self.load_max_docs = load_max_docs """The maximum number of documents to load.""" self.sample_to_document_function: Optional[ Callable[[Dict], Document] ] = sample_to_document_function """Custom function that transform a dataset sample into a Document.""" self._tfds_client = TensorflowDatasets( dataset_name=self.dataset_name, split_name=self.split_name, load_max_docs=self.load_max_docs, sample_to_document_function=self.sample_to_document_function, ) def lazy_load(self) -> Iterator[Document]: yield from self._tfds_client.lazy_load() def load(self) -> List[Document]: return list(self.lazy_load())
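The docstring example above relies on a `decode_to_str` helper that is not defined in this file. A minimal sketch of that helper and of lazy loading, assuming TensorFlow is installed and that the mlqa/en fields arrive as scalar byte-string tensors:

import tensorflow as tf  # assumed to be installed alongside tensorflow-datasets

from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders import TensorflowDatasetLoader


def decode_to_str(item: tf.Tensor) -> str:
    """Decode a scalar byte-string tensor from a TFDS sample into a Python str."""
    return item.numpy().decode("utf-8")


def mlqaen_example_to_document(example: dict) -> Document:
    """Map one mlqa/en sample to a Document, mirroring the docstring example."""
    return Document(
        page_content=decode_to_str(example["context"]),
        metadata={"question": decode_to_str(example["question"])},
    )


loader = TensorflowDatasetLoader(
    dataset_name="mlqa/en",
    split_name="test",
    load_max_docs=10,
    sample_to_document_function=mlqaen_example_to_document,
)

# lazy_load streams documents one by one instead of materialising the whole split.
for doc in loader.lazy_load():
    print(doc.metadata["question"])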
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~test_sqlitevss.py
from typing import List, Optional import pytest from libs.core.langchain_core.documents import Document from langchain_community.vectorstores import SQLiteVSS from tests.integration_tests.vectorstores.fake_embeddings import ( FakeEmbeddings, fake_texts, ) def _sqlite_vss_from_texts( metadatas: Optional[List[dict]] = None, drop: bool = True ) -> SQLiteVSS: return SQLiteVSS.from_texts( fake_texts, FakeEmbeddings(), metadatas=metadatas, table="test", db_file=":memory:", ) @pytest.mark.requires("sqlite-vss") def test_sqlitevss() -> None: """Test end to end construction and search.""" docsearch = _sqlite_vss_from_texts() output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={})] @pytest.mark.requires("sqlite-vss") def test_sqlitevss_with_score() -> None: """Test end to end construction and search with scores and IDs.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = _sqlite_vss_from_texts(metadatas=metadatas) output = docsearch.similarity_search_with_score("foo", k=3) docs = [o[0] for o in output] distances = [o[1] for o in output] assert docs == [ Document(page_content="foo", metadata={"page": 0}), Document(page_content="bar", metadata={"page": 1}), Document(page_content="baz", metadata={"page": 2}), ] assert distances[0] < distances[1] < distances[2] @pytest.mark.requires("sqlite-vss") def test_sqlitevss_add_extra() -> None: """Test end to end construction and MRR search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = _sqlite_vss_from_texts(metadatas=metadatas) docsearch.add_texts(texts, metadatas) output = docsearch.similarity_search("foo", k=10) assert len(output) == 6
[]
2024-01-10
mth93/langchain
libs~langchain~tests~integration_tests~test_nuclia_transformer.py
import asyncio import json from typing import Any from unittest import mock from libs.core.langchain_core.documents import Document from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer from langchain.tools.nuclia.tool import NucliaUnderstandingAPI def fakerun(**args: Any) -> Any: async def run(self: Any, **args: Any) -> str: await asyncio.sleep(0.1) data = { "extracted_text": [{"body": {"text": "Hello World"}}], "file_extracted_data": [{"language": "en"}], "field_metadata": [ { "metadata": { "metadata": { "paragraphs": [ {"end": 66, "sentences": [{"start": 1, "end": 67}]} ] } } } ], } return json.dumps(data) return run async def test_nuclia_loader() -> None: with mock.patch( "langchain.tools.nuclia.tool.NucliaUnderstandingAPI._arun", new_callable=fakerun ): with mock.patch("os.environ.get", return_value="_a_key_"): nua = NucliaUnderstandingAPI(enable_ml=False) documents = [ Document(page_content="Hello, my name is Alice", metadata={}), Document(page_content="Hello, my name is Bob", metadata={}), ] nuclia_transformer = NucliaTextTransformer(nua) transformed_documents = await nuclia_transformer.atransform_documents( documents ) assert len(transformed_documents) == 2 assert ( transformed_documents[0].metadata["nuclia"]["file"]["language"] == "en" ) assert ( len( transformed_documents[1].metadata["nuclia"]["metadata"]["metadata"][ "metadata" ]["paragraphs"] ) == 1 )
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~memory~combined.py
import warnings
from typing import Any, Dict, List, Set

from libs.core.langchain_core.memory import BaseMemory
from libs.core.langchain_core.pydantic_v1 import validator

from langchain.memory.chat_memory import BaseChatMemory


class CombinedMemory(BaseMemory):
    """Combining multiple memories' data together."""

    memories: List[BaseMemory]
    """For tracking all the memories that should be accessed."""

    @validator("memories")
    def check_repeated_memory_variable(
        cls, value: List[BaseMemory]
    ) -> List[BaseMemory]:
        all_variables: Set[str] = set()
        for val in value:
            overlap = all_variables.intersection(val.memory_variables)
            if overlap:
                raise ValueError(
                    f"The same variables {overlap} are found in multiple "
                    "memory objects, which is not allowed by CombinedMemory."
                )
            all_variables |= set(val.memory_variables)

        return value

    @validator("memories")
    def check_input_key(cls, value: List[BaseMemory]) -> List[BaseMemory]:
        """Check that if memories are of type BaseChatMemory that input keys exist."""
        for val in value:
            if isinstance(val, BaseChatMemory):
                if val.input_key is None:
                    warnings.warn(
                        "When using CombinedMemory, "
                        "input keys should be set so the input is known. "
                        f"input_key was not set on {val}"
                    )
        return value

    @property
    def memory_variables(self) -> List[str]:
        """All the memory variables that this instance provides.

        Collected from all of the linked memories.
        """
        memory_variables = []

        for memory in self.memories:
            memory_variables.extend(memory.memory_variables)

        return memory_variables

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Load all vars from sub-memories."""
        memory_data: Dict[str, Any] = {}

        # Collect vars from all sub-memories
        for memory in self.memories:
            data = memory.load_memory_variables(inputs)
            for key, value in data.items():
                if key in memory_data:
                    raise ValueError(
                        f"The variable {key} is repeated in the CombinedMemory."
                    )
                memory_data[key] = value

        return memory_data

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this session for every memory."""
        # Save context for all sub-memories
        for memory in self.memories:
            memory.save_context(inputs, outputs)

    def clear(self) -> None:
        """Clear context from this session for every memory."""
        for memory in self.memories:
            memory.clear()
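A minimal usage sketch for CombinedMemory, assuming the standard conversation memory classes from `langchain.memory`; the memory keys are arbitrary but must not overlap, per the validator above:

from langchain.memory import (
    CombinedMemory,
    ConversationBufferMemory,
    ConversationBufferWindowMemory,
)

# Each sub-memory exposes a distinct memory_key, so the repeated-variable
# validator passes; setting input_key avoids the check_input_key warning.
full_history = ConversationBufferMemory(
    memory_key="chat_history", input_key="input"
)
recent_history = ConversationBufferWindowMemory(
    k=2, memory_key="recent_lines", input_key="input"
)

memory = CombinedMemory(memories=[full_history, recent_history])

memory.save_context({"input": "hi"}, {"output": "hello"})
print(memory.load_memory_variables({}))
# -> {'chat_history': 'Human: hi\nAI: hello', 'recent_lines': 'Human: hi\nAI: hello'}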
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~baiducloud_vector_search.py
import logging import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, ) from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore if TYPE_CHECKING: from elasticsearch import Elasticsearch logger = logging.getLogger(__name__) class BESVectorStore(VectorStore): """`Baidu Elasticsearch` vector store. Example: .. code-block:: python from langchain_community.vectorstores import BESVectorStore from langchain_community.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = BESVectorStore( embedding=OpenAIEmbeddings(), index_name="langchain-demo", bes_url="http://localhost:9200" ) Args: index_name: Name of the Elasticsearch index to create. bes_url: URL of the Baidu Elasticsearch instance to connect to. user: Username to use when connecting to Elasticsearch. password: Password to use when connecting to Elasticsearch. More information can be obtained from: https://cloud.baidu.com/doc/BES/s/8llyn0hh4 """ def __init__( self, index_name: str, bes_url: str, user: Optional[str] = None, password: Optional[str] = None, embedding: Optional[Embeddings] = None, **kwargs: Optional[dict], ) -> None: self.embedding = embedding self.index_name = index_name self.query_field = kwargs.get("query_field", "text") self.vector_query_field = kwargs.get("vector_query_field", "vector") self.space_type = kwargs.get("space_type", "cosine") self.index_type = kwargs.get("index_type", "linear") self.index_params = kwargs.get("index_params") or {} if bes_url is not None: self.client = BESVectorStore.bes_client( bes_url=bes_url, username=user, password=password ) else: raise ValueError("""Please specified a bes connection url.""") @property def embeddings(self) -> Optional[Embeddings]: return self.embedding @staticmethod def bes_client( *, bes_url: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, ) -> "Elasticsearch": try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) connection_params: Dict[str, Any] = {} connection_params["hosts"] = [bes_url] if username and password: connection_params["basic_auth"] = (username, password) es_client = elasticsearch.Elasticsearch(**connection_params) try: es_client.info() except Exception as e: logger.error(f"Error connecting to Elasticsearch: {e}") raise e return es_client def _create_index_if_not_exists(self, dims_length: Optional[int] = None) -> None: """Create the index if it doesn't already exist. Args: dims_length: Length of the embedding vectors. """ if self.client.indices.exists(index=self.index_name): logger.info(f"Index {self.index_name} already exists. Skipping creation.") else: if dims_length is None: raise ValueError( "Cannot create index without specifying dims_length " + "when the index doesn't already exist. " ) indexMapping = self._index_mapping(dims_length=dims_length) logger.debug( f"Creating index {self.index_name} with mappings {indexMapping}" ) self.client.indices.create( index=self.index_name, body={ "settings": {"index": {"knn": True}}, "mappings": {"properties": indexMapping}, }, ) def _index_mapping(self, dims_length: Union[int, None]) -> Dict: """ Executes when the index is created. Args: dims_length: Numeric length of the embedding vectors, or None if not using vector-based query. 
index_params: The extra pamameters for creating index. Returns: Dict: The Elasticsearch settings and mappings for the strategy. """ if "linear" == self.index_type: return { self.vector_query_field: { "type": "bpack_vector", "dims": dims_length, "build_index": self.index_params.get("build_index", False), } } elif "hnsw" == self.index_type: return { self.vector_query_field: { "type": "bpack_vector", "dims": dims_length, "index_type": "hnsw", "space_type": self.space_type, "parameters": { "ef_construction": self.index_params.get( "hnsw_ef_construction", 200 ), "m": self.index_params.get("hnsw_m", 4), }, } } else: return { self.vector_query_field: { "type": "bpack_vector", "model_id": self.index_params.get("model_id", ""), } } def delete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Optional[bool]: """Delete documents from the index. Args: ids: List of ids of documents to delete """ try: from elasticsearch.helpers import BulkIndexError, bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) body = [] if ids is None: raise ValueError("ids must be provided.") for _id in ids: body.append({"_op_type": "delete", "_index": self.index_name, "_id": _id}) if len(body) > 0: try: bulk( self.client, body, refresh=kwargs.get("refresh_indices", True), ignore_status=404, ) logger.debug(f"Deleted {len(body)} texts from index") return True except BulkIndexError as e: logger.error(f"Error deleting texts: {e}") raise e else: logger.info("No documents to delete") return False def _query_body( self, query_vector: Union[List[float], None], filter: Optional[dict] = None, search_params: Dict = {}, ) -> Dict: query_vector_body = {"vector": query_vector, "k": search_params.get("k", 2)} if filter is not None and len(filter) != 0: query_vector_body["filter"] = filter if "linear" == self.index_type: query_vector_body["linear"] = True else: query_vector_body["ef"] = search_params.get("ef", 10) return { "size": search_params.get("size", 4), "query": {"knn": {self.vector_query_field: query_vector_body}}, } def _search( self, query: Optional[str] = None, query_vector: Union[List[float], None] = None, filter: Optional[dict] = None, custom_query: Optional[Callable[[Dict, Union[str, None]], Dict]] = None, search_params: Dict = {}, ) -> List[Tuple[Document, float]]: """Return searched documents result from BES Args: query: Text to look up documents similar to. query_vector: Embedding to look up documents similar to. filter: Array of Baidu ElasticSearch filter clauses to apply to the query. custom_query: Function to modify the query body before it is sent to BES. Returns: List of Documents most similar to the query and score for each """ if self.embedding and query is not None: query_vector = self.embedding.embed_query(query) query_body = self._query_body( query_vector=query_vector, filter=filter, search_params=search_params ) if custom_query is not None: query_body = custom_query(query_body, query) logger.debug(f"Calling custom_query, Query body now: {query_body}") logger.debug(f"Query body: {query_body}") # Perform the kNN search on the BES index and return the results. 
response = self.client.search(index=self.index_name, body=query_body) logger.debug(f"response={response}") hits = [hit for hit in response["hits"]["hits"]] docs_and_scores = [ ( Document( page_content=hit["_source"][self.query_field], metadata=hit["_source"]["metadata"], ), hit["_score"], ) for hit in hits ] return docs_and_scores def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the query, in descending order of similarity. """ results = self.similarity_search_with_score( query=query, k=k, filter=filter, **kwargs ) return [doc for doc, _ in results] def similarity_search_with_score( self, query: str, k: int, filter: Optional[dict] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return documents most similar to query, along with scores. Args: query: Text to look up documents similar to. size: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the query and score for each """ search_params = kwargs.get("search_params") or {} if len(search_params) == 0 or search_params.get("size") is None: search_params["size"] = k return self._search(query=query, filter=filter, **kwargs) @classmethod def from_documents( cls, documents: List[Document], embedding: Optional[Embeddings] = None, **kwargs: Any, ) -> "BESVectorStore": """Construct BESVectorStore wrapper from documents. Args: documents: List of documents to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. Do not provide if using a strategy that doesn't require inference. kwargs: create index key words arguments """ vectorStore = BESVectorStore._bes_vector_store(embedding=embedding, **kwargs) # Encode the provided texts and add them to the newly created index. vectorStore.add_documents(documents) return vectorStore @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[Dict[str, Any]]] = None, **kwargs: Any, ) -> "BESVectorStore": """Construct BESVectorStore wrapper from raw documents. Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. kwargs: create index key words arguments """ vectorStore = BESVectorStore._bes_vector_store(embedding=embedding, **kwargs) # Encode the provided texts and add them to the newly created index. vectorStore.add_texts(texts, metadatas=metadatas, **kwargs) return vectorStore def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. Returns: List of ids from adding the texts into the vectorstore. """ try: from elasticsearch.helpers import BulkIndexError, bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." 
) embeddings = [] create_index_if_not_exists = kwargs.get("create_index_if_not_exists", True) ids = kwargs.get("ids", [str(uuid.uuid4()) for _ in texts]) refresh_indices = kwargs.get("refresh_indices", True) requests = [] if self.embedding is not None: embeddings = self.embedding.embed_documents(list(texts)) dims_length = len(embeddings[0]) if create_index_if_not_exists: self._create_index_if_not_exists(dims_length=dims_length) for i, (text, vector) in enumerate(zip(texts, embeddings)): metadata = metadatas[i] if metadatas else {} requests.append( { "_op_type": "index", "_index": self.index_name, self.query_field: text, self.vector_query_field: vector, "metadata": metadata, "_id": ids[i], } ) else: if create_index_if_not_exists: self._create_index_if_not_exists() for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} requests.append( { "_op_type": "index", "_index": self.index_name, self.query_field: text, "metadata": metadata, "_id": ids[i], } ) if len(requests) > 0: try: success, failed = bulk( self.client, requests, stats_only=True, refresh=refresh_indices ) logger.debug( f"Added {success} and failed to add {failed} texts to index" ) logger.debug(f"added texts {ids} to index") return ids except BulkIndexError as e: logger.error(f"Error adding texts: {e}") firstError = e.errors[0].get("index", {}).get("error", {}) logger.error(f"First error reason: {firstError.get('reason')}") raise e else: logger.debug("No texts to add to index") return [] @staticmethod def _bes_vector_store( embedding: Optional[Embeddings] = None, **kwargs: Any ) -> "BESVectorStore": index_name = kwargs.get("index_name") if index_name is None: raise ValueError("Please provide an index_name.") bes_url = kwargs.get("bes_url") if bes_url is None: raise ValueError("Please provided a valid bes connection url") return BESVectorStore(embedding=embedding, **kwargs)
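A usage sketch for the store above; the URL, credentials, and index settings are placeholders, and OpenAI credentials are assumed to be configured for the embeddings:

from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores import BESVectorStore

store = BESVectorStore.from_texts(
    ["doc one", "doc two"],
    embedding=OpenAIEmbeddings(),
    index_name="langchain-demo",
    bes_url="http://localhost:9200",
    index_type="hnsw",  # selects the hnsw branch of _index_mapping
    space_type="cosine",
    index_params={"hnsw_m": 16, "hnsw_ef_construction": 200},
)

# search_params is forwarded into the knn query body built by _query_body.
results = store.similarity_search_with_score(
    "doc one", k=2, search_params={"size": 2}
)
for doc, score in results:
    print(score, doc.page_content)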
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~gigachat.py
from __future__ import annotations import logging from functools import cached_property from typing import Any, AsyncIterator, Dict, Iterator, List, Optional from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.llms import BaseLLM from libs.core.langchain_core.load.serializable import Serializable from libs.core.langchain_core.outputs import Generation, GenerationChunk, LLMResult from libs.core.langchain_core.pydantic_v1 import root_validator logger = logging.getLogger(__name__) class _BaseGigaChat(Serializable): base_url: Optional[str] = None """ Base API URL """ auth_url: Optional[str] = None """ Auth URL """ credentials: Optional[str] = None """ Auth Token """ scope: Optional[str] = None """ Permission scope for access token """ access_token: Optional[str] = None """ Access token for GigaChat """ model: Optional[str] = None """Model name to use.""" user: Optional[str] = None """ Username for authenticate """ password: Optional[str] = None """ Password for authenticate """ timeout: Optional[float] = None """ Timeout for request """ verify_ssl_certs: Optional[bool] = None """ Check certificates for all requests """ ca_bundle_file: Optional[str] = None cert_file: Optional[str] = None key_file: Optional[str] = None key_file_password: Optional[str] = None # Support for connection to GigaChat through SSL certificates profanity: bool = True """ Check for profanity """ streaming: bool = False """ Whether to stream the results or not. """ temperature: Optional[float] = None """What sampling temperature to use.""" max_tokens: Optional[int] = None """ Maximum number of tokens to generate """ @property def _llm_type(self) -> str: return "giga-chat-model" @property def lc_secrets(self) -> Dict[str, str]: return { "credentials": "GIGACHAT_CREDENTIALS", "access_token": "GIGACHAT_ACCESS_TOKEN", "password": "GIGACHAT_PASSWORD", "key_file_password": "GIGACHAT_KEY_FILE_PASSWORD", } @property def lc_serializable(self) -> bool: return True @cached_property def _client(self) -> Any: """Returns GigaChat API client""" import gigachat return gigachat.GigaChat( base_url=self.base_url, auth_url=self.auth_url, credentials=self.credentials, scope=self.scope, access_token=self.access_token, model=self.model, user=self.user, password=self.password, timeout=self.timeout, verify_ssl_certs=self.verify_ssl_certs, ca_bundle_file=self.ca_bundle_file, cert_file=self.cert_file, key_file=self.key_file, key_file_password=self.key_file_password, ) @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate authenticate data in environment and python package is installed.""" try: import gigachat # noqa: F401 except ImportError: raise ImportError( "Could not import gigachat python package. " "Please install it with `pip install gigachat`." ) return values @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { "temperature": self.temperature, "model": self.model, "profanity": self.profanity, "streaming": self.streaming, "max_tokens": self.max_tokens, } class GigaChat(_BaseGigaChat, BaseLLM): """`GigaChat` large language models API. To use, you should pass login and password to access GigaChat API or use token. Example: .. 
code-block:: python from langchain_community.llms import GigaChat giga = GigaChat(credentials=..., verify_ssl_certs=False) """ def _build_payload(self, messages: List[str]) -> Dict[str, Any]: payload: Dict[str, Any] = { "messages": [{"role": "user", "content": m} for m in messages], "profanity_check": self.profanity, } if self.temperature is not None: payload["temperature"] = self.temperature if self.max_tokens is not None: payload["max_tokens"] = self.max_tokens if self.model: payload["model"] = self.model if self.verbose: logger.info("Giga request: %s", payload) return payload def _create_llm_result(self, response: Any) -> LLMResult: generations = [] for res in response.choices: finish_reason = res.finish_reason gen = Generation( text=res.message.content, generation_info={"finish_reason": finish_reason}, ) generations.append([gen]) if finish_reason != "stop": logger.warning( "Giga generation stopped with reason: %s", finish_reason, ) if self.verbose: logger.info("Giga response: %s", res.message.content) token_usage = response.usage llm_output = {"token_usage": token_usage, "model_name": response.model} return LLMResult(generations=generations, llm_output=llm_output) def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> LLMResult: should_stream = stream if stream is not None else self.streaming if should_stream: generation: Optional[GenerationChunk] = None stream_iter = self._stream( prompts[0], stop=stop, run_manager=run_manager, **kwargs ) for chunk in stream_iter: if generation is None: generation = chunk else: generation += chunk assert generation is not None return LLMResult(generations=[[generation]]) payload = self._build_payload(prompts) response = self._client.chat(payload) return self._create_llm_result(response) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> LLMResult: should_stream = stream if stream is not None else self.streaming if should_stream: generation: Optional[GenerationChunk] = None stream_iter = self._astream( prompts[0], stop=stop, run_manager=run_manager, **kwargs ) async for chunk in stream_iter: if generation is None: generation = chunk else: generation += chunk assert generation is not None return LLMResult(generations=[[generation]]) payload = self._build_payload(prompts) response = await self._client.achat(payload) return self._create_llm_result(response) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: payload = self._build_payload([prompt]) for chunk in self._client.stream(payload): if chunk.choices: content = chunk.choices[0].delta.content yield GenerationChunk(text=content) if run_manager: run_manager.on_llm_new_token(content) async def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: payload = self._build_payload([prompt]) async for chunk in self._client.astream(payload): if chunk.choices: content = chunk.choices[0].delta.content yield GenerationChunk(text=content) if run_manager: await run_manager.on_llm_new_token(content) def get_num_tokens(self, text: str) -> int: """Count approximate number of tokens""" return round(len(text) / 4.6)
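A short usage sketch; the credentials value is a placeholder, and `verify_ssl_certs=False` mirrors the docstring example:

from langchain_community.llms import GigaChat

giga = GigaChat(credentials="<authorization-token>", verify_ssl_certs=False)

# Single completion
print(giga.invoke("Tell me a joke"))

# Token-by-token streaming, backed by the _stream implementation above
for chunk in giga.stream("Tell me a joke"):
    print(chunk, end="", flush=True)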
[]
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~test_text_splitter.py
"""Test text splitting functionality.""" import re from pathlib import Path from typing import List import pytest from libs.core.langchain_core.documents import Document from langchain.text_splitter import ( CharacterTextSplitter, HTMLHeaderTextSplitter, Language, MarkdownHeaderTextSplitter, PythonCodeTextSplitter, RecursiveCharacterTextSplitter, Tokenizer, split_text_on_tokens, ) FAKE_PYTHON_TEXT = """ class Foo: def bar(): def foo(): def testing_func(): def bar(): """ def test_character_text_splitter() -> None: """Test splitting by character count.""" text = "foo bar baz 123" splitter = CharacterTextSplitter(separator=" ", chunk_size=7, chunk_overlap=3) output = splitter.split_text(text) expected_output = ["foo bar", "bar baz", "baz 123"] assert output == expected_output def test_character_text_splitter_empty_doc() -> None: """Test splitting by character count doesn't create empty documents.""" text = "foo bar" splitter = CharacterTextSplitter(separator=" ", chunk_size=2, chunk_overlap=0) output = splitter.split_text(text) expected_output = ["foo", "bar"] assert output == expected_output def test_character_text_splitter_separtor_empty_doc() -> None: """Test edge cases are separators.""" text = "f b" splitter = CharacterTextSplitter(separator=" ", chunk_size=2, chunk_overlap=0) output = splitter.split_text(text) expected_output = ["f", "b"] assert output == expected_output def test_character_text_splitter_long() -> None: """Test splitting by character count on long words.""" text = "foo bar baz a a" splitter = CharacterTextSplitter(separator=" ", chunk_size=3, chunk_overlap=1) output = splitter.split_text(text) expected_output = ["foo", "bar", "baz", "a a"] assert output == expected_output def test_character_text_splitter_short_words_first() -> None: """Test splitting by character count when shorter words are first.""" text = "a a foo bar baz" splitter = CharacterTextSplitter(separator=" ", chunk_size=3, chunk_overlap=1) output = splitter.split_text(text) expected_output = ["a a", "foo", "bar", "baz"] assert output == expected_output def test_character_text_splitter_longer_words() -> None: """Test splitting by characters when splits not found easily.""" text = "foo bar baz 123" splitter = CharacterTextSplitter(separator=" ", chunk_size=1, chunk_overlap=1) output = splitter.split_text(text) expected_output = ["foo", "bar", "baz", "123"] assert output == expected_output @pytest.mark.parametrize( "separator, is_separator_regex", [(re.escape("."), True), (".", False)] ) def test_character_text_splitter_keep_separator_regex( separator: str, is_separator_regex: bool ) -> None: """Test splitting by characters while keeping the separator that is a regex special character. 
""" text = "foo.bar.baz.123" splitter = CharacterTextSplitter( separator=separator, chunk_size=1, chunk_overlap=0, keep_separator=True, is_separator_regex=is_separator_regex, ) output = splitter.split_text(text) expected_output = ["foo", ".bar", ".baz", ".123"] assert output == expected_output @pytest.mark.parametrize( "separator, is_separator_regex", [(re.escape("."), True), (".", False)] ) def test_character_text_splitter_discard_separator_regex( separator: str, is_separator_regex: bool ) -> None: """Test splitting by characters discarding the separator that is a regex special character.""" text = "foo.bar.baz.123" splitter = CharacterTextSplitter( separator=separator, chunk_size=1, chunk_overlap=0, keep_separator=False, is_separator_regex=is_separator_regex, ) output = splitter.split_text(text) expected_output = ["foo", "bar", "baz", "123"] assert output == expected_output def test_character_text_splitting_args() -> None: """Test invalid arguments.""" with pytest.raises(ValueError): CharacterTextSplitter(chunk_size=2, chunk_overlap=4) def test_merge_splits() -> None: """Test merging splits with a given separator.""" splitter = CharacterTextSplitter(separator=" ", chunk_size=9, chunk_overlap=2) splits = ["foo", "bar", "baz"] expected_output = ["foo bar", "baz"] output = splitter._merge_splits(splits, separator=" ") assert output == expected_output def test_create_documents() -> None: """Test create documents method.""" texts = ["foo bar", "baz"] splitter = CharacterTextSplitter(separator=" ", chunk_size=3, chunk_overlap=0) docs = splitter.create_documents(texts) expected_docs = [ Document(page_content="foo"), Document(page_content="bar"), Document(page_content="baz"), ] assert docs == expected_docs def test_create_documents_with_metadata() -> None: """Test create documents with metadata method.""" texts = ["foo bar", "baz"] splitter = CharacterTextSplitter(separator=" ", chunk_size=3, chunk_overlap=0) docs = splitter.create_documents(texts, [{"source": "1"}, {"source": "2"}]) expected_docs = [ Document(page_content="foo", metadata={"source": "1"}), Document(page_content="bar", metadata={"source": "1"}), Document(page_content="baz", metadata={"source": "2"}), ] assert docs == expected_docs def test_create_documents_with_start_index() -> None: """Test create documents method.""" texts = ["foo bar baz 123"] splitter = CharacterTextSplitter( separator=" ", chunk_size=7, chunk_overlap=3, add_start_index=True ) docs = splitter.create_documents(texts) expected_docs = [ Document(page_content="foo bar", metadata={"start_index": 0}), Document(page_content="bar baz", metadata={"start_index": 4}), Document(page_content="baz 123", metadata={"start_index": 8}), ] assert docs == expected_docs def test_metadata_not_shallow() -> None: """Test that metadatas are not shallow.""" texts = ["foo bar"] splitter = CharacterTextSplitter(separator=" ", chunk_size=3, chunk_overlap=0) docs = splitter.create_documents(texts, [{"source": "1"}]) expected_docs = [ Document(page_content="foo", metadata={"source": "1"}), Document(page_content="bar", metadata={"source": "1"}), ] assert docs == expected_docs docs[0].metadata["foo"] = 1 assert docs[0].metadata == {"source": "1", "foo": 1} assert docs[1].metadata == {"source": "1"} def test_iterative_text_splitter_keep_separator() -> None: chunk_size = 5 output = __test_iterative_text_splitter(chunk_size=chunk_size, keep_separator=True) assert output == [ "....5", "X..3", "Y...4", "X....5", "Y...", ] def test_iterative_text_splitter_discard_separator() -> None: chunk_size = 
5 output = __test_iterative_text_splitter(chunk_size=chunk_size, keep_separator=False) assert output == [ "....5", "..3", "...4", "....5", "...", ] def __test_iterative_text_splitter(chunk_size: int, keep_separator: bool) -> List[str]: chunk_size += 1 if keep_separator else 0 splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, chunk_overlap=0, separators=["X", "Y"], keep_separator=keep_separator, ) text = "....5X..3Y...4X....5Y..." output = splitter.split_text(text) for chunk in output: assert len(chunk) <= chunk_size, f"Chunk is larger than {chunk_size}" return output def test_iterative_text_splitter() -> None: """Test iterative text splitter.""" text = """Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f. This is a weird text to write, but gotta test the splittingggg some how. Bye!\n\n-H.""" splitter = RecursiveCharacterTextSplitter(chunk_size=10, chunk_overlap=1) output = splitter.split_text(text) expected_output = [ "Hi.", "I'm", "Harrison.", "How? Are?", "You?", "Okay then", "f f f f.", "This is a", "weird", "text to", "write,", "but gotta", "test the", "splitting", "gggg", "some how.", "Bye!", "-H.", ] assert output == expected_output def test_split_documents() -> None: """Test split_documents.""" splitter = CharacterTextSplitter(separator="", chunk_size=1, chunk_overlap=0) docs = [ Document(page_content="foo", metadata={"source": "1"}), Document(page_content="bar", metadata={"source": "2"}), Document(page_content="baz", metadata={"source": "1"}), ] expected_output = [ Document(page_content="f", metadata={"source": "1"}), Document(page_content="o", metadata={"source": "1"}), Document(page_content="o", metadata={"source": "1"}), Document(page_content="b", metadata={"source": "2"}), Document(page_content="a", metadata={"source": "2"}), Document(page_content="r", metadata={"source": "2"}), Document(page_content="b", metadata={"source": "1"}), Document(page_content="a", metadata={"source": "1"}), Document(page_content="z", metadata={"source": "1"}), ] assert splitter.split_documents(docs) == expected_output def test_python_text_splitter() -> None: splitter = PythonCodeTextSplitter(chunk_size=30, chunk_overlap=0) splits = splitter.split_text(FAKE_PYTHON_TEXT) split_0 = """class Foo:\n\n def bar():""" split_1 = """def foo():""" split_2 = """def testing_func():""" split_3 = """def bar():""" expected_splits = [split_0, split_1, split_2, split_3] assert splits == expected_splits CHUNK_SIZE = 16 def test_python_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.PYTHON, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ def hello_world(): print("Hello, World!") # Call the function hello_world() """ chunks = splitter.split_text(code) assert chunks == [ "def", "hello_world():", 'print("Hello,', 'World!")', "# Call the", "function", "hello_world()", ] def test_golang_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.GO, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ package main import "fmt" func helloWorld() { fmt.Println("Hello, World!") } func main() { helloWorld() } """ chunks = splitter.split_text(code) assert chunks == [ "package main", 'import "fmt"', "func", "helloWorld() {", 'fmt.Println("He', "llo,", 'World!")', "}", "func main() {", "helloWorld()", "}", ] def test_rst_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.RST, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ Sample Document =============== Section ------- This is the content 
of the section. Lists ----- - Item 1 - Item 2 - Item 3 Comment ******* Not a comment .. This is a comment """ chunks = splitter.split_text(code) assert chunks == [ "Sample Document", "===============", "Section", "-------", "This is the", "content of the", "section.", "Lists", "-----", "- Item 1", "- Item 2", "- Item 3", "Comment", "*******", "Not a comment", ".. This is a", "comment", ] # Special test for special characters code = "harry\n***\nbabylon is" chunks = splitter.split_text(code) assert chunks == ["harry", "***\nbabylon is"] def test_proto_file_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.PROTO, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ syntax = "proto3"; package example; message Person { string name = 1; int32 age = 2; repeated string hobbies = 3; } """ chunks = splitter.split_text(code) assert chunks == [ "syntax =", '"proto3";', "package", "example;", "message Person", "{", "string name", "= 1;", "int32 age =", "2;", "repeated", "string hobbies", "= 3;", "}", ] def test_javascript_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.JS, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ function helloWorld() { console.log("Hello, World!"); } // Call the function helloWorld(); """ chunks = splitter.split_text(code) assert chunks == [ "function", "helloWorld() {", 'console.log("He', "llo,", 'World!");', "}", "// Call the", "function", "helloWorld();", ] def test_cobol_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.COBOL, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ IDENTIFICATION DIVISION. PROGRAM-ID. HelloWorld. DATA DIVISION. WORKING-STORAGE SECTION. 01 GREETING PIC X(12) VALUE 'Hello, World!'. PROCEDURE DIVISION. DISPLAY GREETING. STOP RUN. 
""" chunks = splitter.split_text(code) assert chunks == [ "IDENTIFICATION", "DIVISION.", "PROGRAM-ID.", "HelloWorld.", "DATA DIVISION.", "WORKING-STORAGE", "SECTION.", "01 GREETING", "PIC X(12)", "VALUE 'Hello,", "World!'.", "PROCEDURE", "DIVISION.", "DISPLAY", "GREETING.", "STOP RUN.", ] def test_typescript_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.TS, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ function helloWorld(): void { console.log("Hello, World!"); } // Call the function helloWorld(); """ chunks = splitter.split_text(code) assert chunks == [ "function", "helloWorld():", "void {", 'console.log("He', "llo,", 'World!");', "}", "// Call the", "function", "helloWorld();", ] def test_java_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.JAVA, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ public class HelloWorld { public static void main(String[] args) { System.out.println("Hello, World!"); } } """ chunks = splitter.split_text(code) assert chunks == [ "public class", "HelloWorld {", "public", "static void", "main(String[]", "args) {", "System.out.prin", 'tln("Hello,', 'World!");', "}\n}", ] def test_kotlin_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.KOTLIN, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ class HelloWorld { companion object { @JvmStatic fun main(args: Array<String>) { println("Hello, World!") } } } """ chunks = splitter.split_text(code) assert chunks == [ "class", "HelloWorld {", "companion", "object {", "@JvmStatic", "fun", "main(args:", "Array<String>)", "{", 'println("Hello,', 'World!")', "}\n }", "}", ] def test_csharp_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.CSHARP, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ using System; class Program { static void Main() { int age = 30; // Change the age value as needed // Categorize the age without any console output if (age < 18) { // Age is under 18 } else if (age >= 18 && age < 65) { // Age is an adult } else { // Age is a senior citizen } } } """ chunks = splitter.split_text(code) assert chunks == [ "using System;", "class Program\n{", "static void", "Main()", "{", "int age", "= 30; // Change", "the age value", "as needed", "//", "Categorize the", "age without any", "console output", "if (age", "< 18)", "{", "//", "Age is under 18", "}", "else if", "(age >= 18 &&", "age < 65)", "{", "//", "Age is an adult", "}", "else", "{", "//", "Age is a senior", "citizen", "}\n }", "}", ] def test_cpp_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.CPP, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ #include <iostream> int main() { std::cout << "Hello, World!" << std::endl; return 0; } """ chunks = splitter.split_text(code) assert chunks == [ "#include", "<iostream>", "int main() {", "std::cout", '<< "Hello,', 'World!" 
<<', "std::endl;", "return 0;\n}", ] def test_scala_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.SCALA, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ object HelloWorld { def main(args: Array[String]): Unit = { println("Hello, World!") } } """ chunks = splitter.split_text(code) assert chunks == [ "object", "HelloWorld {", "def", "main(args:", "Array[String]):", "Unit = {", 'println("Hello,', 'World!")', "}\n}", ] def test_ruby_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.RUBY, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ def hello_world puts "Hello, World!" end hello_world """ chunks = splitter.split_text(code) assert chunks == [ "def hello_world", 'puts "Hello,', 'World!"', "end", "hello_world", ] def test_php_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.PHP, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ <?php function hello_world() { echo "Hello, World!"; } hello_world(); ?> """ chunks = splitter.split_text(code) assert chunks == [ "<?php", "function", "hello_world() {", "echo", '"Hello,', 'World!";', "}", "hello_world();", "?>", ] def test_swift_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.SWIFT, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ func helloWorld() { print("Hello, World!") } helloWorld() """ chunks = splitter.split_text(code) assert chunks == [ "func", "helloWorld() {", 'print("Hello,', 'World!")', "}", "helloWorld()", ] def test_rust_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.RUST, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ fn main() { println!("Hello, World!"); } """ chunks = splitter.split_text(code) assert chunks == ["fn main() {", 'println!("Hello', ",", 'World!");', "}"] def test_markdown_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.MARKDOWN, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ # Sample Document ## Section This is the content of the section. ## Lists - Item 1 - Item 2 - Item 3 ### Horizontal lines *********** ____________ ------------------- #### Code blocks ``` This is a code block # sample code a = 1 b = 2 ``` """ chunks = splitter.split_text(code) assert chunks == [ "# Sample", "Document", "## Section", "This is the", "content of the", "section.", "## Lists", "- Item 1", "- Item 2", "- Item 3", "### Horizontal", "lines", "***********", "____________", "---------------", "----", "#### Code", "blocks", "```", "This is a code", "block", "# sample code", "a = 1\nb = 2", "```", ] # Special test for special characters code = "harry\n***\nbabylon is" chunks = splitter.split_text(code) assert chunks == ["harry", "***\nbabylon is"] def test_latex_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.LATEX, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """ Hi Harrison! 
\\chapter{1} """ chunks = splitter.split_text(code) assert chunks == ["Hi Harrison!", "\\chapter{1}"] def test_html_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.HTML, chunk_size=60, chunk_overlap=0 ) code = """ <h1>Sample Document</h1> <h2>Section</h2> <p id="1234">Reference content.</p> <h2>Lists</h2> <ul> <li>Item 1</li> <li>Item 2</li> <li>Item 3</li> </ul> <h3>A block</h3> <div class="amazing"> <p>Some text</p> <p>Some more text</p> </div> """ chunks = splitter.split_text(code) assert chunks == [ "<h1>Sample Document</h1>\n <h2>Section</h2>", '<p id="1234">Reference content.</p>', "<h2>Lists</h2>\n <ul>", "<li>Item 1</li>\n <li>Item 2</li>", "<li>Item 3</li>\n </ul>", "<h3>A block</h3>", '<div class="amazing">', "<p>Some text</p>", "<p>Some more text</p>\n </div>", ] def test_md_header_text_splitter_1() -> None: """Test markdown splitter by header: Case 1.""" markdown_document = ( "# Foo\n\n" " ## Bar\n\n" "Hi this is Jim\n\n" "Hi this is Joe\n\n" " ## Baz\n\n" " Hi this is Molly" ) headers_to_split_on = [ ("#", "Header 1"), ("##", "Header 2"), ] markdown_splitter = MarkdownHeaderTextSplitter( headers_to_split_on=headers_to_split_on, ) output = markdown_splitter.split_text(markdown_document) expected_output = [ Document( page_content="Hi this is Jim \nHi this is Joe", metadata={"Header 1": "Foo", "Header 2": "Bar"}, ), Document( page_content="Hi this is Molly", metadata={"Header 1": "Foo", "Header 2": "Baz"}, ), ] assert output == expected_output def test_md_header_text_splitter_2() -> None: """Test markdown splitter by header: Case 2.""" markdown_document = ( "# Foo\n\n" " ## Bar\n\n" "Hi this is Jim\n\n" "Hi this is Joe\n\n" " ### Boo \n\n" " Hi this is Lance \n\n" " ## Baz\n\n" " Hi this is Molly" ) headers_to_split_on = [ ("#", "Header 1"), ("##", "Header 2"), ("###", "Header 3"), ] markdown_splitter = MarkdownHeaderTextSplitter( headers_to_split_on=headers_to_split_on, ) output = markdown_splitter.split_text(markdown_document) expected_output = [ Document( page_content="Hi this is Jim \nHi this is Joe", metadata={"Header 1": "Foo", "Header 2": "Bar"}, ), Document( page_content="Hi this is Lance", metadata={"Header 1": "Foo", "Header 2": "Bar", "Header 3": "Boo"}, ), Document( page_content="Hi this is Molly", metadata={"Header 1": "Foo", "Header 2": "Baz"}, ), ] assert output == expected_output def test_md_header_text_splitter_3() -> None: """Test markdown splitter by header: Case 3.""" markdown_document = ( "# Foo\n\n" " ## Bar\n\n" "Hi this is Jim\n\n" "Hi this is Joe\n\n" " ### Boo \n\n" " Hi this is Lance \n\n" " #### Bim \n\n" " Hi this is John \n\n" " ## Baz\n\n" " Hi this is Molly" ) headers_to_split_on = [ ("#", "Header 1"), ("##", "Header 2"), ("###", "Header 3"), ("####", "Header 4"), ] markdown_splitter = MarkdownHeaderTextSplitter( headers_to_split_on=headers_to_split_on, ) output = markdown_splitter.split_text(markdown_document) expected_output = [ Document( page_content="Hi this is Jim \nHi this is Joe", metadata={"Header 1": "Foo", "Header 2": "Bar"}, ), Document( page_content="Hi this is Lance", metadata={"Header 1": "Foo", "Header 2": "Bar", "Header 3": "Boo"}, ), Document( page_content="Hi this is John", metadata={ "Header 1": "Foo", "Header 2": "Bar", "Header 3": "Boo", "Header 4": "Bim", }, ), Document( page_content="Hi this is Molly", metadata={"Header 1": "Foo", "Header 2": "Baz"}, ), ] assert output == expected_output @pytest.mark.parametrize("fence", [("```"), ("~~~")]) def 
test_md_header_text_splitter_fenced_code_block(fence: str) -> None: """Test markdown splitter by header: Fenced code block.""" markdown_document = ( "# This is a Header\n\n" f"{fence}\n" "foo()\n" "# Not a header\n" "bar()\n" f"{fence}" ) headers_to_split_on = [ ("#", "Header 1"), ("##", "Header 2"), ] markdown_splitter = MarkdownHeaderTextSplitter( headers_to_split_on=headers_to_split_on, ) output = markdown_splitter.split_text(markdown_document) expected_output = [ Document( page_content=f"{fence}\nfoo()\n# Not a header\nbar()\n{fence}", metadata={"Header 1": "This is a Header"}, ), ] assert output == expected_output @pytest.mark.parametrize(["fence", "other_fence"], [("```", "~~~"), ("~~~", "```")]) def test_md_header_text_splitter_fenced_code_block_interleaved( fence: str, other_fence: str ) -> None: """Test markdown splitter by header: Interleaved fenced code block.""" markdown_document = ( "# This is a Header\n\n" f"{fence}\n" "foo\n" "# Not a header\n" f"{other_fence}\n" "# Not a header\n" f"{fence}" ) headers_to_split_on = [ ("#", "Header 1"), ("##", "Header 2"), ] markdown_splitter = MarkdownHeaderTextSplitter( headers_to_split_on=headers_to_split_on, ) output = markdown_splitter.split_text(markdown_document) expected_output = [ Document( page_content=( f"{fence}\nfoo\n# Not a header\n{other_fence}\n# Not a header\n{fence}" ), metadata={"Header 1": "This is a Header"}, ), ] assert output == expected_output def test_solidity_code_splitter() -> None: splitter = RecursiveCharacterTextSplitter.from_language( Language.SOL, chunk_size=CHUNK_SIZE, chunk_overlap=0 ) code = """pragma solidity ^0.8.20; contract HelloWorld { function add(uint a, uint b) pure public returns(uint) { return a + b; } } """ chunks = splitter.split_text(code) assert chunks == [ "pragma solidity", "^0.8.20;", "contract", "HelloWorld {", "function", "add(uint a,", "uint b) pure", "public", "returns(uint) {", "return a", "+ b;", "}\n }", ] @pytest.mark.requires("lxml") def test_html_header_text_splitter(tmp_path: Path) -> None: splitter = HTMLHeaderTextSplitter( headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")] ) content = """ <h1>Sample Document</h1> <h2>Section</h2> <p id="1234">Reference content.</p> <h2>Lists</h2> <ul> <li>Item 1</li> <li>Item 2</li> <li>Item 3</li> </ul> <h3>A block</h3> <div class="amazing"> <p>Some text</p> <p>Some more text</p> </div> """ docs = splitter.split_text(content) expected = [ Document( page_content="Reference content.", metadata={"Header 1": "Sample Document", "Header 2": "Section"}, ), Document( page_content="Item 1 Item 2 Item 3 \nSome text \nSome more text", metadata={"Header 1": "Sample Document", "Header 2": "Lists"}, ), ] assert docs == expected with open(tmp_path / "doc.html", "w") as tmp: tmp.write(content) docs_from_file = splitter.split_text_from_file(tmp_path / "doc.html") assert docs_from_file == expected def test_split_text_on_tokens() -> None: """Test splitting by tokens per chunk.""" text = "foo bar baz 123" tokenizer = Tokenizer( chunk_overlap=3, tokens_per_chunk=7, decode=(lambda it: "".join(chr(i) for i in it)), encode=(lambda it: [ord(c) for c in it]), ) output = split_text_on_tokens(text=text, tokenizer=tokenizer) expected_output = ["foo bar", "bar baz", "baz 123"] assert output == expected_output
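The final test drives `split_text_on_tokens` with a toy character codec. A hedged sketch of the same helper wired to a real tokenizer, assuming `tiktoken` is installed:

import tiktoken

from langchain.text_splitter import Tokenizer, split_text_on_tokens

enc = tiktoken.get_encoding("cl100k_base")

tokenizer = Tokenizer(
    chunk_overlap=16,
    tokens_per_chunk=128,
    decode=enc.decode,                     # List[int] -> str
    encode=lambda text: enc.encode(text),  # str -> List[int]
)

chunks = split_text_on_tokens(
    text="some long text to be split into token-sized chunks", tokenizer=tokenizer
)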
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~imsdb.py
from typing import List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.web_base import WebBaseLoader class IMSDbLoader(WebBaseLoader): """Load `IMSDb` webpages.""" def load(self) -> List[Document]: """Load webpage.""" soup = self.scrape() text = soup.select_one("td[class='scrtext']").text metadata = {"source": self.web_path} return [Document(page_content=text, metadata=metadata)]
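A usage sketch; the script URL is illustrative and any IMSDb script page with a `td.scrtext` cell should work:

from langchain_community.document_loaders import IMSDbLoader

loader = IMSDbLoader("https://imsdb.com/scripts/BlacKkKlansman.html")
docs = loader.load()

print(docs[0].metadata)            # {'source': 'https://imsdb.com/scripts/BlacKkKlansman.html'}
print(docs[0].page_content[:250])  # opening of the script text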
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~retrievers~arcee.py
from typing import Any, Dict, List, Optional from libs.core.langchain_core.callbacks import CallbackManagerForRetrieverRun from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import Extra, SecretStr, root_validator from libs.core.langchain_core.retrievers import BaseRetriever from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env from langchain_community.utilities.arcee import ArceeWrapper, DALMFilter class ArceeRetriever(BaseRetriever): """Document retriever for Arcee's Domain Adapted Language Models (DALMs). To use, set the ``ARCEE_API_KEY`` environment variable with your Arcee API key, or pass ``arcee_api_key`` as a named parameter. Example: .. code-block:: python from langchain_community.retrievers import ArceeRetriever retriever = ArceeRetriever( model="DALM-PubMed", arcee_api_key="ARCEE-API-KEY" ) documents = retriever.get_relevant_documents("AI-driven music therapy") """ _client: Optional[ArceeWrapper] = None #: :meta private: """Arcee client.""" arcee_api_key: SecretStr """Arcee API Key""" model: str """Arcee DALM name""" arcee_api_url: str = "https://api.arcee.ai" """Arcee API URL""" arcee_api_version: str = "v2" """Arcee API Version""" arcee_app_url: str = "https://app.arcee.ai" """Arcee App URL""" model_kwargs: Optional[Dict[str, Any]] = None """Keyword arguments to pass to the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid underscore_attrs_are_private = True def __init__(self, **data: Any) -> None: """Initializes private fields.""" super().__init__(**data) self._client = ArceeWrapper( arcee_api_key=self.arcee_api_key.get_secret_value(), arcee_api_url=self.arcee_api_url, arcee_api_version=self.arcee_api_version, model_kwargs=self.model_kwargs, model_name=self.model, ) self._client.validate_model_training_status() @root_validator() def validate_environments(cls, values: Dict) -> Dict: """Validate Arcee environment variables.""" # validate env vars values["arcee_api_key"] = convert_to_secret_str( get_from_dict_or_env( values, "arcee_api_key", "ARCEE_API_KEY", ) ) values["arcee_api_url"] = get_from_dict_or_env( values, "arcee_api_url", "ARCEE_API_URL", ) values["arcee_app_url"] = get_from_dict_or_env( values, "arcee_app_url", "ARCEE_APP_URL", ) values["arcee_api_version"] = get_from_dict_or_env( values, "arcee_api_version", "ARCEE_API_VERSION", ) # validate model kwargs if values["model_kwargs"]: kw = values["model_kwargs"] # validate size if kw.get("size") is not None: if not kw.get("size") >= 0: raise ValueError("`size` must not be negative.") # validate filters if kw.get("filters") is not None: if not isinstance(kw.get("filters"), List): raise ValueError("`filters` must be a list.") for f in kw.get("filters"): DALMFilter(**f) return values def _get_relevant_documents( self, query: str, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any ) -> List[Document]: """Retrieve {size} contexts with your retriever for a given query Args: query: Query to submit to the model size: The max number of context results to retrieve. Defaults to 3. (Can be less if filters are provided). filters: Filters to apply to the context dataset. """ try: if not self._client: raise ValueError("Client is not initialized.") return self._client.retrieve(query=query, **kwargs) except Exception as e: raise ValueError(f"Error while retrieving documents: {e}") from e
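A usage sketch extending the docstring example with `model_kwargs`; the model name and API key are placeholders, and only the validated `size` option is shown (filters would need to be a list of `DALMFilter`-compatible dicts, per the validator above):

from langchain_community.retrievers import ArceeRetriever

retriever = ArceeRetriever(
    model="DALM-PubMed",
    arcee_api_key="ARCEE-API-KEY",
    model_kwargs={"size": 5},  # max number of contexts to retrieve; must be >= 0
)

docs = retriever.get_relevant_documents("AI-driven music therapy")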
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~retrievers~test_qdrant_sparse_vector_retriever.py
import random
import uuid
from typing import List, Tuple

import pytest
from libs.core.langchain_core.documents import Document

from langchain_community.retrievers import QdrantSparseVectorRetriever
from langchain_community.vectorstores.qdrant import QdrantException


def consistent_fake_sparse_encoder(
    query: str, size: int = 100, density: float = 0.7
) -> Tuple[List[int], List[float]]:
    """
    Generates a consistent fake sparse vector.

    Parameters:
    - query (str): The query string to make the function deterministic.
    - size (int): The size of the vector to generate.
    - density (float): The density of the vector to generate.

    Returns:
    - indices (list): List of indices where the non-zero elements are located.
    - values (list): List of corresponding float values at the non-zero indices.
    """
    # Ensure density is within the valid range [0, 1]
    density = max(0.0, min(1.0, density))

    # Use a deterministic seed based on the query
    seed = hash(query)
    random.seed(seed)

    # Calculate the number of non-zero elements based on density
    num_non_zero_elements = int(size * density)

    # Generate random indices without replacement
    indices = sorted(random.sample(range(size), num_non_zero_elements))

    # Generate random float values for the non-zero elements
    values = [random.uniform(0.0, 1.0) for _ in range(num_non_zero_elements)]

    return indices, values


@pytest.fixture
def retriever() -> QdrantSparseVectorRetriever:
    from qdrant_client import QdrantClient, models

    client = QdrantClient(location=":memory:")

    collection_name = uuid.uuid4().hex
    vector_name = uuid.uuid4().hex

    client.recreate_collection(
        collection_name,
        vectors_config={},
        sparse_vectors_config={
            vector_name: models.SparseVectorParams(
                index=models.SparseIndexParams(
                    on_disk=False,
                )
            )
        },
    )

    return QdrantSparseVectorRetriever(
        client=client,
        collection_name=collection_name,
        sparse_vector_name=vector_name,
        sparse_encoder=consistent_fake_sparse_encoder,
    )


def test_invalid_collection_name(retriever: QdrantSparseVectorRetriever) -> None:
    with pytest.raises(QdrantException) as e:
        QdrantSparseVectorRetriever(
            client=retriever.client,
            collection_name="invalid collection",
            sparse_vector_name=retriever.sparse_vector_name,
            sparse_encoder=consistent_fake_sparse_encoder,
        )
    assert "does not exist" in str(e.value)


def test_invalid_sparse_vector_name(retriever: QdrantSparseVectorRetriever) -> None:
    with pytest.raises(QdrantException) as e:
        QdrantSparseVectorRetriever(
            client=retriever.client,
            collection_name=retriever.collection_name,
            sparse_vector_name="invalid sparse vector",
            sparse_encoder=consistent_fake_sparse_encoder,
        )
    assert "does not contain sparse vector" in str(e.value)


def test_add_documents(retriever: QdrantSparseVectorRetriever) -> None:
    documents = [
        Document(page_content="hello world", metadata={"a": 1}),
        Document(page_content="foo bar", metadata={"b": 2}),
        Document(page_content="baz qux", metadata={"c": 3}),
    ]

    ids = retriever.add_documents(documents)

    assert retriever.client.count(retriever.collection_name, exact=True).count == 3

    documents = [
        Document(page_content="hello world"),
        Document(page_content="foo bar"),
        Document(page_content="baz qux"),
    ]

    ids = retriever.add_documents(documents)
    assert len(ids) == 3

    assert retriever.client.count(retriever.collection_name, exact=True).count == 6


def test_add_texts(retriever: QdrantSparseVectorRetriever) -> None:
    retriever.add_texts(
        ["hello world", "foo bar", "baz qux"], [{"a": 1}, {"b": 2}, {"c": 3}]
    )

    assert retriever.client.count(retriever.collection_name, exact=True).count == 3

    retriever.add_texts(["hello world", "foo bar", "baz qux"])

    assert retriever.client.count(retriever.collection_name, exact=True).count == 6


def test_get_relevant_documents(retriever: QdrantSparseVectorRetriever) -> None:
    retriever.add_texts(["Hai there!", "Hello world!", "Foo bar baz!"])
    expected = [Document(page_content="Hai there!")]
    retriever.k = 1
    results = retriever.get_relevant_documents("Hai there!")

    assert len(results) == retriever.k
    assert results == expected
    assert retriever.get_relevant_documents("Hai there!") == expected


def test_get_relevant_documents_with_filter(
    retriever: QdrantSparseVectorRetriever,
) -> None:
    from qdrant_client import models

    retriever.add_texts(
        ["Hai there!", "Hello world!", "Foo bar baz!"],
        [
            {"value": 1},
            {"value": 2},
            {"value": 3},
        ],
    )

    retriever.filter = models.Filter(
        must=[
            models.FieldCondition(
                key="metadata.value", match=models.MatchValue(value=2)
            )
        ]
    )
    results = retriever.get_relevant_documents("Some query")
    assert results[0] == Document(page_content="Hello world!", metadata={"value": 2})
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~edenai~edenai_base_tool.py
from __future__ import annotations

import logging
from abc import abstractmethod
from typing import Any, Dict, List, Optional

import requests
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import root_validator
from libs.core.langchain_core.tools import BaseTool
from libs.core.langchain_core.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class EdenaiTool(BaseTool):
    """
    The base tool for all EdenAI tools.

    You should have the environment variable ``EDENAI_API_KEY`` set with your
    API token. You can find your token here:
    https://app.edenai.run/admin/account/settings
    """

    feature: str
    subfeature: str
    edenai_api_key: Optional[str] = None
    is_async: bool = False

    providers: List[str]
    """Providers to use for the API call."""

    @root_validator(allow_reuse=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        values["edenai_api_key"] = get_from_dict_or_env(
            values, "edenai_api_key", "EDENAI_API_KEY"
        )
        return values

    @staticmethod
    def get_user_agent() -> str:
        from langchain_community import __version__

        return f"langchain/{__version__}"

    def _call_eden_ai(self, query_params: Dict[str, Any]) -> str:
        """
        Make an API call to the EdenAI service with the specified query parameters.

        Args:
            query_params (dict): The parameters to include in the API call.

        Returns:
            str: The parsed response from the EdenAI API call.
        """
        # Make the API call
        headers = {
            "Authorization": f"Bearer {self.edenai_api_key}",
            "User-Agent": self.get_user_agent(),
        }

        url = f"https://api.edenai.run/v2/{self.feature}/{self.subfeature}"

        payload = {
            "providers": str(self.providers),
            "response_as_dict": False,
            "attributes_as_list": True,
            "show_original_response": False,
        }

        payload.update(query_params)

        response = requests.post(url, json=payload, headers=headers)

        self._raise_on_error(response)

        try:
            return self._parse_response(response.json())
        except Exception as e:
            raise RuntimeError(f"An error occurred while running tool: {e}")

    def _raise_on_error(self, response: requests.Response) -> None:
        if response.status_code >= 500:
            raise Exception(f"EdenAI Server: Error {response.status_code}")
        elif response.status_code >= 400:
            raise ValueError(f"EdenAI received an invalid payload: {response.text}")
        elif response.status_code != 200:
            raise Exception(
                f"EdenAI returned an unexpected response with status "
                f"{response.status_code}: {response.text}"
            )

        # case where the EdenAI call succeeded but the provider returned an error
        # (eg: rate limit, server error, etc.)
        if self.is_async is False:
            # Async calls are different and only return a job_id,
            # not the provider response directly
            provider_response = response.json()[0]
            if provider_response.get("status") == "fail":
                err_msg = provider_response["error"]["message"]
                raise ValueError(err_msg)

    @abstractmethod
    def _run(
        self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        pass

    @abstractmethod
    def _parse_response(self, response: Any) -> str:
        """Take a dict response and condense its data into a human-readable string."""
        pass

    def _get_edenai(self, url: str) -> requests.Response:
        headers = {
            "accept": "application/json",
            "authorization": f"Bearer {self.edenai_api_key}",
            "User-Agent": self.get_user_agent(),
        }

        response = requests.get(url, headers=headers)

        self._raise_on_error(response)

        return response

    def _parse_json_multilevel(
        self, extracted_data: dict, formatted_list: list, level: int = 0
    ) -> None:
        for section, subsections in extracted_data.items():
            indentation = " " * level
            if isinstance(subsections, str):
                subsections = subsections.replace("\n", ",")
                formatted_list.append(f"{indentation}{section} : {subsections}")

            elif isinstance(subsections, list):
                formatted_list.append(f"{indentation}{section} : ")
                self._list_handling(subsections, formatted_list, level + 1)

            elif isinstance(subsections, dict):
                formatted_list.append(f"{indentation}{section} : ")
                self._parse_json_multilevel(subsections, formatted_list, level + 1)

    def _list_handling(
        self, subsection_list: list, formatted_list: list, level: int
    ) -> None:
        for list_item in subsection_list:
            if isinstance(list_item, dict):
                self._parse_json_multilevel(list_item, formatted_list, level)

            elif isinstance(list_item, list):
                self._list_handling(list_item, formatted_list, level + 1)

            else:
                formatted_list.append(f"{' ' * level}{list_item}")
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~slack~get_message.py
import json
import logging
from typing import Optional, Type

from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.slack.base import SlackBaseTool


class SlackGetMessageSchema(BaseModel):
    """Input schema for SlackGetMessages."""

    channel_id: str = Field(
        ...,
        description="The channel id, private group, or IM channel to get messages from.",
    )


class SlackGetMessage(SlackBaseTool):
    """Tool that gets Slack messages."""

    name: str = "get_messages"
    description: str = "Use this tool to get messages from a channel."

    args_schema: Type[SlackGetMessageSchema] = SlackGetMessageSchema

    def _run(
        self,
        channel_id: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        logging.getLogger(__name__)

        try:
            result = self.client.conversations_history(channel=channel_id)
            messages = result["messages"]
            filtered_messages = [
                {key: message[key] for key in ("user", "text", "ts")}
                for message in messages
                if "user" in message and "text" in message and "ts" in message
            ]
            return json.dumps(filtered_messages)
        except Exception as e:
            return "Error getting messages: {}".format(e)
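
# Illustrative sketch (not part of the original file): the tool returns a JSON
# string of messages filtered down to the "user", "text" and "ts" keys, so a
# (hypothetical) result might look like:
#
#     [{"user": "U123", "text": "hello", "ts": "1700000000.000100"}]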
[ "Use this tool to get messages from a channel." ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~google_speech_to_text.py
from __future__ import annotations

from typing import TYPE_CHECKING, List, Optional

from libs.core.langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.vertexai import get_client_info

if TYPE_CHECKING:
    from google.cloud.speech_v2 import RecognitionConfig
    from google.protobuf.field_mask_pb2 import FieldMask


class GoogleSpeechToTextLoader(BaseLoader):
    """
    Loader for Google Cloud Speech-to-Text audio transcripts.

    It uses the Google Cloud Speech-to-Text API to transcribe audio files
    and loads the transcribed text into one or more Documents,
    depending on the specified format.

    To use, you should have the ``google-cloud-speech`` python package installed.

    Audio files can be specified via a Google Cloud Storage uri or a local file path.

    For a detailed explanation of Google Cloud Speech-to-Text, refer to the product
    documentation.
    https://cloud.google.com/speech-to-text
    """

    def __init__(
        self,
        project_id: str,
        file_path: str,
        location: str = "us-central1",
        recognizer_id: str = "_",
        config: Optional[RecognitionConfig] = None,
        config_mask: Optional[FieldMask] = None,
    ):
        """
        Initializes the GoogleSpeechToTextLoader.

        Args:
            project_id: Google Cloud Project ID.
            file_path: A Google Cloud Storage URI or a local file path.
            location: Speech-to-Text recognizer location.
            recognizer_id: Speech-to-Text recognizer id.
            config: Recognition options and features.
                For more information:
                https://cloud.google.com/python/docs/reference/speech/latest/google.cloud.speech_v2.types.RecognitionConfig
            config_mask: The list of fields in config that override the values in the
                ``default_recognition_config`` of the recognizer during this
                recognition request.
                For more information:
                https://cloud.google.com/python/docs/reference/speech/latest/google.cloud.speech_v2.types.RecognizeRequest
        """
        try:
            from google.api_core.client_options import ClientOptions
            from google.cloud.speech_v2 import (
                AutoDetectDecodingConfig,
                RecognitionConfig,
                RecognitionFeatures,
                SpeechClient,
            )
        except ImportError as exc:
            raise ImportError(
                "Could not import google-cloud-speech python package. "
                "Please install it with `pip install google-cloud-speech`."
            ) from exc

        self.project_id = project_id
        self.file_path = file_path
        self.location = location
        self.recognizer_id = recognizer_id
        # Config must be set in speech recognition request.
        self.config = config or RecognitionConfig(
            auto_decoding_config=AutoDetectDecodingConfig(),
            language_codes=["en-US"],
            model="chirp",
            features=RecognitionFeatures(
                # Automatic punctuation could be useful for language applications
                enable_automatic_punctuation=True,
            ),
        )
        self.config_mask = config_mask

        self._client = SpeechClient(
            client_info=get_client_info(module="speech-to-text"),
            client_options=(
                ClientOptions(api_endpoint=f"{location}-speech.googleapis.com")
                if location != "global"
                else None
            ),
        )
        self._recognizer_path = self._client.recognizer_path(
            project_id, location, recognizer_id
        )

    def load(self) -> List[Document]:
        """Transcribes the audio file and loads the transcript into documents.

        It uses the Google Cloud Speech-to-Text API to transcribe the audio file
        and blocks until the transcription is finished.
        """
        try:
            from google.cloud.speech_v2 import RecognizeRequest
        except ImportError as exc:
            raise ImportError(
                "Could not import google-cloud-speech python package. "
                "Please install it with `pip install google-cloud-speech`."
            ) from exc

        request = RecognizeRequest(
            recognizer=self._recognizer_path,
            config=self.config,
            config_mask=self.config_mask,
        )

        if "gs://" in self.file_path:
            request.uri = self.file_path
        else:
            with open(self.file_path, "rb") as f:
                request.content = f.read()

        response = self._client.recognize(request=request)

        return [
            Document(
                page_content=result.alternatives[0].transcript,
                metadata={
                    "language_code": result.language_code,
                    "result_end_offset": result.result_end_offset,
                },
            )
            for result in response.results
        ]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~infinity.py
"""written under MIT Licence, Michael Feil 2023.""" import asyncio from concurrent.futures import ThreadPoolExecutor from typing import Any, Callable, Dict, List, Optional, Tuple import aiohttp import numpy as np import requests from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env __all__ = ["InfinityEmbeddings"] class InfinityEmbeddings(BaseModel, Embeddings): """Embedding models for self-hosted https://github.com/michaelfeil/infinity This should also work for text-embeddings-inference and other self-hosted openai-compatible servers. Infinity is a class to interact with Embedding Models on https://github.com/michaelfeil/infinity Example: .. code-block:: python from langchain_community.embeddings import InfinityEmbeddings InfinityEmbeddings( model="BAAI/bge-small", infinity_api_url="http://localhost:7797/v1", ) """ model: str "Underlying Infinity model id." infinity_api_url: str = "http://localhost:7797/v1" """Endpoint URL to use.""" client: Any = None #: :meta private: """Infinity client.""" # LLM call kwargs class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(allow_reuse=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["infinity_api_url"] = get_from_dict_or_env( values, "infinity_api_url", "INFINITY_API_URL" ) values["client"] = TinyAsyncOpenAIInfinityEmbeddingClient( host=values["infinity_api_url"], ) return values def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to Infinity's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = self.client.embed( model=self.model, texts=texts, ) return embeddings async def aembed_documents(self, texts: List[str]) -> List[List[float]]: """Async call out to Infinity's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = await self.client.aembed( model=self.model, texts=texts, ) return embeddings def embed_query(self, text: str) -> List[float]: """Call out to Infinity's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed_documents([text])[0] async def aembed_query(self, text: str) -> List[float]: """Async call out to Infinity's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ embeddings = await self.aembed_documents([text]) return embeddings[0] class TinyAsyncOpenAIInfinityEmbeddingClient: #: :meta private: """A helper tool to embed Infinity. Not part of Langchain's stable API, direct use discouraged. Example: .. 
code-block:: python mini_client = TinyAsyncInfinityEmbeddingClient( ) embeds = mini_client.embed( model="BAAI/bge-small", text=["doc1", "doc2"] ) # or embeds = await mini_client.aembed( model="BAAI/bge-small", text=["doc1", "doc2"] ) """ def __init__( self, host: str = "http://localhost:7797/v1", aiosession: Optional[aiohttp.ClientSession] = None, ) -> None: self.host = host self.aiosession = aiosession if self.host is None or len(self.host) < 3: raise ValueError(" param `host` must be set to a valid url") self._batch_size = 128 @staticmethod def _permute( texts: List[str], sorter: Callable = len ) -> Tuple[List[str], Callable]: """Sort texts in ascending order, and delivers a lambda expr, which can sort a same length list https://github.com/UKPLab/sentence-transformers/blob/ c5f93f70eca933c78695c5bc686ceda59651ae3b/sentence_transformers/SentenceTransformer.py#L156 Args: texts (List[str]): _description_ sorter (Callable, optional): _description_. Defaults to len. Returns: Tuple[List[str], Callable]: _description_ Example: ``` texts = ["one","three","four"] perm_texts, undo = self._permute(texts) texts == undo(perm_texts) ``` """ if len(texts) == 1: # special case query return texts, lambda t: t length_sorted_idx = np.argsort([-sorter(sen) for sen in texts]) texts_sorted = [texts[idx] for idx in length_sorted_idx] return texts_sorted, lambda unsorted_embeddings: [ # noqa E731 unsorted_embeddings[idx] for idx in np.argsort(length_sorted_idx) ] def _batch(self, texts: List[str]) -> List[List[str]]: """ splits Lists of text parts into batches of size max `self._batch_size` When encoding vector database, Args: texts (List[str]): List of sentences self._batch_size (int, optional): max batch size of one request. Returns: List[List[str]]: Batches of List of sentences """ if len(texts) == 1: # special case query return [texts] batches = [] for start_index in range(0, len(texts), self._batch_size): batches.append(texts[start_index : start_index + self._batch_size]) return batches @staticmethod def _unbatch(batch_of_texts: List[List[Any]]) -> List[Any]: if len(batch_of_texts) == 1 and len(batch_of_texts[0]) == 1: # special case query return batch_of_texts[0] texts = [] for sublist in batch_of_texts: texts.extend(sublist) return texts def _kwargs_post_request(self, model: str, texts: List[str]) -> Dict[str, Any]: """Build the kwargs for the Post request, used by sync Args: model (str): _description_ texts (List[str]): _description_ Returns: Dict[str, Collection[str]]: _description_ """ return dict( url=f"{self.host}/embeddings", headers={ # "accept": "application/json", "content-type": "application/json", }, json=dict( input=texts, model=model, ), ) def _sync_request_embed( self, model: str, batch_texts: List[str] ) -> List[List[float]]: response = requests.post( **self._kwargs_post_request(model=model, texts=batch_texts) ) if response.status_code != 200: raise Exception( f"Infinity returned an unexpected response with status " f"{response.status_code}: {response.text}" ) return [e["embedding"] for e in response.json()["data"]] def embed(self, model: str, texts: List[str]) -> List[List[float]]: """call the embedding of model Args: model (str): to embedding model texts (List[str]): List of sentences to embed. 
Returns: List[List[float]]: List of vectors for each sentence """ perm_texts, unpermute_func = self._permute(texts) perm_texts_batched = self._batch(perm_texts) # Request map_args = ( self._sync_request_embed, [model] * len(perm_texts_batched), perm_texts_batched, ) if len(perm_texts_batched) == 1: embeddings_batch_perm = list(map(*map_args)) else: with ThreadPoolExecutor(32) as p: embeddings_batch_perm = list(p.map(*map_args)) embeddings_perm = self._unbatch(embeddings_batch_perm) embeddings = unpermute_func(embeddings_perm) return embeddings async def _async_request( self, session: aiohttp.ClientSession, kwargs: Dict[str, Any] ) -> List[List[float]]: async with session.post(**kwargs) as response: if response.status != 200: raise Exception( f"Infinity returned an unexpected response with status " f"{response.status}: {response.text}" ) embedding = (await response.json())["embeddings"] return [e["embedding"] for e in embedding] async def aembed(self, model: str, texts: List[str]) -> List[List[float]]: """call the embedding of model, async method Args: model (str): to embedding model texts (List[str]): List of sentences to embed. Returns: List[List[float]]: List of vectors for each sentence """ perm_texts, unpermute_func = self._permute(texts) perm_texts_batched = self._batch(perm_texts) # Request if self.aiosession is None: self.aiosession = aiohttp.ClientSession( trust_env=True, connector=aiohttp.TCPConnector(limit=32) ) async with self.aiosession as session: embeddings_batch_perm = await asyncio.gather( *[ self._async_request( session=session, **self._kwargs_post_request(model=model, texts=t), ) for t in perm_texts_batched ] ) embeddings_perm = self._unbatch(embeddings_batch_perm) embeddings = unpermute_func(embeddings_perm) return embeddings
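
# Illustrative sketch (not part of the original file): the length-sorting helper
# returns a callable that restores the original order, mirroring the example in
# the _permute docstring above.
#
#     client = TinyAsyncOpenAIInfinityEmbeddingClient(host="http://localhost:7797/v1")
#     texts = ["one", "three", "four"]
#     perm_texts, undo = client._permute(texts)
#     assert undo(perm_texts) == texts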
[ "application/json" ]
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~output_parsers~test_json.py
from typing import Any, AsyncIterator, Iterator from libs.core.langchain_core.messages import AIMessageChunk from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser GOOD_JSON = """```json { "foo": "bar" } ```""" JSON_WITH_NEW_LINES = """ ```json { "foo": "bar" } ``` """ JSON_WITH_NEW_LINES_INSIDE = """```json { "foo": "bar" } ```""" JSON_WITH_NEW_LINES_EVERYWHERE = """ ```json { "foo": "bar" } ``` """ TICKS_WITH_NEW_LINES_EVERYWHERE = """ ``` { "foo": "bar" } ``` """ JSON_WITH_MARKDOWN_CODE_BLOCK = """```json { "foo": "```bar```" } ```""" JSON_WITH_MARKDOWN_CODE_BLOCK_AND_NEWLINES = """```json { "action": "Final Answer", "action_input": "```bar\n<div id="1" class=\"value\">\n\ttext\n</div>```" } ```""" JSON_WITH_UNESCAPED_QUOTES_IN_NESTED_JSON = """```json { "action": "Final Answer", "action_input": "{"foo": "bar", "bar": "foo"}" } ```""" JSON_WITH_ESCAPED_QUOTES_IN_NESTED_JSON = """```json { "action": "Final Answer", "action_input": "{\"foo\": \"bar\", \"bar\": \"foo\"}" } ```""" JSON_WITH_PYTHON_DICT = """```json { "action": "Final Answer", "action_input": {"foo": "bar", "bar": "foo"} } ```""" JSON_WITH_ESCAPED_DOUBLE_QUOTES_IN_NESTED_JSON = """```json { "action": "Final Answer", "action_input": "{\\"foo\\": \\"bar\\", \\"bar\\": \\"foo\\"}" } ```""" NO_TICKS = """{ "foo": "bar" }""" NO_TICKS_WHITE_SPACE = """ { "foo": "bar" } """ TEXT_BEFORE = """Thought: I need to use the search tool Action: ``` { "foo": "bar" } ```""" TEXT_AFTER = """``` { "foo": "bar" } ``` This should do the trick""" TEXT_BEFORE_AND_AFTER = """Action: Testing ``` { "foo": "bar" } ``` This should do the trick""" TEST_CASES = [ GOOD_JSON, JSON_WITH_NEW_LINES, JSON_WITH_NEW_LINES_INSIDE, JSON_WITH_NEW_LINES_EVERYWHERE, TICKS_WITH_NEW_LINES_EVERYWHERE, NO_TICKS, NO_TICKS_WHITE_SPACE, TEXT_BEFORE, TEXT_AFTER, ] TEST_CASES_ESCAPED_QUOTES = [ JSON_WITH_UNESCAPED_QUOTES_IN_NESTED_JSON, JSON_WITH_ESCAPED_QUOTES_IN_NESTED_JSON, JSON_WITH_ESCAPED_DOUBLE_QUOTES_IN_NESTED_JSON, ] TEST_CASES_PARTIAL = [ ('{"foo": "bar", "bar": "foo"}', '{"foo": "bar", "bar": "foo"}'), ('{"foo": "bar", "bar": "foo', '{"foo": "bar", "bar": "foo"}'), ('{"foo": "bar", "bar": "foo}', '{"foo": "bar", "bar": "foo}"}'), ('{"foo": "bar", "bar": "foo[', '{"foo": "bar", "bar": "foo["}'), ('{"foo": "bar", "bar": "foo\\"', '{"foo": "bar", "bar": "foo\\""}'), ] STREAMED_TOKENS = """ { " setup ": " Why did the bears start a band called Bears Bears Bears ? " , " punchline ": " Because they wanted to play bear -y good music ! 
" , " audience ": [ " Haha " , " So funny " ] } """.splitlines() EXPECTED_STREAMED_JSON = [ {}, {"setup": ""}, {"setup": "Why"}, {"setup": "Why did"}, {"setup": "Why did the"}, {"setup": "Why did the bears"}, {"setup": "Why did the bears start"}, {"setup": "Why did the bears start a"}, {"setup": "Why did the bears start a band"}, {"setup": "Why did the bears start a band called"}, {"setup": "Why did the bears start a band called Bears"}, {"setup": "Why did the bears start a band called Bears Bears"}, {"setup": "Why did the bears start a band called Bears Bears Bears"}, {"setup": "Why did the bears start a band called Bears Bears Bears ?"}, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "", }, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "Because", }, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "Because they", }, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "Because they wanted", }, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "Because they wanted to", }, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "Because they wanted to play", }, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "Because they wanted to play bear", }, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "Because they wanted to play bear -y", }, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "Because they wanted to play bear -y good", }, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "Because they wanted to play bear -y good music", }, { "setup": "Why did the bears start a band called Bears Bears Bears ?", "punchline": "Because they wanted to play bear -y good music !", }, { "punchline": "Because they wanted to play bear -y good music !", "setup": "Why did the bears start a band called Bears Bears Bears ?", "audience": [], }, { "punchline": "Because they wanted to play bear -y good music !", "setup": "Why did the bears start a band called Bears Bears Bears ?", "audience": [""], }, { "punchline": "Because they wanted to play bear -y good music !", "setup": "Why did the bears start a band called Bears Bears Bears ?", "audience": ["Haha"], }, { "punchline": "Because they wanted to play bear -y good music !", "setup": "Why did the bears start a band called Bears Bears Bears ?", "audience": ["Haha", ""], }, { "punchline": "Because they wanted to play bear -y good music !", "setup": "Why did the bears start a band called Bears Bears Bears ?", "audience": ["Haha", "So"], }, { "punchline": "Because they wanted to play bear -y good music !", "setup": "Why did the bears start a band called Bears Bears Bears ?", "audience": ["Haha", "So funny"], }, ] EXPECTED_STREAMED_JSON_DIFF = [ [{"op": "replace", "path": "", "value": {}}], [{"op": "add", "path": "/setup", "value": ""}], [{"op": "replace", "path": "/setup", "value": "Why"}], [{"op": "replace", "path": "/setup", "value": "Why did"}], [{"op": "replace", "path": "/setup", "value": "Why did the"}], [{"op": "replace", "path": "/setup", "value": "Why did the bears"}], [{"op": "replace", "path": "/setup", "value": "Why did the bears start"}], [{"op": "replace", "path": "/setup", "value": "Why did the bears start a"}], [{"op": "replace", "path": "/setup", "value": "Why did the bears start a band"}], [ { "op": 
"replace", "path": "/setup", "value": "Why did the bears start a band called", } ], [ { "op": "replace", "path": "/setup", "value": "Why did the bears start a band called Bears", } ], [ { "op": "replace", "path": "/setup", "value": "Why did the bears start a band called Bears Bears", } ], [ { "op": "replace", "path": "/setup", "value": "Why did the bears start a band called Bears Bears Bears", } ], [ { "op": "replace", "path": "/setup", "value": "Why did the bears start a band called Bears Bears Bears ?", } ], [{"op": "add", "path": "/punchline", "value": ""}], [{"op": "replace", "path": "/punchline", "value": "Because"}], [{"op": "replace", "path": "/punchline", "value": "Because they"}], [{"op": "replace", "path": "/punchline", "value": "Because they wanted"}], [{"op": "replace", "path": "/punchline", "value": "Because they wanted to"}], [{"op": "replace", "path": "/punchline", "value": "Because they wanted to play"}], [ { "op": "replace", "path": "/punchline", "value": "Because they wanted to play bear", } ], [ { "op": "replace", "path": "/punchline", "value": "Because they wanted to play bear -y", } ], [ { "op": "replace", "path": "/punchline", "value": "Because they wanted to play bear -y good", } ], [ { "op": "replace", "path": "/punchline", "value": "Because they wanted to play bear -y good music", } ], [ { "op": "replace", "path": "/punchline", "value": "Because they wanted to play bear -y good music !", } ], [{"op": "add", "path": "/audience", "value": []}], [{"op": "add", "path": "/audience/0", "value": ""}], [{"op": "replace", "path": "/audience/0", "value": "Haha"}], [{"op": "add", "path": "/audience/1", "value": ""}], [{"op": "replace", "path": "/audience/1", "value": "So"}], [{"op": "replace", "path": "/audience/1", "value": "So funny"}], ] def test_partial_functions_json_output_parser() -> None: def input_iter(_: Any) -> Iterator[AIMessageChunk]: for token in STREAMED_TOKENS: yield AIMessageChunk( content="", additional_kwargs={"function_call": {"arguments": token}} ) chain = input_iter | JsonOutputFunctionsParser() assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON def test_partial_functions_json_output_parser_diff() -> None: def input_iter(_: Any) -> Iterator[AIMessageChunk]: for token in STREAMED_TOKENS: yield AIMessageChunk( content="", additional_kwargs={"function_call": {"arguments": token}} ) chain = input_iter | JsonOutputFunctionsParser(diff=True) assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON_DIFF async def test_partial_functions_json_output_parser_async() -> None: async def input_iter(_: Any) -> AsyncIterator[AIMessageChunk]: for token in STREAMED_TOKENS: yield AIMessageChunk( content="", additional_kwargs={"function_call": {"arguments": token}} ) chain = input_iter | JsonOutputFunctionsParser() assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_JSON async def test_partial_functions_json_output_parser_diff_async() -> None: async def input_iter(_: Any) -> AsyncIterator[AIMessageChunk]: for token in STREAMED_TOKENS: yield AIMessageChunk( content="", additional_kwargs={"function_call": {"arguments": token}} ) chain = input_iter | JsonOutputFunctionsParser(diff=True) assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_JSON_DIFF
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~ainetwork~rule.py
import builtins
import json
from typing import Optional, Type

from libs.core.langchain_core.callbacks import AsyncCallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType


class RuleSchema(BaseModel):
    """Schema for rule operations."""

    type: OperationType = Field(...)
    path: str = Field(..., description="Path on the blockchain where the rule applies")
    eval: Optional[str] = Field(
        None, description="eval string to determine permission"
    )


class AINRuleOps(AINBaseTool):
    """Tool for rule operations."""

    name: str = "AINruleOps"
    description: str = """
Covers the write `rule` for the AINetwork Blockchain database. The SET type specifies write permissions using the `eval` variable as a JavaScript eval string.
In order to write with AINvalueOps (SET) at the path, the execution result of the `eval` string must be true.

## Path Rules
1. Allowed characters for directory: `[a-zA-Z_0-9]`
2. Use `$<key>` for template variables as directory.

## Eval String Special Variables
- auth.addr: Address of the writer for the path
- newData: New data for the path
- data: Current data for the path
- currentTime: Time in seconds
- lastBlockNumber: Latest processed block number

## Eval String Functions
- getValue(<path>)
- getRule(<path>)
- getOwner(<path>)
- getFunction(<path>)
- evalRule(<path>, <value to set>, auth, currentTime)
- evalOwner(<path>, 'write_owner', auth)

## SET Example
- type: SET
- path: /apps/langchain_project_1/$from/$to/$img
- eval: auth.addr===$from&&!getValue('/apps/image_db/'+$img)

## GET Example
- type: GET
- path: /apps/langchain_project_1
"""  # noqa: E501
    args_schema: Type[BaseModel] = RuleSchema

    async def _arun(
        self,
        type: OperationType,
        path: str,
        eval: Optional[str] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        from ain.types import ValueOnlyTransactionInput

        try:
            if type is OperationType.SET:
                if eval is None:
                    raise ValueError("'eval' is required for SET operation.")

                res = await self.interface.db.ref(path).setRule(
                    transactionInput=ValueOnlyTransactionInput(
                        value={".rule": {"write": eval}}
                    )
                )
            elif type is OperationType.GET:
                res = await self.interface.db.ref(path).getRule()
            else:
                raise ValueError(f"Unsupported 'type': {type}.")
            return json.dumps(res, ensure_ascii=False)
        except Exception as e:
            return f"{builtins.type(e).__name__}: {str(e)}"
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~retrievers~test_google_vertex_ai_search.py
"""Test Google Vertex AI Search retriever. You need to create a Vertex AI Search app and populate it with data to run the integration tests. Follow the instructions in the example notebook: google_vertex_ai_search.ipynb to set up the app and configure authentication. Set the following environment variables before the tests: export PROJECT_ID=... - set to your Google Cloud project ID export DATA_STORE_ID=... - the ID of the search engine to use for the test """ import os import pytest from libs.core.langchain_core.documents import Document from langchain_community.retrievers.google_vertex_ai_search import ( GoogleCloudEnterpriseSearchRetriever, GoogleVertexAIMultiTurnSearchRetriever, GoogleVertexAISearchRetriever, ) @pytest.mark.requires("google.api_core") def test_google_vertex_ai_search_get_relevant_documents() -> None: """Test the get_relevant_documents() method.""" retriever = GoogleVertexAISearchRetriever() documents = retriever.get_relevant_documents("What are Alphabet's Other Bets?") assert len(documents) > 0 for doc in documents: assert isinstance(doc, Document) assert doc.page_content assert doc.metadata["id"] assert doc.metadata["source"] @pytest.mark.requires("google.api_core") def test_google_vertex_ai_multiturnsearch_get_relevant_documents() -> None: """Test the get_relevant_documents() method.""" retriever = GoogleVertexAIMultiTurnSearchRetriever() documents = retriever.get_relevant_documents("What are Alphabet's Other Bets?") assert len(documents) > 0 for doc in documents: assert isinstance(doc, Document) assert doc.page_content assert doc.metadata["id"] assert doc.metadata["source"] @pytest.mark.requires("google.api_core") def test_google_vertex_ai_search_enterprise_search_deprecation() -> None: """Test the deprecation of GoogleCloudEnterpriseSearchRetriever.""" with pytest.warns( DeprecationWarning, match="GoogleCloudEnterpriseSearchRetriever is deprecated, use GoogleVertexAISearchRetriever", # noqa: E501 ): retriever = GoogleCloudEnterpriseSearchRetriever() os.environ["SEARCH_ENGINE_ID"] = os.getenv("DATA_STORE_ID", "data_store_id") with pytest.warns( DeprecationWarning, match="The `search_engine_id` parameter is deprecated. Use `data_store_id` instead.", # noqa: E501 ): retriever = GoogleCloudEnterpriseSearchRetriever() # Check that mapped methods still work. documents = retriever.get_relevant_documents("What are Alphabet's Other Bets?") assert len(documents) > 0 for doc in documents: assert isinstance(doc, Document) assert doc.page_content assert doc.metadata["id"] assert doc.metadata["source"]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~volcengine_maas.py
from __future__ import annotations

from typing import Any, Dict, Iterator, List, Mapping, Optional, cast

from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.chat_models import BaseChatModel
from libs.core.langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    FunctionMessage,
    HumanMessage,
    SystemMessage,
)
from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult

from langchain_community.llms.volcengine_maas import VolcEngineMaasBase


def _convert_message_to_dict(message: BaseMessage) -> dict:
    if isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {"role": "user", "content": message.content}
    elif isinstance(message, AIMessage):
        message_dict = {"role": "assistant", "content": message.content}
    elif isinstance(message, FunctionMessage):
        message_dict = {"role": "function", "content": message.content}
    else:
        raise ValueError(f"Got unknown type {message}")
    return message_dict


def convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
    """Convert a dict to a message."""
    content = _dict.get("choice", {}).get("message", {}).get("content", "")
    return AIMessage(content=content)


class VolcEngineMaasChat(BaseChatModel, VolcEngineMaasBase):
    """Volc Engine Maas hosts a plethora of models.

    You can utilize these models through this class.

    To use, you should have the ``volcengine`` python package installed and set
    the access key and secret key, which are mandatory parameters. You can find
    help at https://www.volcengine.com/docs/6291/65568.

    The two ways to provide them are as follows:

    * Environment Variable
      Set the environment variables ``VOLC_ACCESSKEY`` and ``VOLC_SECRETKEY``
      with your access key and secret key.

    * Pass Directly to Class
      Example:

        .. code-block:: python

            from langchain_community.chat_models import VolcEngineMaasChat

            model = VolcEngineMaasChat(
                model="skylark-lite-public",
                volc_engine_maas_ak="your_ak",
                volc_engine_maas_sk="your_sk",
            )
    """

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "volc-engine-maas-chat"

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return False

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        return {
            **{"endpoint": self.endpoint, "model": self.model},
            **super()._identifying_params,
        }

    def _convert_prompt_msg_params(
        self,
        messages: List[BaseMessage],
        **kwargs: Any,
    ) -> Dict[str, Any]:
        model_req = {
            "model": {
                "name": self.model,
            }
        }
        if self.model_version is not None:
            model_req["model"]["version"] = self.model_version
        return {
            **model_req,
            "messages": [_convert_message_to_dict(message) for message in messages],
            "parameters": {**self._default_params, **kwargs},
        }

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        params = self._convert_prompt_msg_params(messages, **kwargs)

        for res in self.client.stream_chat(params):
            if res:
                msg = convert_dict_to_message(res)
                yield ChatGenerationChunk(message=AIMessageChunk(content=msg.content))
                if run_manager:
                    run_manager.on_llm_new_token(cast(str, msg.content))

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        completion = ""
        if self.streaming:
            for chunk in self._stream(messages, stop, run_manager, **kwargs):
                completion += chunk.text
        else:
            params = self._convert_prompt_msg_params(messages, **kwargs)
            res = self.client.chat(params)
            msg = convert_dict_to_message(res)
            completion = cast(str, msg.content)

        message = AIMessage(content=completion)
        return ChatResult(generations=[ChatGeneration(message=message)])
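
# Illustrative sketch (not part of the original file): a minimal invocation,
# assuming valid Volcengine credentials and the ``volcengine`` package installed.
#
#     from libs.core.langchain_core.messages import HumanMessage
#
#     chat = VolcEngineMaasChat(
#         model="skylark-lite-public",
#         volc_engine_maas_ak="your_ak",
#         volc_engine_maas_sk="your_sk",
#     )
#     result = chat.invoke([HumanMessage(content="hello")])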
[]