Dataset schema (each record below lists these fields in order):
date_collected: string (1 distinct value)
repo_name: string (length 6 to 116 characters)
file_name: string (length 2 to 220 characters)
file_contents: string (length 13 to 357k characters)
prompts: sequence (list of prompt strings; may be empty)
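A minimal loading sketch (an assumption, not part of the dump): records with this schema can be read with the Hugging Face datasets library. The dataset id below is a placeholder; substitute the real repository id or local data files.

from datasets import load_dataset

# Hypothetical dataset id -- replace with the actual source of this dump.
ds = load_dataset("username/langchain-file-prompts", split="train")

row = ds[0]
print(row["date_collected"])       # e.g. "2024-01-10"
print(row["repo_name"])            # e.g. "mth93/langchain"
print(row["file_name"])            # file path, with "~" used as a separator
print(row["file_contents"][:200])  # raw Python source of the collected file
print(row["prompts"])              # list of prompt strings; may be empty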
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~file_management~write.py
from typing import Optional, Type

from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from libs.core.langchain_core.tools import BaseTool

from langchain_community.tools.file_management.utils import (
    INVALID_PATH_TEMPLATE,
    BaseFileToolMixin,
    FileValidationError,
)


class WriteFileInput(BaseModel):
    """Input for WriteFileTool."""

    file_path: str = Field(..., description="name of file")
    text: str = Field(..., description="text to write to file")
    append: bool = Field(
        default=False, description="Whether to append to an existing file."
    )


class WriteFileTool(BaseFileToolMixin, BaseTool):
    """Tool that writes a file to disk."""

    name: str = "write_file"
    args_schema: Type[BaseModel] = WriteFileInput
    description: str = "Write file to disk"

    def _run(
        self,
        file_path: str,
        text: str,
        append: bool = False,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        try:
            write_path = self.get_relative_path(file_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
        try:
            write_path.parent.mkdir(exist_ok=True, parents=False)
            mode = "a" if append else "w"
            with write_path.open(mode, encoding="utf-8") as f:
                f.write(text)
            return f"File written successfully to {file_path}."
        except Exception as e:
            return "Error: " + str(e)

    # TODO: Add aiofiles method
[ "Write file to disk" ]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~test_tencentvectordb.py
"""Test TencentVectorDB functionality.""" import time from typing import List, Optional from libs.core.langchain_core.documents import Document from langchain_community.vectorstores import TencentVectorDB from langchain_community.vectorstores.tencentvectordb import ConnectionParams from tests.integration_tests.vectorstores.fake_embeddings import ( FakeEmbeddings, fake_texts, ) def _tencent_vector_db_from_texts( metadatas: Optional[List[dict]] = None, drop: bool = True ) -> TencentVectorDB: conn_params = ConnectionParams( url="http://10.0.X.X", key="eC4bLRy2va******************************", username="root", timeout=20, ) return TencentVectorDB.from_texts( fake_texts, FakeEmbeddings(), metadatas=metadatas, connection_params=conn_params, drop_old=drop, ) def test_tencent_vector_db() -> None: """Test end to end construction and search.""" docsearch = _tencent_vector_db_from_texts() output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo")] def test_tencent_vector_db_with_score() -> None: """Test end to end construction and search with scores and IDs.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = _tencent_vector_db_from_texts(metadatas=metadatas) output = docsearch.similarity_search_with_score("foo", k=3) docs = [o[0] for o in output] assert docs == [ Document(page_content="foo", metadata={"page": 0}), Document(page_content="bar", metadata={"page": 1}), Document(page_content="baz", metadata={"page": 2}), ] def test_tencent_vector_db_max_marginal_relevance_search() -> None: """Test end to end construction and MRR search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = _tencent_vector_db_from_texts(metadatas=metadatas) output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3) assert output == [ Document(page_content="foo", metadata={"page": 0}), Document(page_content="bar", metadata={"page": 1}), ] def test_tencent_vector_db_add_extra() -> None: """Test end to end construction and MRR search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = _tencent_vector_db_from_texts(metadatas=metadatas) docsearch.add_texts(texts, metadatas) time.sleep(3) output = docsearch.similarity_search("foo", k=10) assert len(output) == 6 def test_tencent_vector_db_no_drop() -> None: """Test end to end construction and MRR search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = _tencent_vector_db_from_texts(metadatas=metadatas) del docsearch docsearch = _tencent_vector_db_from_texts(metadatas=metadatas, drop=False) time.sleep(3) output = docsearch.similarity_search("foo", k=10) assert len(output) == 6
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~combine_documents~stuff.py
"""Chain that combines documents by stuffing into context.""" from typing import Any, Dict, List, Optional, Tuple from libs.core.langchain_core.documents import Document from libs.core.langchain_core.language_models import LanguageModelLike from libs.core.langchain_core.output_parsers import BaseOutputParser, StrOutputParser from libs.core.langchain_core.prompts import BasePromptTemplate, format_document from libs.core.langchain_core.pydantic_v1 import Extra, Field, root_validator from libs.core.langchain_core.runnables import Runnable, RunnablePassthrough from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import ( DEFAULT_DOCUMENT_PROMPT, DEFAULT_DOCUMENT_SEPARATOR, DOCUMENTS_KEY, BaseCombineDocumentsChain, _validate_prompt, ) from langchain.chains.llm import LLMChain def create_stuff_documents_chain( llm: LanguageModelLike, prompt: BasePromptTemplate, *, output_parser: Optional[BaseOutputParser] = None, document_prompt: Optional[BasePromptTemplate] = None, document_separator: str = DEFAULT_DOCUMENT_SEPARATOR, ) -> Runnable[Dict[str, Any], Any]: """Create a chain for passing a list of Documents to a model. Args: llm: Language model. prompt: Prompt template. Must contain input variable "context", which will be used for passing in the formatted documents. output_parser: Output parser. Defaults to StrOutputParser. document_prompt: Prompt used for formatting each document into a string. Input variables can be "page_content" or any metadata keys that are in all documents. "page_content" will automatically retrieve the `Document.page_content`, and all other inputs variables will be automatically retrieved from the `Document.metadata` dictionary. Default to a prompt that only contains `Document.page_content`. document_separator: String separator to use between formatted document strings. Returns: An LCEL Runnable. The input is a dictionary that must have a "context" key that maps to a List[Document], and any other input variables expected in the prompt. The Runnable return type depends on output_parser used. Example: .. code-block:: python # pip install -U langchain langchain-community from langchain_community.chat_models import ChatOpenAI from libs.core.langchain_core.documents import Document from libs.core.langchain_core.prompts import ChatPromptTemplate from langchain.chains.combine_documents import create_stuff_documents_chain prompt = ChatPromptTemplate.from_messages( [("system", "What are everyone's favorite colors:\n\n{context}")] ) llm = ChatOpenAI(model_name="gpt-3.5-turbo") chain = create_stuff_documents_chain(llm, prompt) docs = [ Document(page_content="Jesse loves red but not yellow"), Document(page_content = "Jamal loves green but not as much as he loves orange") ] chain.invoke({"context": docs}) """ # noqa: E501 _validate_prompt(prompt) _document_prompt = document_prompt or DEFAULT_DOCUMENT_PROMPT _output_parser = output_parser or StrOutputParser() def format_docs(inputs: dict) -> str: return document_separator.join( format_document(doc, _document_prompt) for doc in inputs[DOCUMENTS_KEY] ) return ( RunnablePassthrough.assign(**{DOCUMENTS_KEY: format_docs}).with_config( run_name="format_inputs" ) | prompt | llm | _output_parser ).with_config(run_name="stuff_documents_chain") class StuffDocumentsChain(BaseCombineDocumentsChain): """Chain that combines documents by stuffing into context. This chain takes a list of documents and first combines them into a single string. 
It does this by formatting each document into a string with the `document_prompt` and then joining them together with `document_separator`. It then adds that new string to the inputs with the variable name set by `document_variable_name`. Those inputs are then passed to the `llm_chain`. Example: .. code-block:: python from langchain.chains import StuffDocumentsChain, LLMChain from libs.core.langchain_core.prompts import PromptTemplate from langchain.llms import OpenAI # This controls how each document will be formatted. Specifically, # it will be passed to `format_document` - see that function for more # details. document_prompt = PromptTemplate( input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" llm = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template( "Summarize this content: {context}" ) llm_chain = LLMChain(llm=llm, prompt=prompt) chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name ) """ llm_chain: LLMChain """LLM chain which is called with the formatted document string, along with any other inputs.""" document_prompt: BasePromptTemplate = Field( default_factory=lambda: DEFAULT_DOCUMENT_PROMPT ) """Prompt to use to format each document, gets passed to `format_document`.""" document_variable_name: str """The variable name in the llm_chain to put the documents in. If only one variable in the llm_chain, this need not be provided.""" document_separator: str = "\n\n" """The string with which to join the formatted documents""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def get_default_document_variable_name(cls, values: Dict) -> Dict: """Get default document variable name, if not provided. If only one variable is present in the llm_chain.prompt, we can infer that the formatted documents should be passed in with this variable name. """ llm_chain_variables = values["llm_chain"].prompt.input_variables if "document_variable_name" not in values: if len(llm_chain_variables) == 1: values["document_variable_name"] = llm_chain_variables[0] else: raise ValueError( "document_variable_name must be provided if there are " "multiple llm_chain_variables" ) else: if values["document_variable_name"] not in llm_chain_variables: raise ValueError( f"document_variable_name {values['document_variable_name']} was " f"not found in llm_chain input_variables: {llm_chain_variables}" ) return values @property def input_keys(self) -> List[str]: extra_keys = [ k for k in self.llm_chain.input_keys if k != self.document_variable_name ] return super().input_keys + extra_keys def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict: """Construct inputs from kwargs and docs. Format and then join all the documents together into one input with name `self.document_variable_name`. Also pluck any additional variables from **kwargs. Args: docs: List of documents to format and then join into single input **kwargs: additional inputs to chain, will pluck any other required arguments from here. Returns: dictionary of inputs to LLMChain """ # Format each document according to the prompt doc_strings = [format_document(doc, self.document_prompt) for doc in docs] # Join the documents together to put them in the prompt. 
inputs = { k: v for k, v in kwargs.items() if k in self.llm_chain.prompt.input_variables } inputs[self.document_variable_name] = self.document_separator.join(doc_strings) return inputs def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]: """Return the prompt length given the documents passed in. This can be used by a caller to determine whether passing in a list of documents would exceed a certain prompt length. This useful when trying to ensure that the size of a prompt remains below a certain context limit. Args: docs: List[Document], a list of documents to use to calculate the total prompt length. Returns: Returns None if the method does not depend on the prompt length, otherwise the length of the prompt in tokens. """ inputs = self._get_inputs(docs, **kwargs) prompt = self.llm_chain.prompt.format(**inputs) return self.llm_chain._get_num_tokens(prompt) def combine_docs( self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any ) -> Tuple[str, dict]: """Stuff all documents into one prompt and pass to LLM. Args: docs: List of documents to join together into one variable callbacks: Optional callbacks to pass along **kwargs: additional parameters to use to get inputs to LLMChain. Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. """ inputs = self._get_inputs(docs, **kwargs) # Call predict on the LLM. return self.llm_chain.predict(callbacks=callbacks, **inputs), {} async def acombine_docs( self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any ) -> Tuple[str, dict]: """Async stuff all documents into one prompt and pass to LLM. Args: docs: List of documents to join together into one variable callbacks: Optional callbacks to pass along **kwargs: additional parameters to use to get inputs to LLMChain. Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. """ inputs = self._get_inputs(docs, **kwargs) # Call predict on the LLM. return await self.llm_chain.apredict(callbacks=callbacks, **inputs), {} @property def _chain_type(self) -> str: return "stuff_documents_chain"
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~smith~evaluation~runner_utils.py
"""Utilities for running language models or Chains over datasets.""" from __future__ import annotations import dataclasses import functools import inspect import logging import uuid from datetime import datetime from enum import Enum from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union, cast, ) from libs.core.langchain_core._api import warn_deprecated from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.messages import BaseMessage, messages_from_dict from libs.core.langchain_core.outputs import ChatResult, LLMResult from libs.core.langchain_core.runnables import Runnable, RunnableConfig, RunnableLambda from libs.core.langchain_core.runnables import config as runnable_config from libs.core.langchain_core.runnables import utils as runnable_utils from libs.core.langchain_core.tracers.evaluation import ( EvaluatorCallbackHandler, wait_for_all_evaluators, ) from libs.core.langchain_core.tracers.langchain import LangChainTracer from langsmith.client import Client from langsmith.evaluation import EvaluationResult, RunEvaluator from langsmith.run_helpers import as_runnable, is_traceable_function from langsmith.schemas import Dataset, DataType, Example, TracerSession from langsmith.utils import LangSmithError from requests import HTTPError from typing_extensions import TypedDict from langchain.callbacks.manager import Callbacks from langchain.chains.base import Chain from langchain.evaluation.loading import load_evaluator from langchain.evaluation.schema import ( EvaluatorType, PairwiseStringEvaluator, StringEvaluator, ) from langchain.smith import evaluation as smith_eval from langchain.smith.evaluation import config as smith_eval_config from langchain.smith.evaluation import name_generation, progress if TYPE_CHECKING: import pandas as pd logger = logging.getLogger(__name__) MODEL_OR_CHAIN_FACTORY = Union[ Callable[[], Union[Chain, Runnable]], BaseLanguageModel, Callable[[dict], Any], Runnable, Chain, ] MCF = Union[Callable[[], Union[Chain, Runnable]], BaseLanguageModel] class InputFormatError(Exception): """Raised when the input format is invalid.""" ## Shared Utilities class TestResult(dict): """A dictionary of the results of a single test run.""" def get_aggregate_feedback( self, ) -> pd.DataFrame: """Return quantiles for the feedback scores. This method calculates and prints the quantiles for the feedback scores across all feedback keys. Returns: A DataFrame containing the quantiles for each feedback key. """ df = self.to_dataframe() # Drop all things starting with inputs., outputs., and reference to_drop = [ col for col in df.columns if col.startswith("inputs.") or col.startswith("outputs.") or col.startswith("reference") ] return df.describe(include="all").drop(to_drop, axis=1) def to_dataframe(self) -> pd.DataFrame: """Convert the results to a dataframe.""" try: import pandas as pd except ImportError as e: raise ImportError( "Pandas is required to convert the results to a dataframe." " to install pandas, run `pip install pandas`." 
) from e indices = [] records = [] for example_id, result in self["results"].items(): feedback = result["feedback"] output_ = result.get("output") if isinstance(output_, dict): output = {f"outputs.{k}": v for k, v in output_.items()} elif output_ is None: output = {} else: output = {"output": output_} r = { **{f"inputs.{k}": v for k, v in result["input"].items()}, **output, } if "reference" in result: if isinstance(result["reference"], dict): r.update( {f"reference.{k}": v for k, v in result["reference"].items()} ) else: r["reference"] = result["reference"] r.update( { **{f"feedback.{f.key}": f.score for f in feedback}, "error": result.get("Error"), "execution_time": result["execution_time"], "run_id": result.get("run_id"), } ) records.append(r) indices.append(example_id) return pd.DataFrame(records, index=indices) class EvalError(dict): """Your architecture raised an error.""" def __init__(self, Error: BaseException, **kwargs: Any) -> None: super().__init__(Error=Error, **kwargs) def __getattr__(self, name: str) -> Any: try: return self[name] except KeyError: raise AttributeError(f"'EvalError' object has no attribute '{name}'") def _wrap_in_chain_factory( llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, dataset_name: str = "<my_dataset>", ) -> MCF: """Forgive the user if they pass in a chain without memory instead of a chain factory. It's a common mistake. Raise a more helpful error message as well.""" if isinstance(llm_or_chain_factory, Chain): chain = llm_or_chain_factory chain_class = chain.__class__.__name__ if llm_or_chain_factory.memory is not None: memory_class = chain.memory.__class__.__name__ raise ValueError( "Cannot directly evaluate a chain with stateful memory." " To evaluate this chain, pass in a chain constructor" " that initializes fresh memory each time it is called." " This will safegaurd against information" " leakage between dataset examples." "\nFor example:\n\n" "def chain_constructor():\n" f" new_memory = {memory_class}(...)\n" f" return {chain_class}" "(memory=new_memory, ...)\n\n" f'run_on_dataset("{dataset_name}", chain_constructor, ...)' ) return lambda: chain elif isinstance(llm_or_chain_factory, BaseLanguageModel): return llm_or_chain_factory elif isinstance(llm_or_chain_factory, Runnable): # Memory may exist here, but it's not elegant to check all those cases. lcf = llm_or_chain_factory return lambda: lcf elif callable(llm_or_chain_factory): if is_traceable_function(llm_or_chain_factory): runnable_ = as_runnable(cast(Callable, llm_or_chain_factory)) return lambda: runnable_ try: _model = llm_or_chain_factory() # type: ignore[call-arg] except TypeError: # It's an arbitrary function, wrap it in a RunnableLambda user_func = cast(Callable, llm_or_chain_factory) sig = inspect.signature(user_func) logger.info(f"Wrapping function {sig} as RunnableLambda.") wrapped = RunnableLambda(user_func) return lambda: wrapped constructor = cast(Callable, llm_or_chain_factory) if isinstance(_model, BaseLanguageModel): # It's not uncommon to do an LLM constructor instead of raw LLM, # so we'll unpack it for the user. return _model elif is_traceable_function(cast(Callable, _model)): runnable_ = as_runnable(cast(Callable, _model)) return lambda: runnable_ elif not isinstance(_model, Runnable): # This is unlikely to happen - a constructor for a model function return lambda: RunnableLambda(constructor) else: # Typical correct case return constructor # noqa return llm_or_chain_factory def _get_prompt(inputs: Dict[str, Any]) -> str: """Get prompt from inputs. 
Args: inputs: The input dictionary. Returns: A string prompt. Raises: InputFormatError: If the input format is invalid. """ if not inputs: raise InputFormatError("Inputs should not be empty.") prompts = [] if "prompt" in inputs: if not isinstance(inputs["prompt"], str): raise InputFormatError( "Expected string for 'prompt', got" f" {type(inputs['prompt']).__name__}" ) prompts = [inputs["prompt"]] elif "prompts" in inputs: if not isinstance(inputs["prompts"], list) or not all( isinstance(i, str) for i in inputs["prompts"] ): raise InputFormatError( "Expected list of strings for 'prompts'," f" got {type(inputs['prompts']).__name__}" ) prompts = inputs["prompts"] elif len(inputs) == 1: prompt_ = next(iter(inputs.values())) if isinstance(prompt_, str): prompts = [prompt_] elif isinstance(prompt_, list) and all(isinstance(i, str) for i in prompt_): prompts = prompt_ else: raise InputFormatError(f"LLM Run expects string prompt input. Got {inputs}") else: raise InputFormatError( f"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}" ) if len(prompts) == 1: return prompts[0] else: raise InputFormatError( f"LLM Run expects single prompt input. Got {len(prompts)} prompts." ) def _get_messages(inputs: Dict[str, Any]) -> List[BaseMessage]: """Get Chat Messages from inputs. Args: inputs: The input dictionary. Returns: A list of chat messages. Raises: InputFormatError: If the input format is invalid. """ if not inputs: raise InputFormatError("Inputs should not be empty.") if "messages" in inputs: single_input = inputs["messages"] elif len(inputs) == 1: single_input = next(iter(inputs.values())) else: raise InputFormatError( f"Chat Run expects 'messages' in inputs when example has multiple" f" input keys. Got {inputs}" ) if isinstance(single_input, list) and all( isinstance(i, dict) for i in single_input ): raw_messages = [single_input] elif isinstance(single_input, list) and all( isinstance(i, list) for i in single_input ): raw_messages = single_input else: raise InputFormatError( f"Chat Run expects List[dict] or List[List[dict]] values for" f" 'messages' key input. Got {inputs}" ) if len(raw_messages) == 1: return messages_from_dict(raw_messages[0]) else: raise InputFormatError( f"Chat Run expects single List[dict] or List[List[dict]] 'messages'" f" input. Got {len(raw_messages)} messages from inputs {inputs}" ) ## Shared data validation utilities def _validate_example_inputs_for_language_model( first_example: Example, input_mapper: Optional[Callable[[Dict], Any]], ) -> None: if input_mapper: prompt_input = input_mapper(first_example.inputs) if not isinstance(prompt_input, str) and not ( isinstance(prompt_input, list) and all(isinstance(msg, BaseMessage) for msg in prompt_input) ): raise InputFormatError( "When using an input_mapper to prepare dataset example inputs" " for an LLM or chat model, the output must a single string or" " a list of chat messages." f"\nGot: {prompt_input} of type {type(prompt_input)}." ) else: try: _get_prompt(first_example.inputs) except InputFormatError: try: _get_messages(first_example.inputs) except InputFormatError: raise InputFormatError( "Example inputs do not match language model input format. " "Expected a dictionary with messages or a single prompt." f" Got: {first_example.inputs}" " Please update your dataset OR provide an input_mapper" " to convert the example.inputs to a compatible format" " for the llm or chat model you wish to evaluate." 
) def _validate_example_inputs_for_chain( first_example: Example, chain: Chain, input_mapper: Optional[Callable[[Dict], Any]], ) -> None: """Validate that the example inputs match the chain input keys.""" if input_mapper: first_inputs = input_mapper(first_example.inputs) missing_keys = set(chain.input_keys).difference(first_inputs) if not isinstance(first_inputs, dict): raise InputFormatError( "When using an input_mapper to prepare dataset example" " inputs for a chain, the mapped value must be a dictionary." f"\nGot: {first_inputs} of type {type(first_inputs)}." ) if missing_keys: raise InputFormatError( "Missing keys after loading example using input_mapper." f"\nExpected: {chain.input_keys}. Got: {first_inputs.keys()}" ) else: first_inputs = first_example.inputs missing_keys = set(chain.input_keys).difference(first_inputs) if len(first_inputs) == 1 and len(chain.input_keys) == 1: # We can pass this through the run method. # Refrain from calling to validate. pass elif missing_keys: raise InputFormatError( "Example inputs missing expected chain input keys." " Please provide an input_mapper to convert the example.inputs" " to a compatible format for the chain you wish to evaluate." f"Expected: {chain.input_keys}. " f"Got: {first_inputs.keys()}" ) def _validate_example_inputs( example: Example, llm_or_chain_factory: MCF, input_mapper: Optional[Callable[[Dict], Any]], ) -> None: """Validate that the example inputs are valid for the model.""" if isinstance(llm_or_chain_factory, BaseLanguageModel): _validate_example_inputs_for_language_model(example, input_mapper) else: chain = llm_or_chain_factory() if isinstance(chain, Chain): # Otherwise it's a runnable _validate_example_inputs_for_chain(example, chain, input_mapper) elif isinstance(chain, Runnable): logger.debug(f"Skipping input validation for {chain}") ## Shared Evaluator Setup Utilities def _setup_evaluation( llm_or_chain_factory: MCF, examples: List[Example], evaluation: Optional[smith_eval.RunEvalConfig], data_type: DataType, ) -> Optional[List[RunEvaluator]]: """Configure the evaluators to run on the results of the chain.""" if evaluation: if isinstance(llm_or_chain_factory, BaseLanguageModel): run_inputs, run_outputs = None, None run_type = "llm" else: run_type = "chain" if data_type in (DataType.chat, DataType.llm): val = data_type.value if isinstance(data_type, Enum) else data_type raise ValueError( "Cannot evaluate a chain on dataset with " f"data_type={val}. " "Please specify a dataset with the default 'kv' data type." ) chain = llm_or_chain_factory() run_inputs = chain.input_keys if isinstance(chain, Chain) else None run_outputs = chain.output_keys if isinstance(chain, Chain) else None run_evaluators = _load_run_evaluators( evaluation, run_type, data_type, list(examples[0].outputs) if examples[0].outputs else None, run_inputs, run_outputs, ) else: # TODO: Create a default helpfulness evaluator run_evaluators = None return run_evaluators def _determine_input_key( config: smith_eval.RunEvalConfig, run_inputs: Optional[List[str]], ) -> Optional[str]: input_key = None if config.input_key: input_key = config.input_key if run_inputs and input_key not in run_inputs: logger.warning( f"Input key {input_key} not in chain's specified" f" input keys {run_inputs}. Evaluation behavior may be undefined." ) elif run_inputs and len(run_inputs) == 1: input_key = run_inputs[0] elif run_inputs is not None and len(run_inputs) > 1: logger.warning( f"Chain expects multiple input keys: {run_inputs}," f" Evaluator is likely to fail. 
Evaluation behavior may be undefined." " Specify an input_key in the RunEvalConfig to avoid this warning." ) return input_key def _determine_prediction_key( config: smith_eval.RunEvalConfig, run_outputs: Optional[List[str]], ) -> Optional[str]: prediction_key = None if config.prediction_key: prediction_key = config.prediction_key if run_outputs and prediction_key not in run_outputs: logger.warning( f"Prediction key {prediction_key} not in chain's specified" f" output keys {run_outputs}. Evaluation behavior may be undefined." ) elif run_outputs and len(run_outputs) == 1: prediction_key = run_outputs[0] elif run_outputs is not None and len(run_outputs) > 1: logger.warning( f"Chain expects multiple output keys: {run_outputs}," f" Evaluation behavior may be undefined. Specify a prediction_key" " in the RunEvalConfig to avoid this warning." ) return prediction_key def _determine_reference_key( config: smith_eval.RunEvalConfig, example_outputs: Optional[List[str]], ) -> Optional[str]: if config.reference_key: reference_key = config.reference_key if example_outputs and reference_key not in example_outputs: raise ValueError( f"Reference key {reference_key} not in Dataset" f" example outputs: {example_outputs}" ) elif example_outputs and len(example_outputs) == 1: reference_key = list(example_outputs)[0] else: reference_key = None return reference_key def _construct_run_evaluator( eval_config: Union[EvaluatorType, str, smith_eval_config.EvalConfig], eval_llm: Optional[BaseLanguageModel], run_type: str, data_type: DataType, example_outputs: Optional[List[str]], reference_key: Optional[str], input_key: Optional[str], prediction_key: Optional[str], ) -> RunEvaluator: if isinstance(eval_config, (EvaluatorType, str)): if not isinstance(eval_config, EvaluatorType): eval_config = EvaluatorType(eval_config) evaluator_ = load_evaluator(eval_config, llm=eval_llm) eval_type_tag = eval_config.value else: kwargs = {"llm": eval_llm, **eval_config.get_kwargs()} evaluator_ = load_evaluator(eval_config.evaluator_type, **kwargs) eval_type_tag = eval_config.evaluator_type.value # Override keys if specified in the config if isinstance(eval_config, smith_eval_config.SingleKeyEvalConfig): input_key = eval_config.input_key or input_key prediction_key = eval_config.prediction_key or prediction_key reference_key = eval_config.reference_key or reference_key if isinstance(evaluator_, StringEvaluator): if evaluator_.requires_reference and reference_key is None: raise ValueError( f"Must specify reference_key in smith_eval.RunEvalConfig to use" f" evaluator of type {eval_type_tag} with" f" dataset with multiple output keys: {example_outputs}." ) run_evaluator = smith_eval.StringRunEvaluatorChain.from_run_and_data_type( evaluator_, run_type, data_type, input_key=input_key, prediction_key=prediction_key, reference_key=reference_key, tags=[eval_type_tag], ) elif isinstance(evaluator_, PairwiseStringEvaluator): raise NotImplementedError( f"Run evaluator for {eval_type_tag} is not implemented." " PairwiseStringEvaluators compare the outputs of two different models" " rather than the output of a single model." " Did you mean to use a StringEvaluator instead?" 
"\nSee: https://python.langchain.com/docs/guides/evaluation/string/" ) else: raise NotImplementedError( f"Run evaluator for {eval_type_tag} is not implemented" ) return run_evaluator def _get_keys( config: smith_eval.RunEvalConfig, run_inputs: Optional[List[str]], run_outputs: Optional[List[str]], example_outputs: Optional[List[str]], ) -> Tuple[Optional[str], Optional[str], Optional[str]]: input_key = _determine_input_key(config, run_inputs) prediction_key = _determine_prediction_key(config, run_outputs) reference_key = _determine_reference_key(config, example_outputs) return input_key, prediction_key, reference_key def _load_run_evaluators( config: smith_eval.RunEvalConfig, run_type: str, data_type: DataType, example_outputs: Optional[List[str]], run_inputs: Optional[List[str]], run_outputs: Optional[List[str]], ) -> List[RunEvaluator]: """ Load run evaluators from a configuration. Args: config: Configuration for the run evaluators. Returns: A list of run evaluators. """ run_evaluators = [] input_key, prediction_key, reference_key = None, None, None if ( config.evaluators or any([isinstance(e, EvaluatorType) for e in config.evaluators]) or ( config.custom_evaluators and any([isinstance(e, StringEvaluator) for e in config.custom_evaluators]) ) ): input_key, prediction_key, reference_key = _get_keys( config, run_inputs, run_outputs, example_outputs ) for eval_config in config.evaluators: run_evaluator = _construct_run_evaluator( eval_config, config.eval_llm, run_type, data_type, example_outputs, reference_key, input_key, prediction_key, ) run_evaluators.append(run_evaluator) custom_evaluators = config.custom_evaluators or [] for custom_evaluator in custom_evaluators: if isinstance(custom_evaluator, RunEvaluator): run_evaluators.append(custom_evaluator) elif isinstance(custom_evaluator, StringEvaluator): run_evaluators.append( smith_eval.StringRunEvaluatorChain.from_run_and_data_type( custom_evaluator, run_type, data_type, input_key=input_key, prediction_key=prediction_key, reference_key=reference_key, ) ) else: raise ValueError( f"Unsupported custom evaluator: {custom_evaluator}." f" Expected RunEvaluator or StringEvaluator." ) return run_evaluators ### Async Helpers async def _arun_llm( llm: BaseLanguageModel, inputs: Dict[str, Any], *, tags: Optional[List[str]] = None, callbacks: Callbacks = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[str, BaseMessage]: """Asynchronously run the language model. Args: llm: The language model to run. inputs: The input dictionary. tags: Optional tags to add to the run. callbacks: Optional callbacks to use during the run. input_mapper: Optional function to map inputs to the expected format. Returns: The LLMResult or ChatResult. Raises: ValueError: If the LLM type is unsupported. InputFormatError: If the input format is invalid. """ if input_mapper is not None: prompt_or_messages = input_mapper(inputs) if isinstance(prompt_or_messages, str): return await llm.apredict( prompt_or_messages, callbacks=callbacks, tags=tags ) elif isinstance(prompt_or_messages, list) and all( isinstance(msg, BaseMessage) for msg in prompt_or_messages ): return await llm.apredict_messages( prompt_or_messages, callbacks=callbacks, tags=tags ) else: raise InputFormatError( "Input mapper returned invalid format" f" {prompt_or_messages}" "\nExpected a single string or list of chat messages." 
) else: try: prompt = _get_prompt(inputs) llm_output: Union[str, BaseMessage] = await llm.apredict( prompt, callbacks=callbacks, tags=tags ) except InputFormatError: messages = _get_messages(inputs) llm_output = await llm.apredict_messages( messages, callbacks=callbacks, tags=tags ) return llm_output async def _arun_chain( chain: Union[Chain, Runnable], inputs: Dict[str, Any], callbacks: Callbacks, *, tags: Optional[List[str]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[dict, str]: """Run a chain asynchronously on inputs.""" inputs_ = inputs if input_mapper is None else input_mapper(inputs) if ( isinstance(chain, Chain) and isinstance(inputs_, dict) and len(inputs_) == 1 and chain.input_keys ): val = next(iter(inputs_.values())) output = await chain.acall(val, callbacks=callbacks, tags=tags) else: runnable_config = RunnableConfig(tags=tags or [], callbacks=callbacks) output = await chain.ainvoke(inputs_, config=runnable_config) return output async def _arun_llm_or_chain( example: Example, config: RunnableConfig, *, llm_or_chain_factory: MCF, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[dict, str, LLMResult, ChatResult]: """Asynchronously run the Chain or language model. Args: example: The example to run. llm_or_chain_factory: The Chain or language model constructor to run. tags: Optional tags to add to the run. callbacks: Optional callbacks to use during the run. input_mapper: Optional function to map the input to the expected format. Returns: A list of outputs. """ chain_or_llm = ( "LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain" ) result = None try: if isinstance(llm_or_chain_factory, BaseLanguageModel): output: Any = await _arun_llm( llm_or_chain_factory, example.inputs, tags=config["tags"], callbacks=config["callbacks"], input_mapper=input_mapper, ) else: chain = llm_or_chain_factory() output = await _arun_chain( chain, example.inputs, tags=config["tags"], callbacks=config["callbacks"], input_mapper=input_mapper, ) result = output except Exception as e: logger.warning( f"{chain_or_llm} failed for example {example.id} " f"with inputs {example.inputs}" f"\n{repr(e)}" ) result = EvalError(Error=e) return result ## Sync Utilities def _run_llm( llm: BaseLanguageModel, inputs: Dict[str, Any], callbacks: Callbacks, *, tags: Optional[List[str]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[str, BaseMessage]: """ Run the language model on the example. Args: llm: The language model to run. inputs: The input dictionary. callbacks: The callbacks to use during the run. tags: Optional tags to add to the run. input_mapper: function to map to the inputs dictionary from an Example Returns: The LLMResult or ChatResult. Raises: ValueError: If the LLM type is unsupported. InputFormatError: If the input format is invalid. """ if input_mapper is not None: prompt_or_messages = input_mapper(inputs) if isinstance(prompt_or_messages, str): llm_output: Union[str, BaseMessage] = llm.predict( prompt_or_messages, callbacks=callbacks, tags=tags ) elif isinstance(prompt_or_messages, list) and all( isinstance(msg, BaseMessage) for msg in prompt_or_messages ): llm_output = llm.predict_messages( prompt_or_messages, callbacks=callbacks, tags=tags ) else: raise InputFormatError( "Input mapper returned invalid format: " f" {prompt_or_messages}" "\nExpected a single string or list of chat messages." 
) else: try: llm_prompts = _get_prompt(inputs) llm_output = llm.predict(llm_prompts, callbacks=callbacks, tags=tags) except InputFormatError: llm_messages = _get_messages(inputs) llm_output = llm.predict_messages(llm_messages, callbacks=callbacks) return llm_output def _run_chain( chain: Union[Chain, Runnable], inputs: Dict[str, Any], callbacks: Callbacks, *, tags: Optional[List[str]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[Dict, str]: """Run a chain on inputs.""" inputs_ = inputs if input_mapper is None else input_mapper(inputs) if ( isinstance(chain, Chain) and isinstance(inputs_, dict) and len(inputs_) == 1 and chain.input_keys ): val = next(iter(inputs_.values())) output = chain(val, callbacks=callbacks, tags=tags) else: runnable_config = RunnableConfig(tags=tags or [], callbacks=callbacks) output = chain.invoke(inputs_, config=runnable_config) return output def _run_llm_or_chain( example: Example, config: RunnableConfig, *, llm_or_chain_factory: MCF, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[dict, str, LLMResult, ChatResult]: """ Run the Chain or language model synchronously. Args: example: The example to run. llm_or_chain_factory: The Chain or language model constructor to run. tags: Optional tags to add to the run. callbacks: Optional callbacks to use during the run. Returns: Union[List[dict], List[str], List[LLMResult], List[ChatResult]]: The outputs of the model or chain. """ chain_or_llm = ( "LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain" ) result = None try: if isinstance(llm_or_chain_factory, BaseLanguageModel): output: Any = _run_llm( llm_or_chain_factory, example.inputs, config["callbacks"], tags=config["tags"], input_mapper=input_mapper, ) else: chain = llm_or_chain_factory() output = _run_chain( chain, example.inputs, config["callbacks"], tags=config["tags"], input_mapper=input_mapper, ) result = output except Exception as e: error_type = type(e).__name__ logger.warning( f"{chain_or_llm} failed for example {example.id} " f"with inputs {example.inputs}" f"\nError Type: {error_type}, Message: {e}" ) result = EvalError(Error=e) return result ## Public API def _prepare_eval_run( client: Client, dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, project_name: str, project_metadata: Optional[Dict[str, Any]] = None, tags: Optional[List[str]] = None, ) -> Tuple[MCF, TracerSession, Dataset, List[Example]]: wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory, dataset_name) dataset = client.read_dataset(dataset_name=dataset_name) examples = list(client.list_examples(dataset_id=dataset.id)) if not examples: raise ValueError(f"Dataset {dataset_name} has no example rows.") try: project_extra: dict = {"metadata": project_metadata} if project_metadata else {} if tags: project_extra["tags"] = tags project = client.create_project( project_name, reference_dataset_id=dataset.id, project_extra=project_extra, ) except (HTTPError, ValueError, LangSmithError) as e: if "already exists " not in str(e): raise e uid = uuid.uuid4() example_msg = f""" run_on_dataset( ... project_name="{project_name} - {uid}", # Update since {project_name} already exists ) """ raise ValueError( f"Test project {project_name} already exists. 
Please use a different name:" f"\n\n{example_msg}" ) comparison_url = dataset.url + f"/compare?selectedSessions={project.id}" print( f"View the evaluation results for project '{project_name}'" f" at:\n{comparison_url}\n\n" f"View all tests for Dataset {dataset_name} at:\n{dataset.url}", flush=True, ) return wrapped_model, project, dataset, examples class _RowResult(TypedDict, total=False): """A dictionary of the results for a single example row.""" feedback: Optional[List[EvaluationResult]] execution_time: Optional[float] run_id: Optional[str] @dataclasses.dataclass class _DatasetRunContainer: """A container to help manage the state of a eval run.""" client: Client project: TracerSession wrapped_model: MCF examples: List[Example] configs: List[RunnableConfig] def _merge_test_outputs( self, batch_results: list, all_eval_results: Dict[str, _RowResult], ) -> dict: results: dict = {} for example, output in zip(self.examples, batch_results): row_result = cast(_RowResult, all_eval_results.get(str(example.id), {})) results[str(example.id)] = { "input": example.inputs, "feedback": row_result.get("feedback", []), "execution_time": row_result.get("execution_time"), "run_id": row_result.get("run_id"), } if isinstance(output, EvalError): results[str(example.id)]["Error"] = output.Error else: results[str(example.id)]["output"] = output if example.outputs: results[str(example.id)]["reference"] = example.outputs return results def _collect_metrics(self) -> Dict[str, _RowResult]: all_eval_results: dict = {} for c in self.configs: for callback in cast(list, c["callbacks"]): if isinstance(callback, EvaluatorCallbackHandler): eval_results = callback.logged_eval_results for (_, example_id), v in eval_results.items(): all_eval_results.setdefault(str(example_id), {}).update( {"feedback": v} ) elif isinstance(callback, LangChainTracer): run = callback.latest_run execution_time = ( (run.end_time - run.start_time).total_seconds() if run and run.end_time else None ) run_id = str(run.id) if run else None all_eval_results.setdefault(str(callback.example_id), {}).update( { "execution_time": execution_time, "run_id": run_id, } ) return cast(Dict[str, _RowResult], all_eval_results) def _collect_test_results( self, batch_results: List[Union[dict, str, LLMResult, ChatResult]], ) -> TestResult: wait_for_all_evaluators() all_eval_results = self._collect_metrics() results = self._merge_test_outputs(batch_results, all_eval_results) return TestResult( project_name=self.project.name, results=results, ) def finish(self, batch_results: list, verbose: bool = False) -> TestResult: results = self._collect_test_results(batch_results) if verbose: try: agg_feedback = results.get_aggregate_feedback() _display_aggregate_results(agg_feedback) except Exception as e: logger.debug(f"Failed to print aggregate feedback: {repr(e)}") try: # Closing the project permits name changing and metric optimizations self.client.update_project(self.project.id, end_time=datetime.utcnow()) except Exception as e: logger.debug(f"Failed to close project: {repr(e)}") return results @classmethod def prepare( cls, client: Client, dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, project_name: Optional[str], evaluation: Optional[smith_eval.RunEvalConfig] = None, tags: Optional[List[str]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, concurrency_level: int = 5, project_metadata: Optional[Dict[str, Any]] = None, ) -> _DatasetRunContainer: project_name = project_name or name_generation.random_name() wrapped_model, project, dataset, 
examples = _prepare_eval_run( client, dataset_name, llm_or_chain_factory, project_name, project_metadata=project_metadata, tags=tags, ) wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory) run_evaluators = _setup_evaluation( wrapped_model, examples, evaluation, dataset.data_type or DataType.kv ) _validate_example_inputs(examples[0], wrapped_model, input_mapper) progress_bar = progress.ProgressBarCallback(len(examples)) configs = [ RunnableConfig( callbacks=[ LangChainTracer( project_name=project.name, client=client, use_threading=False, example_id=example.id, ), EvaluatorCallbackHandler( evaluators=run_evaluators or [], client=client, example_id=example.id, max_concurrency=0, ), progress_bar, ], tags=tags or [], max_concurrency=concurrency_level, ) for example in examples ] return cls( client=client, project=project, wrapped_model=wrapped_model, examples=examples, configs=configs, ) def _is_jupyter_environment() -> bool: try: from IPython import get_ipython res = get_ipython() return get_ipython() is not None and "zmqshell" in str(type(res)) except ImportError: return False def _display_aggregate_results(aggregate_results: pd.DataFrame) -> None: if _is_jupyter_environment(): from IPython.display import HTML, display display(HTML("<h3>Experiment Results:</h3>")) display(aggregate_results) else: formatted_string = aggregate_results.to_string( float_format=lambda x: f"{x:.2f}", justify="right" ) print("\n Experiment Results:") print(formatted_string) _INPUT_MAPPER_DEP_WARNING = ( "The input_mapper argument is deprecated and " "will be removed in a future release. Please add a " " RunnableLambda to your chain to map inputs to the expected format" " instead. Example:\n" "def construct_chain():\n" " my_chain = ...\n" " input_mapper = {'other_key': 'MyOtherInput', 'my_input_key': x}\n" " return input_mapper | my_chain\n" "run_on_dataset(..., llm_or_chain_factory=construct_chain)\n" "(See https://api.python.langchain.com/en/latest/schema/" "langchain.schema.runnable.base.RunnableLambda.html)" ) async def arun_on_dataset( client: Optional[Client], dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, *, evaluation: Optional[smith_eval.RunEvalConfig] = None, concurrency_level: int = 5, project_name: Optional[str] = None, project_metadata: Optional[Dict[str, Any]] = None, verbose: bool = False, tags: Optional[List[str]] = None, **kwargs: Any, ) -> Dict[str, Any]: input_mapper = kwargs.pop("input_mapper", None) if input_mapper: warn_deprecated("0.0.305", message=_INPUT_MAPPER_DEP_WARNING, pending=True) if kwargs: warn_deprecated( "0.0.305", message="The following arguments are deprecated and " "will be removed in a future release: " f"{kwargs.keys()}.", removal="0.0.305", ) client = client or Client() container = _DatasetRunContainer.prepare( client, dataset_name, llm_or_chain_factory, project_name, evaluation, tags, input_mapper, concurrency_level, project_metadata=project_metadata, ) batch_results = await runnable_utils.gather_with_concurrency( container.configs[0].get("max_concurrency"), *map( functools.partial( _arun_llm_or_chain, llm_or_chain_factory=container.wrapped_model, input_mapper=input_mapper, ), container.examples, container.configs, ), ) return container.finish(batch_results, verbose=verbose) def run_on_dataset( client: Optional[Client], dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, *, evaluation: Optional[smith_eval.RunEvalConfig] = None, concurrency_level: int = 5, project_name: Optional[str] = None, project_metadata: Optional[Dict[str, Any]] = 
None, verbose: bool = False, tags: Optional[List[str]] = None, **kwargs: Any, ) -> Dict[str, Any]: input_mapper = kwargs.pop("input_mapper", None) if input_mapper: warn_deprecated("0.0.305", message=_INPUT_MAPPER_DEP_WARNING, pending=True) if kwargs: warn_deprecated( "0.0.305", message="The following arguments are deprecated and " "will be removed in a future release: " f"{kwargs.keys()}.", removal="0.0.305", ) client = client or Client() container = _DatasetRunContainer.prepare( client, dataset_name, llm_or_chain_factory, project_name, evaluation, tags, input_mapper, concurrency_level, project_metadata=project_metadata, ) if concurrency_level == 0: batch_results = [ _run_llm_or_chain( example, config, llm_or_chain_factory=container.wrapped_model, input_mapper=input_mapper, ) for example, config in zip(container.examples, container.configs) ] else: with runnable_config.get_executor_for_config(container.configs[0]) as executor: batch_results = list( executor.map( functools.partial( _run_llm_or_chain, llm_or_chain_factory=container.wrapped_model, input_mapper=input_mapper, ), container.examples, container.configs, ) ) return container.finish(batch_results, verbose=verbose) _RUN_ON_DATASET_DOCSTRING = """ Run the Chain or language model on a dataset and store traces to the specified project name. Args: dataset_name: Name of the dataset to run the chain on. llm_or_chain_factory: Language model or Chain constructor to run over the dataset. The Chain constructor is used to permit independent calls on each example without carrying over state. evaluation: Configuration for evaluators to run on the results of the chain concurrency_level: The number of async tasks to run concurrently. project_name: Name of the project to store the traces in. Defaults to {dataset_name}-{chain class name}-{datetime}. project_metadata: Optional metadata to add to the project. Useful for storing information the test variant. (prompt version, model version, etc.) client: LangSmith client to use to access the dataset and to log feedback and run traces. verbose: Whether to print progress. tags: Tags to add to each run in the project. Returns: A dictionary containing the run's project name and the resulting model outputs. For the (usually faster) async version of this function, see :func:`arun_on_dataset`. Examples -------- .. code-block:: python from langsmith import Client from langchain.chat_models import ChatOpenAI from langchain.chains import LLMChain from langchain.smith import smith_eval.RunEvalConfig, run_on_dataset # Chains may have memory. Passing in a constructor function lets the # evaluation framework avoid cross-contamination between runs. def construct_chain(): llm = ChatOpenAI(temperature=0) chain = LLMChain.from_string( llm, "What's the answer to {your_input_key}" ) return chain # Load off-the-shelf evaluators via config or the EvaluatorType (string or enum) evaluation_config = smith_eval.RunEvalConfig( evaluators=[ "qa", # "Correctness" against a reference answer "embedding_distance", smith_eval.RunEvalConfig.Criteria("helpfulness"), smith_eval.RunEvalConfig.Criteria({ "fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?" }), ] ) client = Client() run_on_dataset( client, "<my_dataset_name>", construct_chain, evaluation=evaluation_config, ) You can also create custom evaluators by subclassing the :class:`StringEvaluator <langchain.evaluation.schema.StringEvaluator>` or LangSmith's `RunEvaluator` classes. .. 
code-block:: python from typing import Optional from langchain.evaluation import StringEvaluator class MyStringEvaluator(StringEvaluator): @property def requires_input(self) -> bool: return False @property def requires_reference(self) -> bool: return True @property def evaluation_name(self) -> str: return "exact_match" def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict: return {"score": prediction == reference} evaluation_config = smith_eval.RunEvalConfig( custom_evaluators = [MyStringEvaluator()], ) run_on_dataset( client, "<my_dataset_name>", construct_chain, evaluation=evaluation_config, ) """ # noqa: E501 run_on_dataset.__doc__ = _RUN_ON_DATASET_DOCSTRING arun_on_dataset.__doc__ = _RUN_ON_DATASET_DOCSTRING.replace( "run_on_dataset(", "await arun_on_dataset(" )
[ "['PLACEHOLDER']", "[]" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~tencent_cos_directory.py
from typing import Any, Iterator, List

from libs.core.langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.tencent_cos_file import TencentCOSFileLoader


class TencentCOSDirectoryLoader(BaseLoader):
    """Load from `Tencent Cloud COS` directory."""

    def __init__(self, conf: Any, bucket: str, prefix: str = ""):
        """Initialize with COS config, bucket and prefix.

        :param conf(CosConfig): COS config.
        :param bucket(str): COS bucket.
        :param prefix(str): prefix.
        """
        self.conf = conf
        self.bucket = bucket
        self.prefix = prefix

    def load(self) -> List[Document]:
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Load documents."""
        try:
            from qcloud_cos import CosS3Client
        except ImportError:
            raise ImportError(
                "Could not import cos-python-sdk-v5 python package. "
                "Please install it with `pip install cos-python-sdk-v5`."
            )
        client = CosS3Client(self.conf)
        contents = []
        marker = ""
        while True:
            response = client.list_objects(
                Bucket=self.bucket, Prefix=self.prefix, Marker=marker, MaxKeys=1000
            )
            if "Contents" in response:
                contents.extend(response["Contents"])
            if response["IsTruncated"] == "false":
                break
            marker = response["NextMarker"]
        for content in contents:
            if content["Key"].endswith("/"):
                continue
            loader = TencentCOSFileLoader(self.conf, self.bucket, content["Key"])
            yield loader.load()[0]
[]
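The file above defines a directory loader for Tencent Cloud COS. A minimal usage sketch (not part of the dataset record; region, credentials, and bucket name are placeholders):

from qcloud_cos import CosConfig

from langchain_community.document_loaders.tencent_cos_directory import (
    TencentCOSDirectoryLoader,
)

conf = CosConfig(
    Region="ap-guangzhou",
    SecretId="<your-secret-id>",
    SecretKey="<your-secret-key>",
)
loader = TencentCOSDirectoryLoader(conf, bucket="examplebucket-1250000000", prefix="docs/")
docs = loader.load()  # one Document per non-directory object under the prefix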
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~mlflow_ai_gateway.py
from __future__ import annotations

import warnings
from typing import Any, Dict, List, Mapping, Optional

from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra


# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class Params(BaseModel, extra=Extra.allow):  # type: ignore[call-arg]
    """Parameters for the MLflow AI Gateway LLM."""

    temperature: float = 0.0
    candidate_count: int = 1
    """The number of candidates to return."""
    stop: Optional[List[str]] = None
    max_tokens: Optional[int] = None


class MlflowAIGateway(LLM):
    """
    Wrapper around completions LLMs in the MLflow AI Gateway.

    To use, you should have the ``mlflow[gateway]`` python package installed.
    For more information, see https://mlflow.org/docs/latest/gateway/index.html.

    Example:
        .. code-block:: python

            from langchain_community.llms import MlflowAIGateway

            completions = MlflowAIGateway(
                gateway_uri="<your-mlflow-ai-gateway-uri>",
                route="<your-mlflow-ai-gateway-completions-route>",
                params={
                    "temperature": 0.1
                }
            )
    """

    route: str
    gateway_uri: Optional[str] = None
    params: Optional[Params] = None

    def __init__(self, **kwargs: Any):
        warnings.warn(
            "`MlflowAIGateway` is deprecated. Use `Mlflow` or `Databricks` instead.",
            DeprecationWarning,
        )
        try:
            import mlflow.gateway
        except ImportError as e:
            raise ImportError(
                "Could not import `mlflow.gateway` module. "
                "Please install it with `pip install mlflow[gateway]`."
            ) from e
        super().__init__(**kwargs)
        if self.gateway_uri:
            mlflow.gateway.set_gateway_uri(self.gateway_uri)

    @property
    def _default_params(self) -> Dict[str, Any]:
        params: Dict[str, Any] = {
            "gateway_uri": self.gateway_uri,
            "route": self.route,
            **(self.params.dict() if self.params else {}),
        }
        return params

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return self._default_params

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        try:
            import mlflow.gateway
        except ImportError as e:
            raise ImportError(
                "Could not import `mlflow.gateway` module. "
                "Please install it with `pip install mlflow[gateway]`."
            ) from e
        data: Dict[str, Any] = {
            "prompt": prompt,
            **(self.params.dict() if self.params else {}),
        }
        if s := (stop or (self.params.stop if self.params else None)):
            data["stop"] = s
        resp = mlflow.gateway.query(self.route, data=data)
        return resp["candidates"][0]["text"]

    @property
    def _llm_type(self) -> str:
        return "mlflow-ai-gateway"
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~anyscale.py
"""Wrapper around Anyscale Endpoint""" from typing import ( Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, Set, Tuple, cast, ) from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.outputs import Generation, GenerationChunk, LLMResult from libs.core.langchain_core.pydantic_v1 import Field, SecretStr, root_validator from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env from langchain_community.llms.openai import ( BaseOpenAI, acompletion_with_retry, completion_with_retry, ) def update_token_usage( keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any] ) -> None: """Update token usage.""" _keys_to_use = keys.intersection(response["usage"]) for _key in _keys_to_use: if _key not in token_usage: token_usage[_key] = response["usage"][_key] else: token_usage[_key] += response["usage"][_key] def create_llm_result( choices: Any, prompts: List[str], token_usage: Dict[str, int], model_name: str ) -> LLMResult: """Create the LLMResult from the choices and prompts.""" generations = [] for i, _ in enumerate(prompts): choice = choices[i] generations.append( [ Generation( text=choice["message"]["content"], generation_info=dict( finish_reason=choice.get("finish_reason"), logprobs=choice.get("logprobs"), ), ) ] ) llm_output = {"token_usage": token_usage, "model_name": model_name} return LLMResult(generations=generations, llm_output=llm_output) class Anyscale(BaseOpenAI): """Anyscale large language models. To use, you should have the environment variable ``ANYSCALE_API_BASE`` and ``ANYSCALE_API_KEY``set with your Anyscale Endpoint, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain_community.llms import Anyscale anyscalellm = Anyscale(anyscale_api_base="ANYSCALE_API_BASE", anyscale_api_key="ANYSCALE_API_KEY", model_name="meta-llama/Llama-2-7b-chat-hf") # To leverage Ray for parallel processing @ray.remote(num_cpus=1) def send_query(llm, text): resp = llm(text) return resp futures = [send_query.remote(anyscalellm, text) for text in texts] results = ray.get(futures) """ """Key word arguments to pass to the model.""" anyscale_api_base: Optional[str] = None anyscale_api_key: Optional[SecretStr] = None prefix_messages: List = Field(default_factory=list) @classmethod def is_lc_serializable(cls) -> bool: return False @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["anyscale_api_base"] = get_from_dict_or_env( values, "anyscale_api_base", "ANYSCALE_API_BASE" ) values["anyscale_api_key"] = convert_to_secret_str( get_from_dict_or_env(values, "anyscale_api_key", "ANYSCALE_API_KEY") ) try: import openai ## Always create ChatComplete client, replacing the legacy Complete client values["client"] = openai.ChatCompletion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) if values["streaming"] and values["n"] > 1: raise ValueError("Cannot stream results when n > 1.") if values["streaming"] and values["best_of"] > 1: raise ValueError("Cannot stream results when best_of > 1.") return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_name": self.model_name}, **super()._identifying_params, } @property def _invocation_params(self) -> Dict[str, Any]: """Get the parameters used to invoke the model.""" openai_creds: Dict[str, Any] = { "api_key": cast(SecretStr, self.anyscale_api_key).get_secret_value(), "api_base": self.anyscale_api_base, } return {**openai_creds, **{"model": self.model_name}, **super()._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "Anyscale LLM" def _get_chat_messages( self, prompts: List[str], stop: Optional[List[str]] = None ) -> Tuple: if len(prompts) > 1: raise ValueError( f"Anyscale currently only supports single prompt, got {prompts}" ) messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}] params: Dict[str, Any] = self._invocation_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop if params.get("max_tokens") == -1: # for Chat api, omitting max_tokens is equivalent to having no limit del params["max_tokens"] return messages, params def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: messages, params = self._get_chat_messages([prompt], stop) params = {**params, **kwargs, "stream": True} for stream_resp in completion_with_retry( self, messages=messages, run_manager=run_manager, **params ): token = stream_resp["choices"][0]["delta"].get("content", "") chunk = GenerationChunk(text=token) yield chunk if run_manager: run_manager.on_llm_new_token(token, chunk=chunk) async def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: messages, params = self._get_chat_messages([prompt], stop) params = {**params, **kwargs, "stream": True} async for stream_resp in await acompletion_with_retry( self, messages=messages, run_manager=run_manager, **params ): token = stream_resp["choices"][0]["delta"].get("content", "") chunk = GenerationChunk(text=token) yield chunk if run_manager: await run_manager.on_llm_new_token(token, chunk=chunk) def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: choices = [] token_usage: Dict[str, int] = {} _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for prompt in prompts: if self.streaming: generation: Optional[GenerationChunk] = None for chunk in self._stream(prompt, stop, run_manager, **kwargs): if generation is None: generation = chunk else: generation += chunk assert generation is not None choices.append( { "message": {"content": generation.text}, "finish_reason": generation.generation_info.get("finish_reason") if generation.generation_info else None, "logprobs": generation.generation_info.get("logprobs") if generation.generation_info else None, } ) else: messages, params = self._get_chat_messages([prompt], stop) params = {**params, **kwargs} response = completion_with_retry( self, messages=messages, run_manager=run_manager, **params ) 
choices.extend(response["choices"]) update_token_usage(_keys, response, token_usage) return create_llm_result(choices, prompts, token_usage, self.model_name) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: choices = [] token_usage: Dict[str, int] = {} _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for prompt in prompts: messages = self.prefix_messages + [{"role": "user", "content": prompt}] if self.streaming: generation: Optional[GenerationChunk] = None async for chunk in self._astream(prompt, stop, run_manager, **kwargs): if generation is None: generation = chunk else: generation += chunk assert generation is not None choices.append( { "message": {"content": generation.text}, "finish_reason": generation.generation_info.get("finish_reason") if generation.generation_info else None, "logprobs": generation.generation_info.get("logprobs") if generation.generation_info else None, } ) else: messages, params = self._get_chat_messages([prompt], stop) params = {**params, **kwargs} response = await acompletion_with_retry( self, messages=messages, run_manager=run_manager, **params ) choices.extend(response["choices"]) update_token_usage(_keys, response, token_usage) return create_llm_result(choices, prompts, token_usage, self.model_name)
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~typesense.py
from __future__ import annotations import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.utils import get_from_env from libs.core.langchain_core.vectorstores import VectorStore if TYPE_CHECKING: from typesense.client import Client from typesense.collection import Collection class Typesense(VectorStore): """`Typesense` vector store. To use, you should have the ``typesense`` python package installed. Example: .. code-block:: python from langchain_community.embedding.openai import OpenAIEmbeddings from langchain_community.vectorstores import Typesense import typesense node = { "host": "localhost", # For Typesense Cloud use xxx.a1.typesense.net "port": "8108", # For Typesense Cloud use 443 "protocol": "http" # For Typesense Cloud use https } typesense_client = typesense.Client( { "nodes": [node], "api_key": "<API_KEY>", "connection_timeout_seconds": 2 } ) typesense_collection_name = "langchain-memory" embedding = OpenAIEmbeddings() vectorstore = Typesense( typesense_client=typesense_client, embedding=embedding, typesense_collection_name=typesense_collection_name, text_key="text", ) """ def __init__( self, typesense_client: Client, embedding: Embeddings, *, typesense_collection_name: Optional[str] = None, text_key: str = "text", ): """Initialize with Typesense client.""" try: from typesense import Client except ImportError: raise ImportError( "Could not import typesense python package. " "Please install it with `pip install typesense`." ) if not isinstance(typesense_client, Client): raise ValueError( f"typesense_client should be an instance of typesense.Client, " f"got {type(typesense_client)}" ) self._typesense_client = typesense_client self._embedding = embedding self._typesense_collection_name = ( typesense_collection_name or f"langchain-{str(uuid.uuid4())}" ) self._text_key = text_key @property def _collection(self) -> Collection: return self._typesense_client.collections[self._typesense_collection_name] @property def embeddings(self) -> Embeddings: return self._embedding def _prep_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]], ids: Optional[List[str]], ) -> List[dict]: """Embed and create the documents""" _ids = ids or (str(uuid.uuid4()) for _ in texts) _metadatas: Iterable[dict] = metadatas or ({} for _ in texts) embedded_texts = self._embedding.embed_documents(list(texts)) return [ {"id": _id, "vec": vec, f"{self._text_key}": text, "metadata": metadata} for _id, vec, text, metadata in zip(_ids, embedded_texts, texts, _metadatas) ] def _create_collection(self, num_dim: int) -> None: fields = [ {"name": "vec", "type": "float[]", "num_dim": num_dim}, {"name": f"{self._text_key}", "type": "string"}, {"name": ".*", "type": "auto"}, ] self._typesense_client.collections.create( {"name": self._typesense_collection_name, "fields": fields} ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embedding and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. Returns: List of ids from adding the texts into the vectorstore. 
""" from typesense.exceptions import ObjectNotFound docs = self._prep_texts(texts, metadatas, ids) try: self._collection.documents.import_(docs, {"action": "upsert"}) except ObjectNotFound: # Create the collection if it doesn't already exist self._create_collection(len(docs[0]["vec"])) self._collection.documents.import_(docs, {"action": "upsert"}) return [doc["id"] for doc in docs] def similarity_search_with_score( self, query: str, k: int = 10, filter: Optional[str] = "", ) -> List[Tuple[Document, float]]: """Return typesense documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. Minimum 10 results would be returned. filter: typesense filter_by expression to filter documents on Returns: List of Documents most similar to the query and score for each """ embedded_query = [str(x) for x in self._embedding.embed_query(query)] query_obj = { "q": "*", "vector_query": f'vec:([{",".join(embedded_query)}], k:{k})', "filter_by": filter, "collection": self._typesense_collection_name, } docs = [] response = self._typesense_client.multi_search.perform( {"searches": [query_obj]}, {} ) for hit in response["results"][0]["hits"]: document = hit["document"] metadata = document["metadata"] text = document[self._text_key] score = hit["vector_distance"] docs.append((Document(page_content=text, metadata=metadata), score)) return docs def similarity_search( self, query: str, k: int = 10, filter: Optional[str] = "", **kwargs: Any, ) -> List[Document]: """Return typesense documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. Minimum 10 results would be returned. filter: typesense filter_by expression to filter documents on Returns: List of Documents most similar to the query and score for each """ docs_and_score = self.similarity_search_with_score(query, k=k, filter=filter) return [doc for doc, _ in docs_and_score] @classmethod def from_client_params( cls, embedding: Embeddings, *, host: str = "localhost", port: Union[str, int] = "8108", protocol: str = "http", typesense_api_key: Optional[str] = None, connection_timeout_seconds: int = 2, **kwargs: Any, ) -> Typesense: """Initialize Typesense directly from client parameters. Example: .. code-block:: python from langchain_community.embedding.openai import OpenAIEmbeddings from langchain_community.vectorstores import Typesense # Pass in typesense_api_key as kwarg or set env var "TYPESENSE_API_KEY". vectorstore = Typesense( OpenAIEmbeddings(), host="localhost", port="8108", protocol="http", typesense_collection_name="langchain-memory", ) """ try: from typesense import Client except ImportError: raise ValueError( "Could not import typesense python package. " "Please install it with `pip install typesense`." 
) node = { "host": host, "port": str(port), "protocol": protocol, } typesense_api_key = typesense_api_key or get_from_env( "typesense_api_key", "TYPESENSE_API_KEY" ) client_config = { "nodes": [node], "api_key": typesense_api_key, "connection_timeout_seconds": connection_timeout_seconds, } return cls(Client(client_config), embedding, **kwargs) @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, typesense_client: Optional[Client] = None, typesense_client_params: Optional[dict] = None, typesense_collection_name: Optional[str] = None, text_key: str = "text", **kwargs: Any, ) -> Typesense: """Construct Typesense wrapper from raw text.""" if typesense_client: vectorstore = cls(typesense_client, embedding, **kwargs) elif typesense_client_params: vectorstore = cls.from_client_params( embedding, **typesense_client_params, **kwargs ) else: raise ValueError( "Must specify one of typesense_client or typesense_client_params." ) vectorstore.add_texts(texts, metadatas=metadatas, ids=ids) return vectorstore
[]
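A minimal usage sketch for the Typesense vector store in the record above; it is not taken from the source file, and the sample text, API key, and connection details are illustrative assumptions (a Typesense node reachable on localhost and an Embeddings implementation supplied by the caller).

# Hypothetical usage sketch for the Typesense vector store defined above.
from langchain_community.vectorstores import Typesense


def build_and_query(embedding):
    # `embedding` is any Embeddings implementation supplied by the caller.
    docsearch = Typesense.from_texts(
        ["Typesense is a typo-tolerant search engine."],
        embedding,
        typesense_client_params={
            "host": "localhost",  # For Typesense Cloud use xxx.a1.typesense.net
            "port": "8108",  # For Typesense Cloud use 443
            "protocol": "http",  # For Typesense Cloud use https
            "typesense_api_key": "<API_KEY>",  # placeholder
        },
    )
    # Vector similarity search over the freshly indexed texts.
    return docsearch.similarity_search("typo tolerant search", k=1)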
2024-01-10
mth93/langchain
libs~langchain~langchain~evaluation~qa~eval_chain.py
"""LLM Chains for evaluating question answering.""" from __future__ import annotations import re import string from typing import Any, List, Optional, Sequence, Tuple from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.prompts import PromptTemplate from libs.core.langchain_core.pydantic_v1 import Extra from langchain.callbacks.manager import Callbacks from langchain.chains.llm import LLMChain from langchain.evaluation.qa.eval_prompt import CONTEXT_PROMPT, COT_PROMPT, PROMPT from langchain.evaluation.schema import LLMEvalChain, StringEvaluator from langchain.schema import RUN_KEY def _get_score(text: str) -> Optional[Tuple[str, int]]: match = re.search(r"grade:\s*(correct|incorrect)", text.strip(), re.IGNORECASE) if match: if match.group(1).upper() == "CORRECT": return "CORRECT", 1 elif match.group(1).upper() == "INCORRECT": return "INCORRECT", 0 try: first_word = ( text.strip().split()[0].translate(str.maketrans("", "", string.punctuation)) ) if first_word.upper() == "CORRECT": return "CORRECT", 1 elif first_word.upper() == "INCORRECT": return "INCORRECT", 0 last_word = ( text.strip() .split()[-1] .translate(str.maketrans("", "", string.punctuation)) ) if last_word.upper() == "CORRECT": return "CORRECT", 1 elif last_word.upper() == "INCORRECT": return "INCORRECT", 0 except IndexError: pass return None def _parse_string_eval_output(text: str) -> dict: """Parse the output text. Args: text (str): The output text to parse. Returns: Any: The parsed output. """ reasoning = text.strip() parsed_scores = _get_score(reasoning) if parsed_scores is None: value, score = None, None else: value, score = parsed_scores return { "reasoning": reasoning, "value": value, "score": score, } class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain): """LLM Chain for evaluating question answering.""" output_key: str = "results" #: :meta private: class Config: """Configuration for the QAEvalChain.""" extra = Extra.ignore @classmethod def is_lc_serializable(cls) -> bool: return False @property def evaluation_name(self) -> str: return "correctness" @property def requires_reference(self) -> bool: return True @property def requires_input(self) -> bool: return True @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, **kwargs: Any, ) -> QAEvalChain: """Load QA Eval Chain from LLM. Args: llm (BaseLanguageModel): the base language model to use. prompt (PromptTemplate): A prompt template containing the input_variables: 'input', 'answer' and 'result' that will be used as the prompt for evaluation. Defaults to PROMPT. **kwargs: additional keyword arguments. Returns: QAEvalChain: the loaded QA eval chain. 
""" prompt = prompt or PROMPT expected_input_vars = {"query", "answer", "result"} if expected_input_vars != set(prompt.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt.input_variables}" ) return cls(llm=llm, prompt=prompt, **kwargs) def evaluate( self, examples: Sequence[dict], predictions: Sequence[dict], question_key: str = "query", answer_key: str = "answer", prediction_key: str = "result", *, callbacks: Callbacks = None, ) -> List[dict]: """Evaluate question answering examples and predictions.""" inputs = [ { "query": example[question_key], "answer": example[answer_key], "result": predictions[i][prediction_key], } for i, example in enumerate(examples) ] return self.apply(inputs, callbacks=callbacks) def _prepare_output(self, result: dict) -> dict: parsed_result = _parse_string_eval_output(result[self.output_key]) if RUN_KEY in result: parsed_result[RUN_KEY] = result[RUN_KEY] return parsed_result def _evaluate_strings( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Evaluate Chain or LLM output, based on optional input and label. Args: prediction (str): the LLM or chain prediction to evaluate. reference (Optional[str], optional): the reference label to evaluate against. input (Optional[str], optional): the input to consider during evaluation callbacks (Callbacks, optional): the callbacks to use for tracing. include_run_info (bool, optional): whether to include run info in the returned results. **kwargs: additional keyword arguments, including callbacks, tags, etc. Returns: dict: The evaluation results containing the score or value. """ result = self( { "query": input, "answer": reference, "result": prediction, }, callbacks=callbacks, include_run_info=include_run_info, ) return self._prepare_output(result) async def _aevaluate_strings( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: result = await self.acall( inputs={"query": input, "answer": reference, "result": prediction}, callbacks=callbacks, include_run_info=include_run_info, ) return self._prepare_output(result) class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain): """LLM Chain for evaluating QA w/o GT based on context""" @classmethod def is_lc_serializable(cls) -> bool: return False @property def requires_reference(self) -> bool: """Whether the chain requires a reference string.""" return True @property def requires_input(self) -> bool: """Whether the chain requires an input string.""" return True class Config: """Configuration for the QAEvalChain.""" extra = Extra.ignore @classmethod def _validate_input_vars(cls, prompt: PromptTemplate) -> None: expected_input_vars = {"query", "context", "result"} if expected_input_vars != set(prompt.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt.input_variables}" ) @property def evaluation_name(self) -> str: return "Contextual Accuracy" @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, **kwargs: Any, ) -> ContextQAEvalChain: """Load QA Eval Chain from LLM. Args: llm (BaseLanguageModel): the base language model to use. 
prompt (PromptTemplate): A prompt template containing the input_variables: 'query', 'context' and 'result' that will be used as the prompt for evaluation. Defaults to PROMPT. **kwargs: additional keyword arguments. Returns: ContextQAEvalChain: the loaded QA eval chain. """ prompt = prompt or CONTEXT_PROMPT cls._validate_input_vars(prompt) return cls(llm=llm, prompt=prompt, **kwargs) def evaluate( self, examples: List[dict], predictions: List[dict], question_key: str = "query", context_key: str = "context", prediction_key: str = "result", *, callbacks: Callbacks = None, ) -> List[dict]: """Evaluate question answering examples and predictions.""" inputs = [ { "query": example[question_key], "context": example[context_key], "result": predictions[i][prediction_key], } for i, example in enumerate(examples) ] return self.apply(inputs, callbacks=callbacks) def _prepare_output(self, result: dict) -> dict: parsed_result = _parse_string_eval_output(result[self.output_key]) if RUN_KEY in result: parsed_result[RUN_KEY] = result[RUN_KEY] return parsed_result def _evaluate_strings( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: result = self( { "query": input, "context": reference, "result": prediction, }, callbacks=callbacks, include_run_info=include_run_info, ) return self._prepare_output(result) async def _aevaluate_strings( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: result = await self.acall( inputs={"query": input, "context": reference, "result": prediction}, callbacks=callbacks, include_run_info=include_run_info, ) return self._prepare_output(result) class CotQAEvalChain(ContextQAEvalChain): """LLM Chain for evaluating QA using chain of thought reasoning.""" @classmethod def is_lc_serializable(cls) -> bool: return False @property def evaluation_name(self) -> str: return "COT Contextual Accuracy" @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, **kwargs: Any, ) -> CotQAEvalChain: """Load QA Eval Chain from LLM.""" prompt = prompt or COT_PROMPT cls._validate_input_vars(prompt) return cls(llm=llm, prompt=prompt, **kwargs)
[]
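The eval chain file above has no usage example, so here is a minimal sketch of how `QAEvalChain.evaluate` pairs examples with predictions; the LLM instance and the sample data are assumptions supplied for illustration, not part of the source file.

# Hypothetical usage sketch for QAEvalChain (assumes the caller provides an LLM).
from langchain.evaluation.qa.eval_chain import QAEvalChain


def grade_predictions(llm):
    # `llm` is any BaseLanguageModel; the example/prediction data below is illustrative.
    examples = [{"query": "What is 2 + 2?", "answer": "4"}]
    predictions = [{"result": "The answer is 4."}]
    eval_chain = QAEvalChain.from_llm(llm)
    # `evaluate` pairs each example with the prediction at the same index and
    # returns one graded dict per pair (the grade text lives under "results").
    return eval_chain.evaluate(
        examples,
        predictions,
        question_key="query",
        answer_key="answer",
        prediction_key="result",
    )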
2024-01-10
mth93/langchain
libs~langchain~langchain~storage~file_system.py
import os import re from pathlib import Path from typing import Iterator, List, Optional, Sequence, Tuple, Union from libs.core.langchain_core.stores import ByteStore from langchain.storage.exceptions import InvalidKeyException class LocalFileStore(ByteStore): """BaseStore interface that works on the local file system. Examples: Create a LocalFileStore instance and perform operations on it: .. code-block:: python from langchain.storage import LocalFileStore # Instantiate the LocalFileStore with the root path file_store = LocalFileStore("/path/to/root") # Set values for keys file_store.mset([("key1", b"value1"), ("key2", b"value2")]) # Get values for keys values = file_store.mget(["key1", "key2"]) # Returns [b"value1", b"value2"] # Delete keys file_store.mdelete(["key1"]) # Iterate over keys for key in file_store.yield_keys(): print(key) """ def __init__(self, root_path: Union[str, Path]) -> None: """Implement the BaseStore interface for the local file system. Args: root_path (Union[str, Path]): The root path of the file store. All keys are interpreted as paths relative to this root. """ self.root_path = Path(root_path).absolute() def _get_full_path(self, key: str) -> Path: """Get the full path for a given key relative to the root path. Args: key (str): The key relative to the root path. Returns: Path: The full path for the given key. """ if not re.match(r"^[a-zA-Z0-9_.\-/]+$", key): raise InvalidKeyException(f"Invalid characters in key: {key}") full_path = os.path.abspath(self.root_path / key) common_path = os.path.commonpath([str(self.root_path), full_path]) if common_path != str(self.root_path): raise InvalidKeyException( f"Invalid key: {key}. Key should be relative to the full path." f"{self.root_path} vs. {common_path} and full path of {full_path}" ) return Path(full_path) def mget(self, keys: Sequence[str]) -> List[Optional[bytes]]: """Get the values associated with the given keys. Args: keys: A sequence of keys. Returns: A sequence of optional values associated with the keys. If a key is not found, the corresponding value will be None. """ values: List[Optional[bytes]] = [] for key in keys: full_path = self._get_full_path(key) if full_path.exists(): value = full_path.read_bytes() values.append(value) else: values.append(None) return values def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) -> None: """Set the values for the given keys. Args: key_value_pairs: A sequence of key-value pairs. Returns: None """ for key, value in key_value_pairs: full_path = self._get_full_path(key) full_path.parent.mkdir(parents=True, exist_ok=True) full_path.write_bytes(value) def mdelete(self, keys: Sequence[str]) -> None: """Delete the given keys and their associated values. Args: keys (Sequence[str]): A sequence of keys to delete. Returns: None """ for key in keys: full_path = self._get_full_path(key) if full_path.exists(): full_path.unlink() def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]: """Get an iterator over keys that match the given prefix. Args: prefix (Optional[str]): The prefix to match. Returns: Iterator[str]: An iterator over keys that match the given prefix. """ prefix_path = self._get_full_path(prefix) if prefix else self.root_path for file in prefix_path.rglob("*"): if file.is_file(): relative_path = file.relative_to(self.root_path) yield str(relative_path)
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~xinference.py
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Mapping, Optional, Union from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.llms import LLM if TYPE_CHECKING: from xinference.client import RESTfulChatModelHandle, RESTfulGenerateModelHandle from xinference.model.llm.core import LlamaCppGenerateConfig class Xinference(LLM): """`Xinference` large-scale model inference service. To use, you should have the xinference library installed: .. code-block:: bash pip install "xinference[all]" Check out: https://github.com/xorbitsai/inference To run, you need to start a Xinference supervisor on one server and Xinference workers on the other servers Example: To start a local instance of Xinference, run .. code-block:: bash $ xinference You can also deploy Xinference in a distributed cluster. Here are the steps: Starting the supervisor: .. code-block:: bash $ xinference-supervisor Starting the worker: .. code-block:: bash $ xinference-worker Then, launch a model using command line interface (CLI). Example: .. code-block:: bash $ xinference launch -n orca -s 3 -q q4_0 It will return a model UID. Then, you can use Xinference with LangChain. Example: .. code-block:: python from langchain_community.llms import Xinference llm = Xinference( server_url="http://0.0.0.0:9997", model_uid = {model_uid} # replace model_uid with the model UID return from launching the model ) llm( prompt="Q: where can we visit in the capital of France? A:", generate_config={"max_tokens": 1024, "stream": True}, ) To view all the supported builtin models, run: .. code-block:: bash $ xinference list --all """ # noqa: E501 client: Any server_url: Optional[str] """URL of the xinference server""" model_uid: Optional[str] """UID of the launched model""" model_kwargs: Dict[str, Any] """Keyword arguments to be passed to xinference.LLM""" def __init__( self, server_url: Optional[str] = None, model_uid: Optional[str] = None, **model_kwargs: Any, ): try: from xinference.client import RESTfulClient except ImportError as e: raise ImportError( "Could not import RESTfulClient from xinference. Please install it" " with `pip install xinference`." ) from e model_kwargs = model_kwargs or {} super().__init__( **{ "server_url": server_url, "model_uid": model_uid, "model_kwargs": model_kwargs, } ) if self.server_url is None: raise ValueError("Please provide server URL") if self.model_uid is None: raise ValueError("Please provide the model UID") self.client = RESTfulClient(server_url) @property def _llm_type(self) -> str: """Return type of llm.""" return "xinference" @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"server_url": self.server_url}, **{"model_uid": self.model_uid}, **{"model_kwargs": self.model_kwargs}, } def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the xinference model and return the output. Args: prompt: The prompt to use for generation. stop: Optional list of stop words to use when generating. generate_config: Optional dictionary for the configuration used for generation. Returns: The generated string by the model. 
""" model = self.client.get_model(self.model_uid) generate_config: "LlamaCppGenerateConfig" = kwargs.get("generate_config", {}) generate_config = {**self.model_kwargs, **generate_config} if stop: generate_config["stop"] = stop if generate_config and generate_config.get("stream"): combined_text_output = "" for token in self._stream_generate( model=model, prompt=prompt, run_manager=run_manager, generate_config=generate_config, ): combined_text_output += token return combined_text_output else: completion = model.generate(prompt=prompt, generate_config=generate_config) return completion["choices"][0]["text"] def _stream_generate( self, model: Union["RESTfulGenerateModelHandle", "RESTfulChatModelHandle"], prompt: str, run_manager: Optional[CallbackManagerForLLMRun] = None, generate_config: Optional["LlamaCppGenerateConfig"] = None, ) -> Generator[str, None, None]: """ Args: prompt: The prompt to use for generation. model: The model used for generation. stop: Optional list of stop words to use when generating. generate_config: Optional dictionary for the configuration used for generation. Yields: A string token. """ streaming_response = model.generate( prompt=prompt, generate_config=generate_config ) for chunk in streaming_response: if isinstance(chunk, dict): choices = chunk.get("choices", []) if choices: choice = choices[0] if isinstance(choice, dict): token = choice.get("text", "") log_probs = choice.get("logprobs") if run_manager: run_manager.on_llm_new_token( token=token, verbose=self.verbose, log_probs=log_probs ) yield token
[]
2024-01-10
mth93/langchain
libs~langchain~tests~integration_tests~retrievers~document_compressors~test_base.py
"""Integration test for compression pipelines.""" from libs.core.langchain_core.documents import Document from langchain.document_transformers import EmbeddingsRedundantFilter from langchain.embeddings import OpenAIEmbeddings from langchain.retrievers.document_compressors import ( DocumentCompressorPipeline, EmbeddingsFilter, ) from langchain.text_splitter import CharacterTextSplitter def test_document_compressor_pipeline() -> None: embeddings = OpenAIEmbeddings() splitter = CharacterTextSplitter(chunk_size=20, chunk_overlap=0, separator=". ") redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings) relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.8) pipeline_filter = DocumentCompressorPipeline( transformers=[splitter, redundant_filter, relevant_filter] ) texts = [ "This sentence is about cows", "This sentence was about cows", "foo bar baz", ] docs = [Document(page_content=". ".join(texts))] actual = pipeline_filter.compress_documents(docs, "Tell me about farm animals") assert len(actual) == 1 assert actual[0].page_content in texts[:2]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~agent_toolkits~github~toolkit.py
"""GitHub Toolkit.""" from typing import Dict, List from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from langchain_community.agent_toolkits.base import BaseToolkit from langchain_community.tools import BaseTool from langchain_community.tools.github.prompt import ( COMMENT_ON_ISSUE_PROMPT, CREATE_BRANCH_PROMPT, CREATE_FILE_PROMPT, CREATE_PULL_REQUEST_PROMPT, CREATE_REVIEW_REQUEST_PROMPT, DELETE_FILE_PROMPT, GET_FILES_FROM_DIRECTORY_PROMPT, GET_ISSUE_PROMPT, GET_ISSUES_PROMPT, GET_PR_PROMPT, LIST_BRANCHES_IN_REPO_PROMPT, LIST_PRS_PROMPT, LIST_PULL_REQUEST_FILES, OVERVIEW_EXISTING_FILES_BOT_BRANCH, OVERVIEW_EXISTING_FILES_IN_MAIN, READ_FILE_PROMPT, SEARCH_CODE_PROMPT, SEARCH_ISSUES_AND_PRS_PROMPT, SET_ACTIVE_BRANCH_PROMPT, UPDATE_FILE_PROMPT, ) from langchain_community.tools.github.tool import GitHubAction from langchain_community.utilities.github import GitHubAPIWrapper class NoInput(BaseModel): """Schema for operations that do not require any input.""" no_input: str = Field("", description="No input required, e.g. `` (empty string).") class GetIssue(BaseModel): """Schema for operations that require an issue number as input.""" issue_number: int = Field(0, description="Issue number as an integer, e.g. `42`") class CommentOnIssue(BaseModel): """Schema for operations that require a comment as input.""" input: str = Field(..., description="Follow the required formatting.") class GetPR(BaseModel): """Schema for operations that require a PR number as input.""" pr_number: int = Field(0, description="The PR number as an integer, e.g. `12`") class CreatePR(BaseModel): """Schema for operations that require a PR title and body as input.""" formatted_pr: str = Field(..., description="Follow the required formatting.") class CreateFile(BaseModel): """Schema for operations that require a file path and content as input.""" formatted_file: str = Field(..., description="Follow the required formatting.") class ReadFile(BaseModel): """Schema for operations that require a file path as input.""" formatted_filepath: str = Field( ..., description=( "The full file path of the file you would like to read where the " "path must NOT start with a slash, e.g. `some_dir/my_file.py`." ), ) class UpdateFile(BaseModel): """Schema for operations that require a file path and content as input.""" formatted_file_update: str = Field( ..., description="Strictly follow the provided rules." ) class DeleteFile(BaseModel): """Schema for operations that require a file path as input.""" formatted_filepath: str = Field( ..., description=( "The full file path of the file you would like to delete" " where the path must NOT start with a slash, e.g." " `some_dir/my_file.py`. Only input a string," " not the param name." ), ) class DirectoryPath(BaseModel): """Schema for operations that require a directory path as input.""" input: str = Field( "", description=( "The path of the directory, e.g. `some_dir/inner_dir`." " Only input a string, do not include the parameter name." ), ) class BranchName(BaseModel): """Schema for operations that require a branch name as input.""" branch_name: str = Field( ..., description="The name of the branch, e.g. `my_branch`." ) class SearchCode(BaseModel): """Schema for operations that require a search query as input.""" search_query: str = Field( ..., description=( "A keyword-focused natural language search" "query for code, e.g. `MyFunctionName()`." 
), ) class CreateReviewRequest(BaseModel): """Schema for operations that require a username as input.""" username: str = Field( ..., description="GitHub username of the user being requested, e.g. `my_username`.", ) class SearchIssuesAndPRs(BaseModel): """Schema for operations that require a search query as input.""" search_query: str = Field( ..., description="Natural language search query, e.g. `My issue title or topic`.", ) class GitHubToolkit(BaseToolkit): """GitHub Toolkit. *Security Note*: This toolkit contains tools that can read and modify the state of a service; e.g., by creating, deleting, or updating, reading underlying data. For example, this toolkit can be used to create issues, pull requests, and comments on GitHub. See [Security](https://python.langchain.com/docs/security) for more information. """ tools: List[BaseTool] = [] @classmethod def from_github_api_wrapper( cls, github_api_wrapper: GitHubAPIWrapper ) -> "GitHubToolkit": operations: List[Dict] = [ { "mode": "get_issues", "name": "Get Issues", "description": GET_ISSUES_PROMPT, "args_schema": NoInput, }, { "mode": "get_issue", "name": "Get Issue", "description": GET_ISSUE_PROMPT, "args_schema": GetIssue, }, { "mode": "comment_on_issue", "name": "Comment on Issue", "description": COMMENT_ON_ISSUE_PROMPT, "args_schema": CommentOnIssue, }, { "mode": "list_open_pull_requests", "name": "List open pull requests (PRs)", "description": LIST_PRS_PROMPT, "args_schema": NoInput, }, { "mode": "get_pull_request", "name": "Get Pull Request", "description": GET_PR_PROMPT, "args_schema": GetPR, }, { "mode": "list_pull_request_files", "name": "Overview of files included in PR", "description": LIST_PULL_REQUEST_FILES, "args_schema": GetPR, }, { "mode": "create_pull_request", "name": "Create Pull Request", "description": CREATE_PULL_REQUEST_PROMPT, "args_schema": CreatePR, }, { "mode": "list_pull_request_files", "name": "List Pull Requests' Files", "description": LIST_PULL_REQUEST_FILES, "args_schema": GetPR, }, { "mode": "create_file", "name": "Create File", "description": CREATE_FILE_PROMPT, "args_schema": CreateFile, }, { "mode": "read_file", "name": "Read File", "description": READ_FILE_PROMPT, "args_schema": ReadFile, }, { "mode": "update_file", "name": "Update File", "description": UPDATE_FILE_PROMPT, "args_schema": UpdateFile, }, { "mode": "delete_file", "name": "Delete File", "description": DELETE_FILE_PROMPT, "args_schema": DeleteFile, }, { "mode": "list_files_in_main_branch", "name": "Overview of existing files in Main branch", "description": OVERVIEW_EXISTING_FILES_IN_MAIN, "args_schema": NoInput, }, { "mode": "list_files_in_bot_branch", "name": "Overview of files in current working branch", "description": OVERVIEW_EXISTING_FILES_BOT_BRANCH, "args_schema": NoInput, }, { "mode": "list_branches_in_repo", "name": "List branches in this repository", "description": LIST_BRANCHES_IN_REPO_PROMPT, "args_schema": NoInput, }, { "mode": "set_active_branch", "name": "Set active branch", "description": SET_ACTIVE_BRANCH_PROMPT, "args_schema": BranchName, }, { "mode": "create_branch", "name": "Create a new branch", "description": CREATE_BRANCH_PROMPT, "args_schema": BranchName, }, { "mode": "get_files_from_directory", "name": "Get files from a directory", "description": GET_FILES_FROM_DIRECTORY_PROMPT, "args_schema": DirectoryPath, }, { "mode": "search_issues_and_prs", "name": "Search issues and pull requests", "description": SEARCH_ISSUES_AND_PRS_PROMPT, "args_schema": SearchIssuesAndPRs, }, { "mode": "search_code", "name": "Search code", 
"description": SEARCH_CODE_PROMPT, "args_schema": SearchCode, }, { "mode": "create_review_request", "name": "Create review request", "description": CREATE_REVIEW_REQUEST_PROMPT, "args_schema": CreateReviewRequest, }, ] tools = [ GitHubAction( name=action["name"], description=action["description"], mode=action["mode"], api_wrapper=github_api_wrapper, args_schema=action.get("args_schema", None), ) for action in operations ] return cls(tools=tools) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return self.tools
[]
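A minimal sketch of wiring the toolkit above into an agent setup; it assumes the GitHub credentials that `GitHubAPIWrapper` expects are already configured in the environment, and it only lists the generated tools rather than running them.

# Hypothetical usage sketch for GitHubToolkit; credentials are read from the
# environment by GitHubAPIWrapper (assumption, not shown in the source above).
from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit
from langchain_community.utilities.github import GitHubAPIWrapper

github = GitHubAPIWrapper()
toolkit = GitHubToolkit.from_github_api_wrapper(github)
for tool in toolkit.get_tools():
    # Each tool is a GitHubAction whose name and description come from the
    # operations table defined in the toolkit above.
    print(tool.name)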
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~news.py
"""Loader that uses unstructured to load HTML files.""" import logging from typing import Any, Iterator, List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) class NewsURLLoader(BaseLoader): """Load news articles from URLs using `Unstructured`. Args: urls: URLs to load. Each is loaded into its own document. text_mode: If True, extract text from URL and use that for page content. Otherwise, extract raw HTML. nlp: If True, perform NLP on the extracted contents, like providing a summary and extracting keywords. continue_on_failure: If True, continue loading documents even if loading fails for a particular URL. show_progress_bar: If True, use tqdm to show a loading progress bar. Requires tqdm to be installed, ``pip install tqdm``. **newspaper_kwargs: Any additional named arguments to pass to newspaper.Article(). Example: .. code-block:: python from langchain_community.document_loaders import NewsURLLoader loader = NewsURLLoader( urls=["<url-1>", "<url-2>"], ) docs = loader.load() Newspaper reference: https://newspaper.readthedocs.io/en/latest/ """ def __init__( self, urls: List[str], text_mode: bool = True, nlp: bool = False, continue_on_failure: bool = True, show_progress_bar: bool = False, **newspaper_kwargs: Any, ) -> None: """Initialize with file path.""" try: import newspaper # noqa:F401 self.__version = newspaper.__version__ except ImportError: raise ImportError( "newspaper package not found, please install it with " "`pip install newspaper3k`" ) self.urls = urls self.text_mode = text_mode self.nlp = nlp self.continue_on_failure = continue_on_failure self.newspaper_kwargs = newspaper_kwargs self.show_progress_bar = show_progress_bar def load(self) -> List[Document]: iter = self.lazy_load() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. " "Please install with 'pip install tqdm' or set " "show_progress_bar=False." ) from e iter = tqdm(iter) return list(iter) def lazy_load(self) -> Iterator[Document]: try: from newspaper import Article except ImportError as e: raise ImportError( "Cannot import newspaper, please install with `pip install newspaper3k`" ) from e for url in self.urls: try: article = Article(url, **self.newspaper_kwargs) article.download() article.parse() if self.nlp: article.nlp() except Exception as e: if self.continue_on_failure: logger.error(f"Error fetching or processing {url}, exception: {e}") continue else: raise e metadata = { "title": getattr(article, "title", ""), "link": getattr(article, "url", getattr(article, "canonical_link", "")), "authors": getattr(article, "authors", []), "language": getattr(article, "meta_lang", ""), "description": getattr(article, "meta_description", ""), "publish_date": getattr(article, "publish_date", ""), } if self.text_mode: content = article.text else: content = article.html if self.nlp: metadata["keywords"] = getattr(article, "keywords", []) metadata["summary"] = getattr(article, "summary", "") yield Document(page_content=content, metadata=metadata)
[]
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~agents~test_initialize.py
"""Test the initialize module.""" from libs.core.langchain_core.tools import tool from langchain.agents.agent_types import AgentType from langchain.agents.initialize import initialize_agent from tests.unit_tests.llms.fake_llm import FakeLLM @tool def my_tool(query: str) -> str: """A fake tool.""" return "fake tool" def test_initialize_agent_with_str_agent_type() -> None: """Test initialize_agent with a string.""" fake_llm = FakeLLM() agent_executor = initialize_agent( [my_tool], # type: ignore[list-item] fake_llm, "zero-shot-react-description", # type: ignore[arg-type] ) assert agent_executor.agent._agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION assert isinstance(agent_executor.tags, list) assert "zero-shot-react-description" in agent_executor.tags
[ "A fake tool." ]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~yandex.py
from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Mapping, Optional from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.load.serializable import Serializable from libs.core.langchain_core.pydantic_v1 import root_validator from libs.core.langchain_core.utils import get_from_dict_or_env from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain_community.llms.utils import enforce_stop_tokens logger = logging.getLogger(__name__) class _BaseYandexGPT(Serializable): iam_token: str = "" """Yandex Cloud IAM token for service or user account with the `ai.languageModels.user` role""" api_key: str = "" """Yandex Cloud Api Key for service account with the `ai.languageModels.user` role""" folder_id: str = "" """Yandex Cloud folder ID""" model_uri: str = "" """Model uri to use.""" model_name: str = "yandexgpt-lite" """Model name to use.""" model_version: str = "latest" """Model version to use.""" temperature: float = 0.6 """What sampling temperature to use. Should be a double number between 0 (inclusive) and 1 (inclusive).""" max_tokens: int = 7400 """Sets the maximum limit on the total number of tokens used for both the input prompt and the generated response. Must be greater than zero and not exceed 7400 tokens.""" stop: Optional[List[str]] = None """Sequences when completion generation will stop.""" url: str = "llm.api.cloud.yandex.net:443" """The url of the API.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" @property def _llm_type(self) -> str: return "yandex_gpt" @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_uri": self.model_uri, "temperature": self.temperature, "max_tokens": self.max_tokens, "stop": self.stop, "max_retries": self.max_retries, } @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that iam token exists in environment.""" iam_token = get_from_dict_or_env(values, "iam_token", "YC_IAM_TOKEN", "") values["iam_token"] = iam_token api_key = get_from_dict_or_env(values, "api_key", "YC_API_KEY", "") values["api_key"] = api_key folder_id = get_from_dict_or_env(values, "folder_id", "YC_FOLDER_ID", "") values["folder_id"] = folder_id if api_key == "" and iam_token == "": raise ValueError("Either 'YC_API_KEY' or 'YC_IAM_TOKEN' must be provided.") if values["iam_token"]: values["_grpc_metadata"] = [ ("authorization", f"Bearer {values['iam_token']}") ] if values["folder_id"]: values["_grpc_metadata"].append(("x-folder-id", values["folder_id"])) else: values["_grpc_metadata"] = ( ("authorization", f"Api-Key {values['api_key']}"), ) if values["model_uri"] == "" and values["folder_id"] == "": raise ValueError("Either 'model_uri' or 'folder_id' must be provided.") if not values["model_uri"]: values[ "model_uri" ] = f"gpt://{values['folder_id']}/{values['model_name']}/{values['model_version']}" return values class YandexGPT(_BaseYandexGPT, LLM): """Yandex large language models. To use, you should have the ``yandexcloud`` python package installed. There are two authentication options for the service account with the ``ai.languageModels.user`` role: - You can specify the token in a constructor parameter `iam_token` or in an environment variable `YC_IAM_TOKEN`. 
- You can specify the key in a constructor parameter `api_key` or in an environment variable `YC_API_KEY`. To use the default model specify the folder ID in a parameter `folder_id` or in an environment variable `YC_FOLDER_ID`. Or specify the model URI in a constructor parameter `model_uri` Example: .. code-block:: python from langchain_community.llms import YandexGPT yandex_gpt = YandexGPT(iam_token="t1.9eu...", folder_id="b1g...") """ def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the Yandex GPT model and return the output. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = YandexGPT("Tell me a joke.") """ text = completion_with_retry(self, prompt=prompt) if stop is not None: text = enforce_stop_tokens(text, stop) return text async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Async call the Yandex GPT model and return the output. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. """ text = await acompletion_with_retry(self, prompt=prompt) if stop is not None: text = enforce_stop_tokens(text, stop) return text def _make_request( self: YandexGPT, prompt: str, ) -> str: try: import grpc from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import ( CompletionOptions, Message, ) from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import ( # noqa: E501 CompletionRequest, ) from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import ( # noqa: E501 TextGenerationServiceStub, ) except ImportError as e: raise ImportError( "Please install YandexCloud SDK" " with `pip install yandexcloud`." ) from e channel_credentials = grpc.ssl_channel_credentials() channel = grpc.secure_channel(self.url, channel_credentials) request = CompletionRequest( model_uri=self.model_uri, completion_options=CompletionOptions( temperature=DoubleValue(value=self.temperature), max_tokens=Int64Value(value=self.max_tokens), ), messages=[Message(role="user", text=prompt)], ) stub = TextGenerationServiceStub(channel) res = stub.Completion(request, metadata=self._grpc_metadata) return list(res)[0].alternatives[0].message.text async def _amake_request(self: YandexGPT, prompt: str) -> str: try: import asyncio import grpc from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import ( CompletionOptions, Message, ) from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import ( # noqa: E501 CompletionRequest, CompletionResponse, ) from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import ( # noqa: E501 TextGenerationAsyncServiceStub, ) from yandex.cloud.operation.operation_service_pb2 import GetOperationRequest from yandex.cloud.operation.operation_service_pb2_grpc import ( OperationServiceStub, ) except ImportError as e: raise ImportError( "Please install YandexCloud SDK" " with `pip install yandexcloud`." 
) from e operation_api_url = "operation.api.cloud.yandex.net:443" channel_credentials = grpc.ssl_channel_credentials() async with grpc.aio.secure_channel(self.url, channel_credentials) as channel: request = CompletionRequest( model_uri=self.model_uri, completion_options=CompletionOptions( temperature=DoubleValue(value=self.temperature), max_tokens=Int64Value(value=self.max_tokens), ), messages=[Message(role="user", text=prompt)], ) stub = TextGenerationAsyncServiceStub(channel) operation = await stub.Completion(request, metadata=self._grpc_metadata) async with grpc.aio.secure_channel( operation_api_url, channel_credentials ) as operation_channel: operation_stub = OperationServiceStub(operation_channel) while not operation.done: await asyncio.sleep(1) operation_request = GetOperationRequest(operation_id=operation.id) operation = await operation_stub.Get( operation_request, metadata=self._grpc_metadata ) completion_response = CompletionResponse() operation.response.Unpack(completion_response) return completion_response.alternatives[0].message.text def _create_retry_decorator(llm: YandexGPT) -> Callable[[Any], Any]: from grpc import RpcError min_seconds = 1 max_seconds = 60 return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type((RpcError))), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: YandexGPT, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**_kwargs: Any) -> Any: return _make_request(llm, **_kwargs) return _completion_with_retry(**kwargs) async def acompletion_with_retry(llm: YandexGPT, **kwargs: Any) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**_kwargs: Any) -> Any: return await _amake_request(llm, **_kwargs) return await _completion_with_retry(**kwargs)
[]
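A minimal call sketch for the YandexGPT wrapper above; the API key and folder id are placeholders, and the `yandexcloud` package must be installed for the request to go through.

# Hypothetical call sketch; values in angle brackets are placeholders.
from langchain_community.llms import YandexGPT

llm = YandexGPT(api_key="<YC_API_KEY>", folder_id="<YC_FOLDER_ID>", temperature=0.2)
print(llm.invoke("Write a one-line greeting."))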
2024-01-10
mth93/langchain
libs~community~langchain_community~utilities~arxiv.py
"""Util that calls Arxiv.""" import logging import os import re from typing import Any, Dict, List, Optional from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator logger = logging.getLogger(__name__) class ArxivAPIWrapper(BaseModel): """Wrapper around ArxivAPI. To use, you should have the ``arxiv`` python package installed. https://lukasschwab.me/arxiv.py/index.html This wrapper will use the Arxiv API to conduct searches and fetch document summaries. By default, it will return the document summaries of the top-k results. If the query is in the form of arxiv identifier (see https://info.arxiv.org/help/find/index.html), it will return the paper corresponding to the arxiv identifier. It limits the Document content by doc_content_chars_max. Set doc_content_chars_max=None if you don't want to limit the content size. Attributes: top_k_results: number of the top-scored document used for the arxiv tool ARXIV_MAX_QUERY_LENGTH: the cut limit on the query used for the arxiv tool. load_max_docs: a limit to the number of loaded documents load_all_available_meta: if True: the `metadata` of the loaded Documents contains all available meta info (see https://lukasschwab.me/arxiv.py/index.html#Result), if False: the `metadata` contains only the published date, title, authors and summary. doc_content_chars_max: an optional cut limit for the length of a document's content Example: .. code-block:: python from langchain_community.utilities.arxiv import ArxivAPIWrapper arxiv = ArxivAPIWrapper( top_k_results = 3, ARXIV_MAX_QUERY_LENGTH = 300, load_max_docs = 3, load_all_available_meta = False, doc_content_chars_max = 40000 ) arxiv.run("tree of thought llm) """ arxiv_search: Any #: :meta private: arxiv_exceptions: Any # :meta private: top_k_results: int = 3 ARXIV_MAX_QUERY_LENGTH: int = 300 load_max_docs: int = 100 load_all_available_meta: bool = False doc_content_chars_max: Optional[int] = 4000 def is_arxiv_identifier(self, query: str) -> bool: """Check if a query is an arxiv identifier.""" arxiv_identifier_pattern = r"\d{2}(0[1-9]|1[0-2])\.\d{4,5}(v\d+|)|\d{7}.*" for query_item in query[: self.ARXIV_MAX_QUERY_LENGTH].split(): match_result = re.match(arxiv_identifier_pattern, query_item) if not match_result: return False assert match_result is not None if not match_result.group(0) == query_item: return False return True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: import arxiv values["arxiv_search"] = arxiv.Search values["arxiv_exceptions"] = ( arxiv.ArxivError, arxiv.UnexpectedEmptyPageError, arxiv.HTTPError, ) values["arxiv_result"] = arxiv.Result except ImportError: raise ImportError( "Could not import arxiv python package. " "Please install it with `pip install arxiv`." ) return values def get_summaries_as_docs(self, query: str) -> List[Document]: """ Performs an arxiv search and returns list of documents, with summaries as the content. If an error occurs or no documents found, error text is returned instead. 
        Wrapper for https://lukasschwab.me/arxiv.py/index.html#Search

        Args:
            query: a plaintext search query
        """  # noqa: E501
        try:
            if self.is_arxiv_identifier(query):
                results = self.arxiv_search(
                    id_list=query.split(),
                    max_results=self.top_k_results,
                ).results()
            else:
                results = self.arxiv_search(  # type: ignore
                    query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results
                ).results()
        except self.arxiv_exceptions as ex:
            return [Document(page_content=f"Arxiv exception: {ex}")]
        docs = [
            Document(
                page_content=result.summary,
                metadata={
                    "Entry ID": result.entry_id,
                    "Published": result.updated.date(),
                    "Title": result.title,
                    "Authors": ", ".join(a.name for a in result.authors),
                },
            )
            for result in results
        ]
        return docs

    def run(self, query: str) -> str:
        """
        Performs an arxiv search and returns a single string with the publish date,
        title, authors, and summary for each article, separated by two newlines.

        If an error occurs or no documents found, error text
        is returned instead.

        Wrapper for https://lukasschwab.me/arxiv.py/index.html#Search

        Args:
            query: a plaintext search query
        """  # noqa: E501
        try:
            if self.is_arxiv_identifier(query):
                results = self.arxiv_search(
                    id_list=query.split(),
                    max_results=self.top_k_results,
                ).results()
            else:
                results = self.arxiv_search(  # type: ignore
                    query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results
                ).results()
        except self.arxiv_exceptions as ex:
            return f"Arxiv exception: {ex}"
        docs = [
            f"Published: {result.updated.date()}\n"
            f"Title: {result.title}\n"
            f"Authors: {', '.join(a.name for a in result.authors)}\n"
            f"Summary: {result.summary}"
            for result in results
        ]
        if docs:
            return "\n\n".join(docs)[: self.doc_content_chars_max]
        else:
            return "No good Arxiv Result was found"

    def load(self, query: str) -> List[Document]:
        """
        Run Arxiv search and get the article texts plus the article meta information.
        See https://lukasschwab.me/arxiv.py/index.html#Search

        Returns: a list of documents with the document.page_content in text format

        Performs an arxiv search, downloads the top k results as PDFs, loads
        them as Documents, and returns them in a List.
Args: query: a plaintext search query """ # noqa: E501 try: import fitz except ImportError: raise ImportError( "PyMuPDF package not found, please install it with " "`pip install pymupdf`" ) try: # Remove the ":" and "-" from the query, as they can cause search problems query = query.replace(":", "").replace("-", "") if self.is_arxiv_identifier(query): results = self.arxiv_search( id_list=query[: self.ARXIV_MAX_QUERY_LENGTH].split(), max_results=self.load_max_docs, ).results() else: results = self.arxiv_search( # type: ignore query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.load_max_docs ).results() except self.arxiv_exceptions as ex: logger.debug("Error on arxiv: %s", ex) return [] docs: List[Document] = [] for result in results: try: doc_file_name: str = result.download_pdf() with fitz.open(doc_file_name) as doc_file: text: str = "".join(page.get_text() for page in doc_file) except (FileNotFoundError, fitz.fitz.FileDataError) as f_ex: logger.debug(f_ex) continue if self.load_all_available_meta: extra_metadata = { "entry_id": result.entry_id, "published_first_time": str(result.published.date()), "comment": result.comment, "journal_ref": result.journal_ref, "doi": result.doi, "primary_category": result.primary_category, "categories": result.categories, "links": [link.href for link in result.links], } else: extra_metadata = {} metadata = { "Published": str(result.updated.date()), "Title": result.title, "Authors": ", ".join(a.name for a in result.authors), "Summary": result.summary, **extra_metadata, } doc = Document( page_content=text[: self.doc_content_chars_max], metadata=metadata ) docs.append(doc) os.remove(doc_file_name) return docs
[]
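The methods above differ mainly in return type: `run` formats results as one string for tool use, while the summary and `load` helpers return `Document` objects. A minimal usage sketch follows; it assumes the surrounding class is the `ArxivAPIWrapper` exposed as `langchain_community.utilities.arxiv.ArxivAPIWrapper` (the class header is not shown here) and that the `arxiv` and `pymupdf` packages are installed. The queries are illustrative only.

from langchain_community.utilities.arxiv import ArxivAPIWrapper

# Field names follow the pydantic model assumed above; the values are examples.
arxiv = ArxivAPIWrapper(top_k_results=2, load_max_docs=2, doc_content_chars_max=4000)

# run(): one formatted block per article, separated by blank lines.
print(arxiv.run("quantum error correction"))

# load(): downloads the PDFs and returns Document objects with the full text.
docs = arxiv.load("2303.08774")  # an arXiv identifier is also accepted
for doc in docs:
    print(doc.metadata["Title"])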
2024-01-10
mth93/langchain
libs~langchain~tests~integration_tests~prompts~test_ngram_overlap_example_selector.py
"""Test functionality related to ngram overlap based selector.""" import pytest from libs.core.langchain_core.prompts import PromptTemplate from langchain.prompts.example_selector.ngram_overlap import ( NGramOverlapExampleSelector, ngram_overlap_score, ) EXAMPLES = [ {"input": "See Spot run.", "output": "foo1"}, {"input": "My dog barks.", "output": "foo2"}, {"input": "Spot can run.", "output": "foo3"}, ] @pytest.fixture def selector() -> NGramOverlapExampleSelector: """Get ngram overlap based selector to use in tests.""" prompts = PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}" ) selector = NGramOverlapExampleSelector( examples=EXAMPLES, example_prompt=prompts, ) return selector def test_selector_valid(selector: NGramOverlapExampleSelector) -> None: """Test NGramOverlapExampleSelector can select examples.""" sentence = "Spot can run." output = selector.select_examples({"input": sentence}) assert output == [EXAMPLES[2], EXAMPLES[0], EXAMPLES[1]] def test_selector_add_example(selector: NGramOverlapExampleSelector) -> None: """Test NGramOverlapExampleSelector can add an example.""" new_example = {"input": "Spot plays fetch.", "output": "foo4"} selector.add_example(new_example) sentence = "Spot can run." output = selector.select_examples({"input": sentence}) assert output == [EXAMPLES[2], EXAMPLES[0]] + [new_example] + [EXAMPLES[1]] def test_selector_threshold_zero(selector: NGramOverlapExampleSelector) -> None: """Tests NGramOverlapExampleSelector threshold set to 0.0.""" selector.threshold = 0.0 sentence = "Spot can run." output = selector.select_examples({"input": sentence}) assert output == [EXAMPLES[2], EXAMPLES[0]] def test_selector_threshold_more_than_one( selector: NGramOverlapExampleSelector, ) -> None: """Tests NGramOverlapExampleSelector threshold greater than 1.0.""" selector.threshold = 1.0 + 1e-9 sentence = "Spot can run." output = selector.select_examples({"input": sentence}) assert output == [] def test_ngram_overlap_score(selector: NGramOverlapExampleSelector) -> None: """Tests that ngram_overlap_score returns correct values.""" selector.threshold = 1.0 + 1e-9 none = ngram_overlap_score(["Spot can run."], ["My dog barks."]) some = ngram_overlap_score(["Spot can run."], ["See Spot run."]) complete = ngram_overlap_score(["Spot can run."], ["Spot can run."]) check = [abs(none - 0.0) < 1e-9, 0.0 < some < 1.0, abs(complete - 1.0) < 1e-9] assert check == [True, True, True]
[ "Input: {input}\nOutput: {output}", "input" ]
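Outside of the tests, the selector is usually plugged into a FewShotPromptTemplate so the most n-gram-similar examples appear first. A sketch under that assumption (nltk and numpy must be installed for the overlap score); a negative threshold keeps every example and only reorders them, matching the selector default exercised above.

from libs.core.langchain_core.prompts import FewShotPromptTemplate, PromptTemplate

from langchain.prompts.example_selector.ngram_overlap import (
    NGramOverlapExampleSelector,
)

example_prompt = PromptTemplate(
    input_variables=["input", "output"], template="Input: {input}\nOutput: {output}"
)
selector = NGramOverlapExampleSelector(
    examples=[
        {"input": "See Spot run.", "output": "foo1"},
        {"input": "My dog barks.", "output": "foo2"},
        {"input": "Spot can run.", "output": "foo3"},
    ],
    example_prompt=example_prompt,
    threshold=-1.0,  # keep all examples, ordered by decreasing overlap
)
prompt = FewShotPromptTemplate(
    example_selector=selector,
    example_prompt=example_prompt,
    prefix="Give the output that corresponds to each input.",
    suffix="Input: {sentence}\nOutput:",
    input_variables=["sentence"],
)
print(prompt.format(sentence="Spot can run fast."))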
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~edenai~image_objectdetection.py
from __future__ import annotations

import logging
from typing import Optional

from libs.core.langchain_core.callbacks import CallbackManagerForToolRun

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class EdenAiObjectDetectionTool(EdenaiTool):
    """Tool that queries the Eden AI Object detection API.

    For API reference, check the edenai documentation:
    https://docs.edenai.co/reference/image_object_detection_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name = "edenai_object_detection"

    description = (
        "A wrapper around edenai Services Object Detection. "
        """Useful for when you have to identify and locate
        (with bounding boxes) objects in an image """
        "Input should be the string url of the image to identify."
    )

    show_positions: bool = False

    feature = "image"
    subfeature = "object_detection"

    def _parse_json(self, json_data: dict) -> str:
        result = []
        label_info = []

        for found_obj in json_data["items"]:
            label_str = f"{found_obj['label']} - Confidence {found_obj['confidence']}"
            x_min = found_obj.get("x_min")
            x_max = found_obj.get("x_max")
            y_min = found_obj.get("y_min")
            y_max = found_obj.get("y_max")

            if self.show_positions and all(
                [x_min, x_max, y_min, y_max]
            ):  # some providers don't return positions
                label_str += f""",at the position x_min: {x_min}, x_max: {x_max},
                y_min: {y_min}, y_max: {y_max}"""
            label_info.append(label_str)

        result.append("\n".join(label_info))
        return "\n\n".join(result)

    def _parse_response(self, response: list) -> str:
        if len(response) == 1:
            result = self._parse_json(response[0])
        else:
            for entry in response:
                if entry.get("provider") == "eden-ai":
                    result = self._parse_json(entry)

        return result

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        query_params = {"file_url": query, "attributes_as_list": False}
        return self._call_eden_ai(query_params)
[]
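A hypothetical invocation of the tool above, not taken from the source: it assumes a valid key in the EDENAI_API_KEY environment variable, a reachable image URL, and that the provider list accepted by the EdenaiTool base is set to a single provider. The key, provider, and URL are placeholders.

import os

from langchain_community.tools.edenai import EdenAiObjectDetectionTool

os.environ["EDENAI_API_KEY"] = "<your-edenai-api-key>"  # placeholder

tool = EdenAiObjectDetectionTool(providers=["google"], show_positions=True)
# Returns one "label - Confidence ..." line per detected object, with bounding
# box coordinates appended when the provider reports them.
print(tool.run("https://example.com/street-scene.jpg"))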
2024-01-10
mth93/langchain
libs~community~tests~unit_tests~llms~test_anyscale.py
"""Test Anyscale llm""" import pytest from libs.core.langchain_core.pydantic_v1 import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.anyscale import Anyscale @pytest.mark.requires("openai") def test_api_key_is_secret_string() -> None: llm = Anyscale( anyscale_api_key="secret-api-key", anyscale_api_base="test", model_name="test" ) assert isinstance(llm.anyscale_api_key, SecretStr) @pytest.mark.requires("openai") def test_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: """Test initialization with an API key provided via an env variable""" monkeypatch.setenv("ANYSCALE_API_KEY", "secret-api-key") llm = Anyscale(anyscale_api_base="test", model_name="test") print(llm.anyscale_api_key, end="") captured = capsys.readouterr() assert captured.out == "**********" @pytest.mark.requires("openai") def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test initialization with an API key provided via the initializer""" llm = Anyscale( anyscale_api_key="secret-api-key", anyscale_api_base="test", model_name="test" ) print(llm.anyscale_api_key, end="") captured = capsys.readouterr() assert captured.out == "**********"
[]
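What these assertions exercise, sketched outside pytest: the key is wrapped in a SecretStr, so printing it yields a mask and the raw value must be requested explicitly. The base URL and model name below are placeholders, not values from the source.

from langchain_community.llms.anyscale import Anyscale

llm = Anyscale(
    anyscale_api_key="secret-api-key",
    anyscale_api_base="https://api.endpoints.anyscale.com/v1",  # placeholder
    model_name="meta-llama/Llama-2-7b-chat-hf",  # placeholder
)
print(llm.anyscale_api_key)                     # **********
print(llm.anyscale_api_key.get_secret_value())  # secret-api-key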
2024-01-10
mth93/langchain
libs~langchain~tests~integration_tests~cache~test_gptcache.py
import os from typing import Any, Callable, Union import pytest from libs.core.langchain_core.outputs import Generation from langchain.cache import GPTCache from langchain.globals import get_llm_cache, set_llm_cache from tests.unit_tests.llms.fake_llm import FakeLLM try: from gptcache import Cache # noqa: F401 from gptcache.manager.factory import get_data_manager from gptcache.processor.pre import get_prompt gptcache_installed = True except ImportError: gptcache_installed = False def init_gptcache_map(cache_obj: Any) -> None: i = getattr(init_gptcache_map, "_i", 0) cache_path = f"data_map_{i}.txt" if os.path.isfile(cache_path): os.remove(cache_path) cache_obj.init( pre_embedding_func=get_prompt, data_manager=get_data_manager(data_path=cache_path), ) init_gptcache_map._i = i + 1 # type: ignore def init_gptcache_map_with_llm(cache_obj: Any, llm: str) -> None: cache_path = f"data_map_{llm}.txt" if os.path.isfile(cache_path): os.remove(cache_path) cache_obj.init( pre_embedding_func=get_prompt, data_manager=get_data_manager(data_path=cache_path), ) @pytest.mark.skipif(not gptcache_installed, reason="gptcache not installed") @pytest.mark.parametrize( "init_func", [None, init_gptcache_map, init_gptcache_map_with_llm] ) def test_gptcache_caching( init_func: Union[Callable[[Any, str], None], Callable[[Any], None], None], ) -> None: """Test gptcache default caching behavior.""" set_llm_cache(GPTCache(init_func)) llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update("foo", llm_string, [Generation(text="fizz")]) _ = llm.generate(["foo", "bar", "foo"]) cache_output = get_llm_cache().lookup("foo", llm_string) assert cache_output == [Generation(text="fizz")] get_llm_cache().clear() assert get_llm_cache().lookup("bar", llm_string) is None
[]
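A condensed version of what the fixture above wires up, for use outside the test suite: set_llm_cache installs a GPTCache instance whose init callback picks the pre-embedding function and storage backend. The data path is an arbitrary example.

from gptcache import Cache
from gptcache.manager.factory import get_data_manager
from gptcache.processor.pre import get_prompt

from langchain.cache import GPTCache
from langchain.globals import set_llm_cache


def init_gptcache(cache_obj: Cache) -> None:
    cache_obj.init(
        pre_embedding_func=get_prompt,
        data_manager=get_data_manager(data_path="example_data_map.txt"),
    )


set_llm_cache(GPTCache(init_gptcache))
# From here on, identical prompts sent through any LangChain LLM are answered
# from the cache instead of triggering a new generation.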
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~test_momento_vector_index.py
import os import time import uuid from typing import Generator, Iterator, List import pytest from libs.core.langchain_core.documents import Document from langchain_community.document_loaders import TextLoader from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import MomentoVectorIndex API_KEY_ENV_VAR = "MOMENTO_API_KEY" def random_string() -> str: return str(uuid.uuid4()) @pytest.fixture(scope="function") def random_index_name() -> str: return f"langchain-test-index-{random_string()}" def wait() -> None: time.sleep(1) @pytest.fixture(scope="module") def embedding_openai() -> OpenAIEmbeddings: if not os.environ.get("OPENAI_API_KEY"): raise ValueError("OPENAI_API_KEY is not set") return OpenAIEmbeddings() @pytest.fixture(scope="function") def texts() -> Generator[List[str], None, None]: # Load the documents from a file located in the fixtures directory documents = TextLoader( os.path.join(os.path.dirname(__file__), "fixtures", "sharks.txt") ).load() yield [doc.page_content for doc in documents] @pytest.fixture(scope="function") def vector_store( embedding_openai: OpenAIEmbeddings, random_index_name: str ) -> Iterator[MomentoVectorIndex]: from momento import ( CredentialProvider, PreviewVectorIndexClient, VectorIndexConfigurations, ) vector_store = None try: client = PreviewVectorIndexClient( VectorIndexConfigurations.Default.latest(), credential_provider=CredentialProvider.from_environment_variable( API_KEY_ENV_VAR ), ) vector_store = MomentoVectorIndex( embedding=embedding_openai, client=client, index_name=random_index_name, ) yield vector_store finally: if vector_store is not None: vector_store._client.delete_index(random_index_name) def test_from_texts( random_index_name: str, embedding_openai: OpenAIEmbeddings, texts: List[str] ) -> None: from momento import ( CredentialProvider, VectorIndexConfigurations, ) random_text = random_string() random_document = f"Hello world {random_text} goodbye world!" texts.insert(0, random_document) vector_store = None try: vector_store = MomentoVectorIndex.from_texts( texts=texts, embedding=embedding_openai, index_name=random_index_name, configuration=VectorIndexConfigurations.Default.latest(), credential_provider=CredentialProvider.from_environment_variable( "MOMENTO_API_KEY" ), ) wait() documents = vector_store.similarity_search(query=random_text, k=1) assert documents == [Document(page_content=random_document)] finally: if vector_store is not None: vector_store._client.delete_index(random_index_name) def test_from_texts_with_metadatas( random_index_name: str, embedding_openai: OpenAIEmbeddings, texts: List[str] ) -> None: """Test end to end construction and search.""" from momento import ( CredentialProvider, VectorIndexConfigurations, ) random_text = random_string() random_document = f"Hello world {random_text} goodbye world!" 
texts.insert(0, random_document) metadatas = [{"page": f"{i}", "source": "user"} for i in range(len(texts))] vector_store = None try: vector_store = MomentoVectorIndex.from_texts( texts=texts, embedding=embedding_openai, index_name=random_index_name, metadatas=metadatas, configuration=VectorIndexConfigurations.Default.latest(), credential_provider=CredentialProvider.from_environment_variable( API_KEY_ENV_VAR ), ) wait() documents = vector_store.similarity_search(query=random_text, k=1) assert documents == [ Document( page_content=random_document, metadata={"page": "0", "source": "user"} ) ] finally: if vector_store is not None: vector_store._client.delete_index(random_index_name) def test_from_texts_with_scores(vector_store: MomentoVectorIndex) -> None: """Test end to end construction and search with scores and IDs.""" texts = ["apple", "orange", "hammer"] metadatas = [{"page": f"{i}"} for i in range(len(texts))] vector_store.add_texts(texts, metadatas) wait() search_results = vector_store.similarity_search_with_score("apple", k=3) docs = [o[0] for o in search_results] scores = [o[1] for o in search_results] assert docs == [ Document(page_content="apple", metadata={"page": "0"}), Document(page_content="orange", metadata={"page": "1"}), Document(page_content="hammer", metadata={"page": "2"}), ] assert scores[0] > scores[1] > scores[2] def test_add_documents_with_ids(vector_store: MomentoVectorIndex) -> None: """Test end to end construction and search with scores and IDs.""" from momento.responses.vector_index import Search texts = ["apple", "orange", "hammer"] ids = [random_string() for _ in range(len(texts))] metadatas = [{"page": f"{i}"} for i in range(len(texts))] # Add texts with metadata and ids stored_ids = vector_store.add_texts(texts, metadatas, ids=ids) assert stored_ids == ids wait() # Verify that the ids are in the index response = vector_store._client.search( vector_store.index_name, vector_store.embeddings.embed_query("apple") ) assert isinstance(response, Search.Success) assert [hit.id for hit in response.hits] == ids def test_max_marginal_relevance_search(vector_store: MomentoVectorIndex) -> None: """Test max marginal relevance search.""" pepperoni_pizza = "pepperoni pizza" cheese_pizza = "cheese pizza" hot_dog = "hot dog" vector_store.add_texts([pepperoni_pizza, cheese_pizza, hot_dog]) wait() search_results = vector_store.similarity_search("pizza", k=2) assert search_results == [ Document(page_content=pepperoni_pizza, metadata={}), Document(page_content=cheese_pizza, metadata={}), ] search_results = vector_store.max_marginal_relevance_search(query="pizza", k=2) assert search_results == [ Document(page_content=pepperoni_pizza, metadata={}), Document(page_content=hot_dog, metadata={}), ]
[]
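The same flow as test_from_texts, reduced to the essentials. It assumes OPENAI_API_KEY and MOMENTO_API_KEY are set and that the index name is free to use; both the texts and the index name are placeholders.

from momento import CredentialProvider, VectorIndexConfigurations

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import MomentoVectorIndex

vector_store = MomentoVectorIndex.from_texts(
    texts=["sharks are fish", "whales are mammals"],
    embedding=OpenAIEmbeddings(),
    index_name="my-langchain-index",  # placeholder
    configuration=VectorIndexConfigurations.Default.latest(),
    credential_provider=CredentialProvider.from_environment_variable(
        "MOMENTO_API_KEY"
    ),
)
docs = vector_store.similarity_search("which animals are mammals?", k=1)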
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~joplin.py
import json import urllib from datetime import datetime from typing import Iterator, List, Optional from libs.core.langchain_core.documents import Document from libs.core.langchain_core.utils import get_from_env from langchain_community.document_loaders.base import BaseLoader LINK_NOTE_TEMPLATE = "joplin://x-callback-url/openNote?id={id}" class JoplinLoader(BaseLoader): """Load notes from `Joplin`. In order to use this loader, you need to have Joplin running with the Web Clipper enabled (look for "Web Clipper" in the app settings). To get the access token, you need to go to the Web Clipper options and under "Advanced Options" you will find the access token. You can find more information about the Web Clipper service here: https://joplinapp.org/clipper/ """ def __init__( self, access_token: Optional[str] = None, port: int = 41184, host: str = "localhost", ) -> None: """ Args: access_token: The access token to use. port: The port where the Web Clipper service is running. Default is 41184. host: The host where the Web Clipper service is running. Default is localhost. """ access_token = access_token or get_from_env( "access_token", "JOPLIN_ACCESS_TOKEN" ) base_url = f"http://{host}:{port}" self._get_note_url = ( f"{base_url}/notes?token={access_token}" f"&fields=id,parent_id,title,body,created_time,updated_time&page={{page}}" ) self._get_folder_url = ( f"{base_url}/folders/{{id}}?token={access_token}&fields=title" ) self._get_tag_url = ( f"{base_url}/notes/{{id}}/tags?token={access_token}&fields=title" ) def _get_notes(self) -> Iterator[Document]: has_more = True page = 1 while has_more: req_note = urllib.request.Request(self._get_note_url.format(page=page)) with urllib.request.urlopen(req_note) as response: json_data = json.loads(response.read().decode()) for note in json_data["items"]: metadata = { "source": LINK_NOTE_TEMPLATE.format(id=note["id"]), "folder": self._get_folder(note["parent_id"]), "tags": self._get_tags(note["id"]), "title": note["title"], "created_time": self._convert_date(note["created_time"]), "updated_time": self._convert_date(note["updated_time"]), } yield Document(page_content=note["body"], metadata=metadata) has_more = json_data["has_more"] page += 1 def _get_folder(self, folder_id: str) -> str: req_folder = urllib.request.Request(self._get_folder_url.format(id=folder_id)) with urllib.request.urlopen(req_folder) as response: json_data = json.loads(response.read().decode()) return json_data["title"] def _get_tags(self, note_id: str) -> List[str]: req_tag = urllib.request.Request(self._get_tag_url.format(id=note_id)) with urllib.request.urlopen(req_tag) as response: json_data = json.loads(response.read().decode()) return [tag["title"] for tag in json_data["items"]] def _convert_date(self, date: int) -> str: return datetime.fromtimestamp(date / 1000).strftime("%Y-%m-%d %H:%M:%S") def lazy_load(self) -> Iterator[Document]: yield from self._get_notes() def load(self) -> List[Document]: return list(self.lazy_load())
[ "joplin://x-callback-url/openNote?id={id}" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~oci_data_science_model_deployment_endpoint.py
import logging from typing import Any, Dict, List, Optional import requests from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.pydantic_v1 import Field, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env logger = logging.getLogger(__name__) DEFAULT_TIME_OUT = 300 DEFAULT_CONTENT_TYPE_JSON = "application/json" class OCIModelDeploymentLLM(LLM): """Base class for LLM deployed on OCI Data Science Model Deployment.""" auth: dict = Field(default_factory=dict, exclude=True) """ADS auth dictionary for OCI authentication: https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html. This can be generated by calling `ads.common.auth.api_keys()` or `ads.common.auth.resource_principal()`. If this is not provided then the `ads.common.default_signer()` will be used.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.2 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: float = 0.75 """Total probability mass of tokens to consider at each step.""" endpoint: str = "" """The uri of the endpoint from the deployed Model Deployment model.""" best_of: int = 1 """Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token). """ stop: Optional[List[str]] = None """Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings.""" @root_validator() def validate_environment( # pylint: disable=no-self-argument cls, values: Dict ) -> Dict: """Validate that python package exists in environment.""" try: import ads except ImportError as ex: raise ImportError( "Could not import ads python package. " "Please install it with `pip install oracle_ads`." ) from ex if not values.get("auth", None): values["auth"] = ads.common.auth.default_signer() values["endpoint"] = get_from_dict_or_env( values, "endpoint", "OCI_LLM_ENDPOINT", ) return values @property def _default_params(self) -> Dict[str, Any]: """Default parameters for the model.""" raise NotImplementedError @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { **{"endpoint": self.endpoint}, **self._default_params, } def _construct_json_body(self, prompt: str, params: dict) -> dict: """Constructs the request body as a dictionary (JSON).""" raise NotImplementedError def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) -> dict: """Combines the invocation parameters with default parameters.""" params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop"] = self.stop elif stop is not None: params["stop"] = stop else: # Don't set "stop" in param as None. It should be a list. params["stop"] = [] return {**params, **kwargs} def _process_response(self, response_json: dict) -> str: raise NotImplementedError def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to OCI Data Science Model Deployment endpoint. Args: prompt (str): The prompt to pass into the model. stop (List[str], Optional): List of stop words to use when generating. 
kwargs: requests_kwargs: Additional ``**kwargs`` to pass to requests.post Returns: The string generated by the model. Example: .. code-block:: python response = oci_md("Tell me a joke.") """ requests_kwargs = kwargs.pop("requests_kwargs", {}) params = self._invocation_params(stop, **kwargs) body = self._construct_json_body(prompt, params) logger.info(f"LLM API Request:\n{prompt}") response = self._send_request( data=body, endpoint=self.endpoint, **requests_kwargs ) completion = self._process_response(response) logger.info(f"LLM API Completion:\n{completion}") return completion def _send_request( self, data: Any, endpoint: str, header: Optional[dict] = {}, **kwargs: Any, ) -> Dict: """Sends request to the oci data science model deployment endpoint. Args: data (Json serializable): data need to be sent to the endpoint. endpoint (str): The model HTTP endpoint. header (dict, optional): A dictionary of HTTP headers to send to the specified url. Defaults to {}. kwargs: Additional ``**kwargs`` to pass to requests.post. Raises: Exception: Raise when invoking fails. Returns: A JSON representation of a requests.Response object. """ if not header: header = {} header["Content-Type"] = ( header.pop("content_type", DEFAULT_CONTENT_TYPE_JSON) or DEFAULT_CONTENT_TYPE_JSON ) request_kwargs = {"json": data} request_kwargs["headers"] = header timeout = kwargs.pop("timeout", DEFAULT_TIME_OUT) attempts = 0 while attempts < 2: request_kwargs["auth"] = self.auth.get("signer") response = requests.post( endpoint, timeout=timeout, **request_kwargs, **kwargs ) if response.status_code == 401: self._refresh_signer() attempts += 1 continue break try: response.raise_for_status() response_json = response.json() except Exception: logger.error( "DEBUG INFO: request_kwargs=%s, status_code=%s, content=%s", request_kwargs, response.status_code, response.content, ) raise return response_json def _refresh_signer(self) -> None: if self.auth.get("signer", None) and hasattr( self.auth["signer"], "refresh_security_token" ): self.auth["signer"].refresh_security_token() class OCIModelDeploymentTGI(OCIModelDeploymentLLM): """OCI Data Science Model Deployment TGI Endpoint. To use, you must provide the model HTTP endpoint from your deployed model, e.g. https://<MD_OCID>/predict. To authenticate, `oracle-ads` has been used to automatically load credentials: https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html Make sure to have the required policies to access the OCI Data Science Model Deployment endpoint. See: https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm#model_dep_policies_auth__predict-endpoint Example: .. code-block:: python from langchain.llms import ModelDeploymentTGI oci_md = ModelDeploymentTGI(endpoint="https://<MD_OCID>/predict") """ do_sample: bool = True """If set to True, this parameter enables decoding strategies such as multi-nominal sampling, beam-search multi-nominal sampling, Top-K sampling and Top-p sampling. """ watermark = True """Watermarking with `A Watermark for Large Language Models <https://arxiv.org/abs/2301.10226>`_. Defaults to True.""" return_full_text = False """Whether to prepend the prompt to the generated text. 
Defaults to False.""" @property def _llm_type(self) -> str: """Return type of llm.""" return "oci_model_deployment_tgi_endpoint" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for invoking OCI model deployment TGI endpoint.""" return { "best_of": self.best_of, "max_new_tokens": self.max_tokens, "temperature": self.temperature, "top_k": self.k if self.k > 0 else None, # `top_k` must be strictly positive' "top_p": self.p, "do_sample": self.do_sample, "return_full_text": self.return_full_text, "watermark": self.watermark, } def _construct_json_body(self, prompt: str, params: dict) -> dict: return { "inputs": prompt, "parameters": params, } def _process_response(self, response_json: dict) -> str: return str(response_json.get("generated_text", response_json)) + "\n" class OCIModelDeploymentVLLM(OCIModelDeploymentLLM): """VLLM deployed on OCI Data Science Model Deployment To use, you must provide the model HTTP endpoint from your deployed model, e.g. https://<MD_OCID>/predict. To authenticate, `oracle-ads` has been used to automatically load credentials: https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html Make sure to have the required policies to access the OCI Data Science Model Deployment endpoint. See: https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm#model_dep_policies_auth__predict-endpoint Example: .. code-block:: python from langchain.llms import OCIModelDeploymentVLLM oci_md = OCIModelDeploymentVLLM( endpoint="https://<MD_OCID>/predict", model="mymodel" ) """ model: str """The name of the model.""" n: int = 1 """Number of output sequences to return for the given prompt.""" k: int = -1 """Number of most likely tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1.""" use_beam_search: bool = False """Whether to use beam search instead of sampling.""" ignore_eos: bool = False """Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.""" logprobs: Optional[int] = None """Number of log probabilities to return per output token.""" @property def _llm_type(self) -> str: """Return type of llm.""" return "oci_model_deployment_vllm_endpoint" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling vllm.""" return { "best_of": self.best_of, "frequency_penalty": self.frequency_penalty, "ignore_eos": self.ignore_eos, "logprobs": self.logprobs, "max_tokens": self.max_tokens, "model": self.model, "n": self.n, "presence_penalty": self.presence_penalty, "stop": self.stop, "temperature": self.temperature, "top_k": self.k, "top_p": self.p, "use_beam_search": self.use_beam_search, } def _construct_json_body(self, prompt: str, params: dict) -> dict: return { "prompt": prompt, **params, } def _process_response(self, response_json: dict) -> str: return response_json["choices"][0]["text"]
[]
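Hypothetical calls against the two endpoint flavours above. The endpoint URL keeps the `<MD_OCID>` placeholder from the docstrings, the model name is invented for illustration, and `oracle-ads` resolves credentials via its default signer.

from langchain_community.llms.oci_data_science_model_deployment_endpoint import (
    OCIModelDeploymentTGI,
    OCIModelDeploymentVLLM,
)

tgi_llm = OCIModelDeploymentTGI(
    endpoint="https://<MD_OCID>/predict",  # placeholder OCID
    temperature=0.2,
    max_tokens=256,
)
print(tgi_llm("Tell me a joke."))

vllm_llm = OCIModelDeploymentVLLM(
    endpoint="https://<MD_OCID>/predict",  # placeholder OCID
    model="my-deployed-model",  # hypothetical model name
)
print(vllm_llm("Tell me a joke."))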
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~awadb.py
from __future__ import annotations import logging import uuid from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Type import numpy as np from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: import awadb logger = logging.getLogger() DEFAULT_TOPN = 4 class AwaDB(VectorStore): """`AwaDB` vector store.""" _DEFAULT_TABLE_NAME = "langchain_awadb" def __init__( self, table_name: str = _DEFAULT_TABLE_NAME, embedding: Optional[Embeddings] = None, log_and_data_dir: Optional[str] = None, client: Optional[awadb.Client] = None, **kwargs: Any, ) -> None: """Initialize with AwaDB client. If table_name is not specified, a random table name of `_DEFAULT_TABLE_NAME + last segment of uuid` would be created automatically. Args: table_name: Name of the table created, default _DEFAULT_TABLE_NAME. embedding: Optional Embeddings initially set. log_and_data_dir: Optional the root directory of log and data. client: Optional AwaDB client. kwargs: Any possible extend parameters in the future. Returns: None. """ try: import awadb except ImportError: raise ImportError( "Could not import awadb python package. " "Please install it with `pip install awadb`." ) if client is not None: self.awadb_client = client else: if log_and_data_dir is not None: self.awadb_client = awadb.Client(log_and_data_dir) else: self.awadb_client = awadb.Client() if table_name == self._DEFAULT_TABLE_NAME: table_name += "_" table_name += str(uuid.uuid4()).split("-")[-1] self.awadb_client.Create(table_name) self.table2embeddings: dict[str, Embeddings] = {} if embedding is not None: self.table2embeddings[table_name] = embedding self.using_table_name = table_name @property def embeddings(self) -> Optional[Embeddings]: if self.using_table_name in self.table2embeddings: return self.table2embeddings[self.using_table_name] return None def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, is_duplicate_texts: Optional[bool] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. is_duplicate_texts: Optional whether to duplicate texts. Defaults to True. kwargs: any possible extend parameters in the future. Returns: List of ids from adding the texts into the vectorstore. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embeddings = None if self.using_table_name in self.table2embeddings: embeddings = self.table2embeddings[self.using_table_name].embed_documents( list(texts) ) return self.awadb_client.AddTexts( "embedding_text", "text_embedding", texts, embeddings, metadatas, is_duplicate_texts, ) def load_local( self, table_name: str, **kwargs: Any, ) -> bool: """Load the local specified table. Args: table_name: Table name kwargs: Any possible extend parameters in the future. Returns: Success or failure of loading the local specified table """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") return self.awadb_client.Load(table_name) def similarity_search( self, query: str, k: int = DEFAULT_TOPN, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. 
Args: query: Text query. k: The maximum number of documents to return. text_in_page_content: Filter by the text in page_content of Document. meta_filter (Optional[dict]): Filter by metadata. Defaults to None. E.g. `{"color" : "red", "price": 4.20}`. Optional. E.g. `{"max_price" : 15.66, "min_price": 4.20}` `price` is the metadata field, means range filter(4.20<'price'<15.66). E.g. `{"maxe_price" : 15.66, "mine_price": 4.20}` `price` is the metadata field, means range filter(4.20<='price'<=15.66). kwargs: Any possible extend parameters in the future. Returns: Returns the k most similar documents to the specified text query. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding = None if self.using_table_name in self.table2embeddings: embedding = self.table2embeddings[self.using_table_name].embed_query(query) else: from awadb import AwaEmbedding embedding = AwaEmbedding().Embedding(query) not_include_fields: Set[str] = {"text_embedding", "_id", "score"} return self.similarity_search_by_vector( embedding, k, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields_in_metadata=not_include_fields, ) def similarity_search_with_score( self, query: str, k: int = DEFAULT_TOPN, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """The most k similar documents and scores of the specified query. Args: query: Text query. k: The k most similar documents to the text query. text_in_page_content: Filter by the text in page_content of Document. meta_filter: Filter by metadata. Defaults to None. kwargs: Any possible extend parameters in the future. Returns: The k most similar documents to the specified text query. 0 is dissimilar, 1 is the most similar. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding = None if self.using_table_name in self.table2embeddings: embedding = self.table2embeddings[self.using_table_name].embed_query(query) else: from awadb import AwaEmbedding embedding = AwaEmbedding().Embedding(query) results: List[Tuple[Document, float]] = [] not_include_fields: Set[str] = {"text_embedding", "_id"} retrieval_docs = self.similarity_search_by_vector( embedding, k, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields_in_metadata=not_include_fields, ) for doc in retrieval_docs: score = doc.metadata["score"] del doc.metadata["score"] doc_tuple = (doc, score) results.append(doc_tuple) return results def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: return self.similarity_search_with_score(query, k, **kwargs) def similarity_search_by_vector( self, embedding: Optional[List[float]] = None, k: int = DEFAULT_TOPN, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, not_include_fields_in_metadata: Optional[Set[str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. text_in_page_content: Filter by the text in page_content of Document. meta_filter: Filter by metadata. Defaults to None. not_incude_fields_in_metadata: Not include meta fields of each document. Returns: List of Documents which are the most similar to the query vector. 
""" if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") results: List[Document] = [] if embedding is None: return results show_results = self.awadb_client.Search( embedding, k, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields=not_include_fields_in_metadata, ) if show_results.__len__() == 0: return results for item_detail in show_results[0]["ResultItems"]: content = "" meta_data = {} for item_key in item_detail: if item_key == "embedding_text": content = item_detail[item_key] continue elif not_include_fields_in_metadata is not None: if item_key in not_include_fields_in_metadata: continue meta_data[item_key] = item_detail[item_key] results.append(Document(page_content=content, metadata=meta_data)) return results def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. text_in_page_content: Filter by the text in page_content of Document. meta_filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding: List[float] = [] if self.using_table_name in self.table2embeddings: embedding = self.table2embeddings[self.using_table_name].embed_query(query) else: from awadb import AwaEmbedding embedding = AwaEmbedding().Embedding(query) if embedding.__len__() == 0: return [] results = self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult=lambda_mult, text_in_page_content=text_in_page_content, meta_filter=meta_filter, ) return results def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. text_in_page_content: Filter by the text in page_content of Document. meta_filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. 
""" if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") results: List[Document] = [] if embedding is None: return results not_include_fields: set = {"_id", "score"} retrieved_docs = self.similarity_search_by_vector( embedding, fetch_k, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields_in_metadata=not_include_fields, ) top_embeddings = [] for doc in retrieved_docs: top_embeddings.append(doc.metadata["text_embedding"]) selected_docs = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), embedding_list=top_embeddings ) for s_id in selected_docs: if "text_embedding" in retrieved_docs[s_id].metadata: del retrieved_docs[s_id].metadata["text_embedding"] results.append(retrieved_docs[s_id]) return results def get( self, ids: Optional[List[str]] = None, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, not_include_fields: Optional[Set[str]] = None, limit: Optional[int] = None, **kwargs: Any, ) -> Dict[str, Document]: """Return docs according ids. Args: ids: The ids of the embedding vectors. text_in_page_content: Filter by the text in page_content of Document. meta_filter: Filter by any metadata of the document. not_include_fields: Not pack the specified fields of each document. limit: The number of documents to return. Defaults to 5. Optional. Returns: Documents which satisfy the input conditions. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") docs_detail = self.awadb_client.Get( ids=ids, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields=not_include_fields, limit=limit, ) results: Dict[str, Document] = {} for doc_detail in docs_detail: content = "" meta_info = {} for field in doc_detail: if field == "embedding_text": content = doc_detail[field] continue elif field == "text_embedding" or field == "_id": continue meta_info[field] = doc_detail[field] doc = Document(page_content=content, metadata=meta_info) results[doc_detail["_id"]] = doc return results def delete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Optional[bool]: """Delete the documents which have the specified ids. Args: ids: The ids of the embedding vectors. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful. False otherwise, None if not implemented. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") ret: Optional[bool] = None if ids is None or ids.__len__() == 0: return ret ret = self.awadb_client.Delete(ids) return ret def update( self, ids: List[str], texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Update the documents which have the specified ids. Args: ids: The id list of the updating embedding vector. texts: The texts of the updating documents. metadatas: The metadatas of the updating documents. Returns: the ids of the updated documents. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") return self.awadb_client.UpdateTexts( ids=ids, text_field_name="embedding_text", texts=texts, metadatas=metadatas ) def create_table( self, table_name: str, **kwargs: Any, ) -> bool: """Create a new table.""" if self.awadb_client is None: return False ret = self.awadb_client.Create(table_name) if ret: self.using_table_name = table_name return ret def use( self, table_name: str, **kwargs: Any, ) -> bool: """Use the specified table. 
Don't know the tables, please invoke list_tables.""" if self.awadb_client is None: return False ret = self.awadb_client.Use(table_name) if ret: self.using_table_name = table_name return ret def list_tables( self, **kwargs: Any, ) -> List[str]: """List all the tables created by the client.""" if self.awadb_client is None: return [] return self.awadb_client.ListAllTables() def get_current_table( self, **kwargs: Any, ) -> str: """Get the current table.""" return self.using_table_name @classmethod def from_texts( cls: Type[AwaDB], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, table_name: str = _DEFAULT_TABLE_NAME, log_and_data_dir: Optional[str] = None, client: Optional[awadb.Client] = None, **kwargs: Any, ) -> AwaDB: """Create an AwaDB vectorstore from a raw documents. Args: texts (List[str]): List of texts to add to the table. embedding (Optional[Embeddings]): Embedding function. Defaults to None. metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. table_name (str): Name of the table to create. log_and_data_dir (Optional[str]): Directory of logging and persistence. client (Optional[awadb.Client]): AwaDB client Returns: AwaDB: AwaDB vectorstore. """ awadb_client = cls( table_name=table_name, embedding=embedding, log_and_data_dir=log_and_data_dir, client=client, ) awadb_client.add_texts(texts=texts, metadatas=metadatas) return awadb_client @classmethod def from_documents( cls: Type[AwaDB], documents: List[Document], embedding: Optional[Embeddings] = None, table_name: str = _DEFAULT_TABLE_NAME, log_and_data_dir: Optional[str] = None, client: Optional[awadb.Client] = None, **kwargs: Any, ) -> AwaDB: """Create an AwaDB vectorstore from a list of documents. If a log_and_data_dir specified, the table will be persisted there. Args: documents (List[Document]): List of documents to add to the vectorstore. embedding (Optional[Embeddings]): Embedding function. Defaults to None. table_name (str): Name of the table to create. log_and_data_dir (Optional[str]): Directory to persist the table. client (Optional[awadb.Client]): AwaDB client. Any: Any possible parameters in the future Returns: AwaDB: AwaDB vectorstore. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts( texts=texts, embedding=embedding, metadatas=metadatas, table_name=table_name, log_and_data_dir=log_and_data_dir, client=client, )
[]
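A short usage sketch for the store above. Without an explicit embedding it falls back to awadb's built-in AwaEmbedding, so only the `awadb` package is required; the table name and texts are arbitrary examples.

from langchain_community.vectorstores import AwaDB

db = AwaDB.from_texts(
    texts=["AwaDB is an embedded vector database", "LangChain wraps many stores"],
    table_name="demo_table",  # placeholder
)
docs = db.similarity_search("embedded vector database", k=1)
docs_and_scores = db.similarity_search_with_score("embedded vector database", k=1)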
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~usearch.py
from __future__ import annotations from typing import Any, Dict, Iterable, List, Optional, Tuple import numpy as np from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.docstore.base import AddableMixin, Docstore from langchain_community.docstore.in_memory import InMemoryDocstore def dependable_usearch_import() -> Any: """ Import usearch if available, otherwise raise error. """ try: import usearch.index except ImportError: raise ImportError( "Could not import usearch python package. " "Please install it with `pip install usearch` " ) return usearch.index class USearch(VectorStore): """`USearch` vector store. To use, you should have the ``usearch`` python package installed. """ def __init__( self, embedding: Embeddings, index: Any, docstore: Docstore, ids: List[str], ): """Initialize with necessary components.""" self.embedding = embedding self.index = index self.docstore = docstore self.ids = ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict]] = None, ids: Optional[np.ndarray] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the vectorstore. """ if not isinstance(self.docstore, AddableMixin): raise ValueError( "If trying to add texts, the underlying docstore should support " f"adding items, which {self.docstore} does not" ) embeddings = self.embedding.embed_documents(list(texts)) documents = [] for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} documents.append(Document(page_content=text, metadata=metadata)) last_id = int(self.ids[-1]) + 1 if ids is None: ids = np.array([str(last_id + id) for id, _ in enumerate(texts)]) self.index.add(np.array(ids), np.array(embeddings)) self.docstore.add(dict(zip(ids, documents))) self.ids.extend(ids) return ids.tolist() def similarity_search_with_score( self, query: str, k: int = 4, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of documents most similar to the query with distance. """ query_embedding = self.embedding.embed_query(query) matches = self.index.search(np.array(query_embedding), k) docs_with_scores: List[Tuple[Document, float]] = [] for id, score in zip(matches.keys, matches.distances): doc = self.docstore.search(str(id)) if not isinstance(doc, Document): raise ValueError(f"Could not find document for id {id}, got {doc}") docs_with_scores.append((doc, score)) return docs_with_scores def similarity_search( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. 
""" query_embedding = self.embedding.embed_query(query) matches = self.index.search(np.array(query_embedding), k) docs: List[Document] = [] for id in matches.keys: doc = self.docstore.search(str(id)) if not isinstance(doc, Document): raise ValueError(f"Could not find document for id {id}, got {doc}") docs.append(doc) return docs @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict]] = None, ids: Optional[np.ndarray] = None, metric: str = "cos", **kwargs: Any, ) -> USearch: """Construct USearch wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Creates an in memory docstore 3. Initializes the USearch database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import USearch from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() usearch = USearch.from_texts(texts, embeddings) """ embeddings = embedding.embed_documents(texts) documents: List[Document] = [] if ids is None: ids = np.array([str(id) for id, _ in enumerate(texts)]) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} documents.append(Document(page_content=text, metadata=metadata)) docstore = InMemoryDocstore(dict(zip(ids, documents))) usearch = dependable_usearch_import() index = usearch.Index(ndim=len(embeddings[0]), metric=metric) index.add(np.array(ids), np.array(embeddings)) return cls(embedding, index, docstore, ids.tolist())
[]
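A usage sketch for the wrapper above, with FakeEmbeddings standing in for a real embedding model (it returns random vectors, so no API key is needed); the metric argument mirrors the "cos" default in from_texts.

from langchain_community.embeddings import FakeEmbeddings  # random-vector stand-in
from langchain_community.vectorstores import USearch

db = USearch.from_texts(
    texts=["usearch keeps a compact in-memory index", "documents live in a docstore"],
    embedding=FakeEmbeddings(size=128),
    metric="cos",
)
results = db.similarity_search("in-memory index", k=1)
scored = db.similarity_search_with_score("in-memory index", k=1)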
2024-01-10
mth93/langchain
libs~langchain~langchain~output_parsers~fix.py
from __future__ import annotations from typing import Any, TypeVar from libs.core.langchain_core.exceptions import OutputParserException from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.output_parsers import BaseOutputParser from libs.core.langchain_core.prompts import BasePromptTemplate from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT T = TypeVar("T") class OutputFixingParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors.""" @classmethod def is_lc_serializable(cls) -> bool: return True parser: BaseOutputParser[T] """The parser to use to parse the output.""" # Should be an LLMChain but we want to avoid top-level imports from langchain.chains retry_chain: Any """The LLMChain to use to retry the completion.""" max_retries: int = 1 """The maximum number of times to retry the parse.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_FIX_PROMPT, max_retries: int = 1, ) -> OutputFixingParser[T]: """Create an OutputFixingParser from a language model and a parser. Args: llm: llm to use for fixing parser: parser to use for parsing prompt: prompt to use for fixing max_retries: Maximum number of retries to parse. Returns: OutputFixingParser """ from langchain.chains.llm import LLMChain chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain, max_retries=max_retries) def parse(self, completion: str) -> T: retries = 0 while retries <= self.max_retries: try: return self.parser.parse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = self.retry_chain.run( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") async def aparse(self, completion: str) -> T: retries = 0 while retries <= self.max_retries: try: return await self.parser.aparse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = await self.retry_chain.arun( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "output_fixing"
[]
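A sketch of wiring the parser, assuming an OpenAI chat model is available via OPENAI_API_KEY; the Pydantic schema and the malformed payload are invented for illustration.

from typing import List

from libs.core.langchain_core.pydantic_v1 import BaseModel, Field

from langchain.output_parsers import OutputFixingParser, PydanticOutputParser
from langchain_community.chat_models import ChatOpenAI


class Actor(BaseModel):
    name: str = Field(description="name of the actor")
    films: List[str] = Field(description="films they starred in")


base_parser = PydanticOutputParser(pydantic_object=Actor)
fixing_parser = OutputFixingParser.from_llm(parser=base_parser, llm=ChatOpenAI())

# Invalid JSON (single quotes) fails in the base parser, so the retry chain asks
# the LLM to reformat it before parsing again.
misformatted = "{'name': 'Tom Hanks', 'films': ['Forrest Gump']}"
actor = fixing_parser.parse(misformatted)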
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~query_constructor~parser.py
import datetime import warnings from typing import Any, Literal, Optional, Sequence, Union from libs.core.langchain_core.utils import check_package_version from typing_extensions import TypedDict try: check_package_version("lark", gte_version="1.1.5") from lark import Lark, Transformer, v_args except ImportError: def v_args(*args: Any, **kwargs: Any) -> Any: # type: ignore """Dummy decorator for when lark is not installed.""" return lambda _: None Transformer = object # type: ignore Lark = object # type: ignore from langchain.chains.query_constructor.ir import ( Comparator, Comparison, FilterDirective, Operation, Operator, ) GRAMMAR = r""" ?program: func_call ?expr: func_call | value func_call: CNAME "(" [args] ")" ?value: SIGNED_INT -> int | SIGNED_FLOAT -> float | DATE -> date | list | string | ("false" | "False" | "FALSE") -> false | ("true" | "True" | "TRUE") -> true args: expr ("," expr)* DATE.2: /["']?(\d{4}-[01]\d-[0-3]\d)["']?/ string: /'[^']*'/ | ESCAPED_STRING list: "[" [args] "]" %import common.CNAME %import common.ESCAPED_STRING %import common.SIGNED_FLOAT %import common.SIGNED_INT %import common.WS %ignore WS """ class ISO8601Date(TypedDict): """A date in ISO 8601 format (YYYY-MM-DD).""" date: str type: Literal["date"] @v_args(inline=True) class QueryTransformer(Transformer): """Transforms a query string into an intermediate representation.""" def __init__( self, *args: Any, allowed_comparators: Optional[Sequence[Comparator]] = None, allowed_operators: Optional[Sequence[Operator]] = None, allowed_attributes: Optional[Sequence[str]] = None, **kwargs: Any, ): super().__init__(*args, **kwargs) self.allowed_comparators = allowed_comparators self.allowed_operators = allowed_operators self.allowed_attributes = allowed_attributes def program(self, *items: Any) -> tuple: return items def func_call(self, func_name: Any, args: list) -> FilterDirective: func = self._match_func_name(str(func_name)) if isinstance(func, Comparator): if self.allowed_attributes and args[0] not in self.allowed_attributes: raise ValueError( f"Received invalid attributes {args[0]}. Allowed attributes are " f"{self.allowed_attributes}" ) return Comparison(comparator=func, attribute=args[0], value=args[1]) elif len(args) == 1 and func in (Operator.AND, Operator.OR): return args[0] else: return Operation(operator=func, arguments=args) def _match_func_name(self, func_name: str) -> Union[Operator, Comparator]: if func_name in set(Comparator): if self.allowed_comparators is not None: if func_name not in self.allowed_comparators: raise ValueError( f"Received disallowed comparator {func_name}. Allowed " f"comparators are {self.allowed_comparators}" ) return Comparator(func_name) elif func_name in set(Operator): if self.allowed_operators is not None: if func_name not in self.allowed_operators: raise ValueError( f"Received disallowed operator {func_name}. Allowed operators" f" are {self.allowed_operators}" ) return Operator(func_name) else: raise ValueError( f"Received unrecognized function {func_name}. 
Valid functions are " f"{list(Operator) + list(Comparator)}" ) def args(self, *items: Any) -> tuple: return items def false(self) -> bool: return False def true(self) -> bool: return True def list(self, item: Any) -> list: if item is None: return [] return list(item) def int(self, item: Any) -> int: return int(item) def float(self, item: Any) -> float: return float(item) def date(self, item: Any) -> ISO8601Date: item = str(item).strip("\"'") try: datetime.datetime.strptime(item, "%Y-%m-%d") except ValueError: warnings.warn( "Dates are expected to be provided in ISO 8601 date format " "(YYYY-MM-DD)." ) return {"date": item, "type": "date"} def string(self, item: Any) -> str: # Remove escaped quotes return str(item).strip("\"'") def get_parser( allowed_comparators: Optional[Sequence[Comparator]] = None, allowed_operators: Optional[Sequence[Operator]] = None, allowed_attributes: Optional[Sequence[str]] = None, ) -> Lark: """ Returns a parser for the query language. Args: allowed_comparators: Optional[Sequence[Comparator]] allowed_operators: Optional[Sequence[Operator]] Returns: Lark parser for the query language. """ # QueryTransformer is None when Lark cannot be imported. if QueryTransformer is None: raise ImportError( "Cannot import lark, please install it with 'pip install lark'." ) transformer = QueryTransformer( allowed_comparators=allowed_comparators, allowed_operators=allowed_operators, allowed_attributes=allowed_attributes, ) return Lark(GRAMMAR, parser="lalr", transformer=transformer, start="program")
[]
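A sketch of parsing a query with the grammar above: get_parser builds a Lark LALR parser (lark>=1.1.5 must be installed) whose transformer emits Comparison/Operation IR objects. The attribute names and values are illustrative.

from langchain.chains.query_constructor.ir import Comparator, Operator
from langchain.chains.query_constructor.parser import get_parser

parser = get_parser(
    allowed_comparators=[Comparator.EQ, Comparator.GT],
    allowed_operators=[Operator.AND],
    allowed_attributes=["genre", "year"],
)
# The transformer converts the parse tree into IR objects (Operation/Comparison).
filter_directive = parser.parse('and(eq("genre", "science fiction"), gt("year", 1990))')
print(filter_directive)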
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~json_loader.py
import json from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Union from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class JSONLoader(BaseLoader): """Load a `JSON` file using a `jq` schema. Example: [{"text": ...}, {"text": ...}, {"text": ...}] -> schema = .[].text {"key": [{"text": ...}, {"text": ...}, {"text": ...}]} -> schema = .key[].text ["", "", ""] -> schema = .[] """ def __init__( self, file_path: Union[str, Path], jq_schema: str, content_key: Optional[str] = None, metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None, text_content: bool = True, json_lines: bool = False, ): """Initialize the JSONLoader. Args: file_path (Union[str, Path]): The path to the JSON or JSON Lines file. jq_schema (str): The jq schema to use to extract the data or text from the JSON. content_key (str): The key to use to extract the content from the JSON if the jq_schema results to a list of objects (dict). metadata_func (Callable[Dict, Dict]): A function that takes in the JSON object extracted by the jq_schema and the default metadata and returns a dict of the updated metadata. text_content (bool): Boolean flag to indicate whether the content is in string format, default to True. json_lines (bool): Boolean flag to indicate whether the input is in JSON Lines format. """ try: import jq # noqa:F401 except ImportError: raise ImportError( "jq package not found, please install it with `pip install jq`" ) self.file_path = Path(file_path).resolve() self._jq_schema = jq.compile(jq_schema) self._content_key = content_key self._metadata_func = metadata_func self._text_content = text_content self._json_lines = json_lines def load(self) -> List[Document]: """Load and return documents from the JSON file.""" docs: List[Document] = [] if self._json_lines: with self.file_path.open(encoding="utf-8") as f: for line in f: line = line.strip() if line: self._parse(line, docs) else: self._parse(self.file_path.read_text(encoding="utf-8"), docs) return docs def _parse(self, content: str, docs: List[Document]) -> None: """Convert given content to documents.""" data = self._jq_schema.input(json.loads(content)) # Perform some validation # This is not a perfect validation, but it should catch most cases # and prevent the user from getting a cryptic error later on. if self._content_key is not None: self._validate_content_key(data) if self._metadata_func is not None: self._validate_metadata_func(data) for i, sample in enumerate(data, len(docs) + 1): text = self._get_text(sample=sample) metadata = self._get_metadata( sample=sample, source=str(self.file_path), seq_num=i ) docs.append(Document(page_content=text, metadata=metadata)) def _get_text(self, sample: Any) -> str: """Convert sample to string format""" if self._content_key is not None: content = sample.get(self._content_key) else: content = sample if self._text_content and not isinstance(content, str): raise ValueError( f"Expected page_content is string, got {type(content)} instead. 
\ Set `text_content=False` if the desired input for \ `page_content` is not a string" ) # In case the text is None, set it to an empty string elif isinstance(content, str): return content elif isinstance(content, dict): return json.dumps(content) if content else "" else: return str(content) if content is not None else "" def _get_metadata( self, sample: Dict[str, Any], **additional_fields: Any ) -> Dict[str, Any]: """ Return a metadata dictionary base on the existence of metadata_func :param sample: single data payload :param additional_fields: key-word arguments to be added as metadata values :return: """ if self._metadata_func is not None: return self._metadata_func(sample, additional_fields) else: return additional_fields def _validate_content_key(self, data: Any) -> None: """Check if a content key is valid""" sample = data.first() if not isinstance(sample, dict): raise ValueError( f"Expected the jq schema to result in a list of objects (dict), \ so sample must be a dict but got `{type(sample)}`" ) if sample.get(self._content_key) is None: raise ValueError( f"Expected the jq schema to result in a list of objects (dict) \ with the key `{self._content_key}`" ) def _validate_metadata_func(self, data: Any) -> None: """Check if the metadata_func output is valid""" sample = data.first() if self._metadata_func is not None: sample_metadata = self._metadata_func(sample, {}) if not isinstance(sample_metadata, dict): raise ValueError( f"Expected the metadata_func to return a dict but got \ `{type(sample_metadata)}`" )
[]
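A brief, hedged usage sketch for the JSONLoader above; the file path and jq schema are placeholders and assume a JSON file shaped like {"messages": [{"content": ...}, ...]}.

from langchain_community.document_loaders import JSONLoader

# Placeholder path; requires `pip install jq`.
loader = JSONLoader(
    file_path="./chat.json",
    jq_schema=".messages[].content",
    text_content=True,
)
docs = loader.load()
print(docs[0].page_content, docs[0].metadata)  # metadata carries source and seq_num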
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~llms~fake_llm.py
"""Fake LLM wrapper for testing purposes.""" from typing import Any, Dict, List, Mapping, Optional, cast from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.pydantic_v1 import validator from langchain.callbacks.manager import CallbackManagerForLLMRun class FakeLLM(LLM): """Fake LLM wrapper for testing purposes.""" queries: Optional[Mapping] = None sequential_responses: Optional[bool] = False response_index: int = 0 @validator("queries", always=True) def check_queries_required( cls, queries: Optional[Mapping], values: Mapping[str, Any] ) -> Optional[Mapping]: if values.get("sequential_response") and not queries: raise ValueError( "queries is required when sequential_response is set to True" ) return queries def get_num_tokens(self, text: str) -> int: """Return number of tokens.""" return len(text.split()) @property def _llm_type(self) -> str: """Return type of llm.""" return "fake" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: if self.sequential_responses: return self._get_next_response_in_sequence if self.queries is not None: return self.queries[prompt] if stop is None: return "foo" else: return "bar" @property def _identifying_params(self) -> Dict[str, Any]: return {} @property def _get_next_response_in_sequence(self) -> str: queries = cast(Mapping, self.queries) response = queries[list(queries.keys())[self.response_index]] self.response_index = self.response_index + 1 return response
[]
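A small, hedged sketch of how this test helper could be used; the import path is assumed from the test layout above.

from tests.unit_tests.llms.fake_llm import FakeLLM  # assumed test-suite path

llm = FakeLLM(queries={"ping": "pong"})
assert llm.invoke("ping") == "pong"  # canned response for a known prompt

default_llm = FakeLLM()
assert default_llm.invoke("anything") == "foo"  # no queries and no stop -> "foo"

seq_llm = FakeLLM(sequential_responses=True, queries={"a": "first", "b": "second"})
assert seq_llm.invoke("ignored") == "first"   # responses come back in key order
assert seq_llm.invoke("ignored") == "second"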
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~hologres.py
from __future__ import annotations import logging import uuid from typing import Any, Dict, Iterable, List, Optional, Tuple, Type from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.utils import get_from_dict_or_env from libs.core.langchain_core.vectorstores import VectorStore ADA_TOKEN_COUNT = 1536 _LANGCHAIN_DEFAULT_TABLE_NAME = "langchain_pg_embedding" class Hologres(VectorStore): """`Hologres API` vector store. - `connection_string` is a hologres connection string. - `embedding_function` any embedding function implementing `langchain.embeddings.base.Embeddings` interface. - `ndims` is the number of dimensions of the embedding output. - `table_name` is the name of the table to store embeddings and data. (default: langchain_pg_embedding) - NOTE: The table will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `pre_delete_table` if True, will delete the table if it exists. (default: False) - Useful for testing. """ def __init__( self, connection_string: str, embedding_function: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, logger: Optional[logging.Logger] = None, ) -> None: self.connection_string = connection_string self.ndims = ndims self.table_name = table_name self.embedding_function = embedding_function self.pre_delete_table = pre_delete_table self.logger = logger or logging.getLogger(__name__) self.__post_init__() def __post_init__( self, ) -> None: """ Initialize the store. """ from hologres_vector import HologresVector self.storage = HologresVector( self.connection_string, ndims=self.ndims, table_name=self.table_name, table_schema={"document": "text"}, pre_delete_table=self.pre_delete_table, ) @property def embeddings(self) -> Embeddings: return self.embedding_function @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding_function: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, embedding_function=embedding_function, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: List[dict], ids: List[str], **kwargs: Any, ) -> None: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. kwargs: vectorstore specific parameters """ try: schema_datas = [{"document": t} for t in texts] self.storage.upsert_vectors(embeddings, ids, metadatas, schema_datas) except Exception as e: self.logger.exception(e) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. 
metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] self.add_embeddings(texts, embeddings, metadatas, ids, **kwargs) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with Hologres with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: results: List[dict[str, Any]] = self.storage.search( embedding, k=k, select_columns=["document"], metadata_filters=filter ) docs = [ ( Document( page_content=result["document"], metadata=result["metadata"], ), result["distance"], ) for result in results ] return docs @classmethod def from_texts( cls: Type[Hologres], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """ Return VectorStore initialized from texts and embeddings. Hologres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. 
Create the connection string by calling HologresVector.connection_string_from_db_params """ embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """Construct Hologres wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Hologres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. Create the connection string by calling HologresVector.connection_string_from_db_params Example: .. code-block:: python from langchain_community.vectorstores import Hologres from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = Hologres.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, **kwargs, ) @classmethod def from_existing_index( cls: Type[Hologres], embedding: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """ Get instance of an existing Hologres store.This method will return the instance of the store without inserting any new embeddings """ connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, ndims=ndims, table_name=table_name, embedding_function=embedding, pre_delete_table=pre_delete_table, ) return store @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="HOLOGRES_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Hologres connection string is required" "Either pass it as a parameter" "or set the HOLOGRES_CONNECTION_STRING environment variable." "Create the connection string by calling" "HologresVector.connection_string_from_db_params" ) return connection_string @classmethod def from_documents( cls: Type[Hologres], documents: List[Document], embedding: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> Hologres: """ Return VectorStore initialized from documents and embeddings. Hologres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. 
Create the connection string by calling HologresVector.connection_string_from_db_params """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_collection=pre_delete_collection, embedding=embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, **kwargs, ) @classmethod def connection_string_from_db_params( cls, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return ( f"dbname={database} user={user} password={password} host={host} port={port}" )
[]
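An end-to-end sketch for the Hologres store above, under stated assumptions: a reachable Hologres instance, the hologres-vector package, and OPENAI_API_KEY for the embedding model; the host, credentials, table name, and texts are placeholders.

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Hologres

# Placeholder connection parameters.
conn_str = Hologres.connection_string_from_db_params(
    host="your-hologres-host", port=80, database="demo_db", user="user", password="pwd"
)
store = Hologres.from_texts(
    ["hologres stores vectors", "langchain wraps vector stores"],
    OpenAIEmbeddings(),
    connection_string=conn_str,
    table_name="langchain_demo",
)
print(store.similarity_search("what stores vectors?", k=1))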
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~chroma.py
from __future__ import annotations import base64 import logging import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, ) import numpy as np from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.utils import xor_args from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: import chromadb import chromadb.config from chromadb.api.types import ID, OneOrMany, Where, WhereDocument logger = logging.getLogger() DEFAULT_K = 4 # Number of Documents to return. def _results_to_docs(results: Any) -> List[Document]: return [doc for doc, _ in _results_to_docs_and_scores(results)] def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]: return [ # TODO: Chroma can do batch querying, # we shouldn't hard code to the 1st result (Document(page_content=result[0], metadata=result[1] or {}), result[2]) for result in zip( results["documents"][0], results["metadatas"][0], results["distances"][0], ) ] class Chroma(VectorStore): """`ChromaDB` vector store. To use, you should have the ``chromadb`` python package installed. Example: .. code-block:: python from langchain_community.vectorstores import Chroma from langchain_community.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = Chroma("langchain_store", embeddings) """ _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain" def __init__( self, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, embedding_function: Optional[Embeddings] = None, persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, collection_metadata: Optional[Dict] = None, client: Optional[chromadb.Client] = None, relevance_score_fn: Optional[Callable[[float], float]] = None, ) -> None: """Initialize with a Chroma client.""" try: import chromadb import chromadb.config except ImportError: raise ImportError( "Could not import chromadb python package. " "Please install it with `pip install chromadb`." ) if client is not None: self._client_settings = client_settings self._client = client self._persist_directory = persist_directory else: if client_settings: # If client_settings is provided with persist_directory specified, # then it is "in-memory and persisting to disk" mode. 
client_settings.persist_directory = ( persist_directory or client_settings.persist_directory ) if client_settings.persist_directory is not None: # Maintain backwards compatibility with chromadb < 0.4.0 major, minor, _ = chromadb.__version__.split(".") if int(major) == 0 and int(minor) < 4: client_settings.chroma_db_impl = "duckdb+parquet" _client_settings = client_settings elif persist_directory: # Maintain backwards compatibility with chromadb < 0.4.0 major, minor, _ = chromadb.__version__.split(".") if int(major) == 0 and int(minor) < 4: _client_settings = chromadb.config.Settings( chroma_db_impl="duckdb+parquet", ) else: _client_settings = chromadb.config.Settings(is_persistent=True) _client_settings.persist_directory = persist_directory else: _client_settings = chromadb.config.Settings() self._client_settings = _client_settings self._client = chromadb.Client(_client_settings) self._persist_directory = ( _client_settings.persist_directory or persist_directory ) self._embedding_function = embedding_function self._collection = self._client.get_or_create_collection( name=collection_name, embedding_function=None, metadata=collection_metadata, ) self.override_relevance_score_fn = relevance_score_fn @property def embeddings(self) -> Optional[Embeddings]: return self._embedding_function @xor_args(("query_texts", "query_embeddings")) def __query_collection( self, query_texts: Optional[List[str]] = None, query_embeddings: Optional[List[List[float]]] = None, n_results: int = 4, where: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Query the chroma collection.""" try: import chromadb # noqa: F401 except ImportError: raise ValueError( "Could not import chromadb python package. " "Please install it with `pip install chromadb`." ) return self._collection.query( query_texts=query_texts, query_embeddings=query_embeddings, n_results=n_results, where=where, where_document=where_document, **kwargs, ) def encode_image(self, uri: str) -> str: """Get base64 string from image URI.""" with open(uri, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") def add_images( self, uris: List[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more images through the embeddings and add to the vectorstore. Args: uris List[str]: File path to the image. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Returns: List[str]: List of IDs of the added images. 
""" # Map from uris to b64 encoded strings b64_texts = [self.encode_image(uri=uri) for uri in uris] # Populate IDs if ids is None: ids = [str(uuid.uuid1()) for _ in uris] embeddings = None # Set embeddings if self._embedding_function is not None and hasattr( self._embedding_function, "embed_image" ): embeddings = self._embedding_function.embed_image(uris=uris) if metadatas: # fill metadatas with empty dicts if somebody # did not specify metadata for all images length_diff = len(uris) - len(metadatas) if length_diff: metadatas = metadatas + [{}] * length_diff empty_ids = [] non_empty_ids = [] for idx, m in enumerate(metadatas): if m: non_empty_ids.append(idx) else: empty_ids.append(idx) if non_empty_ids: metadatas = [metadatas[idx] for idx in non_empty_ids] images_with_metadatas = [uris[idx] for idx in non_empty_ids] embeddings_with_metadatas = ( [embeddings[idx] for idx in non_empty_ids] if embeddings else None ) ids_with_metadata = [ids[idx] for idx in non_empty_ids] try: self._collection.upsert( metadatas=metadatas, embeddings=embeddings_with_metadatas, documents=images_with_metadatas, ids=ids_with_metadata, ) except ValueError as e: if "Expected metadata value to be" in str(e): msg = ( "Try filtering complex metadata using " "langchain.vectorstores.utils.filter_complex_metadata." ) raise ValueError(e.args[0] + "\n\n" + msg) else: raise e if empty_ids: images_without_metadatas = [uris[j] for j in empty_ids] embeddings_without_metadatas = ( [embeddings[j] for j in empty_ids] if embeddings else None ) ids_without_metadatas = [ids[j] for j in empty_ids] self._collection.upsert( embeddings=embeddings_without_metadatas, documents=images_without_metadatas, ids=ids_without_metadatas, ) else: self._collection.upsert( embeddings=embeddings, documents=b64_texts, ids=ids, ) return ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Returns: List[str]: List of IDs of the added texts. """ # TODO: Handle the case where the user doesn't provide ids on the Collection if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = None texts = list(texts) if self._embedding_function is not None: embeddings = self._embedding_function.embed_documents(texts) if metadatas: # fill metadatas with empty dicts if somebody # did not specify metadata for all texts length_diff = len(texts) - len(metadatas) if length_diff: metadatas = metadatas + [{}] * length_diff empty_ids = [] non_empty_ids = [] for idx, m in enumerate(metadatas): if m: non_empty_ids.append(idx) else: empty_ids.append(idx) if non_empty_ids: metadatas = [metadatas[idx] for idx in non_empty_ids] texts_with_metadatas = [texts[idx] for idx in non_empty_ids] embeddings_with_metadatas = ( [embeddings[idx] for idx in non_empty_ids] if embeddings else None ) ids_with_metadata = [ids[idx] for idx in non_empty_ids] try: self._collection.upsert( metadatas=metadatas, embeddings=embeddings_with_metadatas, documents=texts_with_metadatas, ids=ids_with_metadata, ) except ValueError as e: if "Expected metadata value to be" in str(e): msg = ( "Try filtering complex metadata from the document using " "langchain.vectorstores.utils.filter_complex_metadata." 
) raise ValueError(e.args[0] + "\n\n" + msg) else: raise e if empty_ids: texts_without_metadatas = [texts[j] for j in empty_ids] embeddings_without_metadatas = ( [embeddings[j] for j in empty_ids] if embeddings else None ) ids_without_metadatas = [ids[j] for j in empty_ids] self._collection.upsert( embeddings=embeddings_without_metadatas, documents=texts_without_metadatas, ids=ids_without_metadatas, ) else: self._collection.upsert( embeddings=embeddings, documents=texts, ids=ids, ) return ids def similarity_search( self, query: str, k: int = DEFAULT_K, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with Chroma. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of documents most similar to the query text. """ docs_and_scores = self.similarity_search_with_score(query, k, filter=filter) return [doc for doc, _ in docs_and_scores] def similarity_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, filter: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding (List[float]): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ results = self.__query_collection( query_embeddings=embedding, n_results=k, where=filter, where_document=where_document, ) return _results_to_docs(results) def similarity_search_by_vector_with_relevance_scores( self, embedding: List[float], k: int = DEFAULT_K, filter: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Return docs most similar to embedding vector and similarity score. Args: embedding (List[float]): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ results = self.__query_collection( query_embeddings=embedding, n_results=k, where=filter, where_document=where_document, ) return _results_to_docs_and_scores(results) def similarity_search_with_score( self, query: str, k: int = DEFAULT_K, filter: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search with Chroma with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. 
""" if self._embedding_function is None: results = self.__query_collection( query_texts=[query], n_results=k, where=filter, where_document=where_document, ) else: query_embedding = self._embedding_function.embed_query(query) results = self.__query_collection( query_embeddings=[query_embedding], n_results=k, where=filter, where_document=where_document, ) return _results_to_docs_and_scores(results) def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn: return self.override_relevance_score_fn distance = "l2" distance_key = "hnsw:space" metadata = self._collection.metadata if metadata and distance_key in metadata: distance = metadata[distance_key] if distance == "cosine": return self._cosine_relevance_score_fn elif distance == "l2": return self._euclidean_relevance_score_fn elif distance == "ip": return self._max_inner_product_relevance_score_fn else: raise ValueError( "No supported normalization function" f" for distance metric of type: {distance}." "Consider providing relevance_score_fn to Chroma constructor." ) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ results = self.__query_collection( query_embeddings=embedding, n_results=fetch_k, where=filter, where_document=where_document, include=["metadatas", "documents", "distances", "embeddings"], ) mmr_selected = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), results["embeddings"][0], k=k, lambda_mult=lambda_mult, ) candidates = _results_to_docs(results) selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected] return selected_results def max_marginal_relevance_search( self, query: str, k: int = DEFAULT_K, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. 
Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ if self._embedding_function is None: raise ValueError( "For MMR search, you must specify an embedding function on" "creation." ) embedding = self._embedding_function.embed_query(query) docs = self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult=lambda_mult, filter=filter, where_document=where_document, ) return docs def delete_collection(self) -> None: """Delete the collection.""" self._client.delete_collection(self._collection.name) def get( self, ids: Optional[OneOrMany[ID]] = None, where: Optional[Where] = None, limit: Optional[int] = None, offset: Optional[int] = None, where_document: Optional[WhereDocument] = None, include: Optional[List[str]] = None, ) -> Dict[str, Any]: """Gets the collection. Args: ids: The ids of the embeddings to get. Optional. where: A Where type dict used to filter results by. E.g. `{"color" : "red", "price": 4.20}`. Optional. limit: The number of documents to return. Optional. offset: The offset to start returning results from. Useful for paging results with limit. Optional. where_document: A WhereDocument type dict used to filter by the documents. E.g. `{$contains: "hello"}`. Optional. include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`. Ids are always included. Defaults to `["metadatas", "documents"]`. Optional. """ kwargs = { "ids": ids, "where": where, "limit": limit, "offset": offset, "where_document": where_document, } if include is not None: kwargs["include"] = include return self._collection.get(**kwargs) def persist(self) -> None: """Persist the collection. This can be used to explicitly persist the data to disk. It will also be called automatically when the object is destroyed. """ if self._persist_directory is None: raise ValueError( "You must specify a persist_directory on" "creation to persist the collection." ) import chromadb # Maintain backwards compatibility with chromadb < 0.4.0 major, minor, _ = chromadb.__version__.split(".") if int(major) == 0 and int(minor) < 4: self._client.persist() def update_document(self, document_id: str, document: Document) -> None: """Update a document in the collection. Args: document_id (str): ID of the document to update. document (Document): Document to update. """ return self.update_documents([document_id], [document]) def update_documents(self, ids: List[str], documents: List[Document]) -> None: """Update a document in the collection. Args: ids (List[str]): List of ids of the document to update. documents (List[Document]): List of documents to update. """ text = [document.page_content for document in documents] metadata = [document.metadata for document in documents] if self._embedding_function is None: raise ValueError( "For update, you must specify an embedding function on creation." 
) embeddings = self._embedding_function.embed_documents(text) if hasattr( self._collection._client, "max_batch_size" ): # for Chroma 0.4.10 and above from chromadb.utils.batch_utils import create_batches for batch in create_batches( api=self._collection._client, ids=ids, metadatas=metadata, documents=text, embeddings=embeddings, ): self._collection.update( ids=batch[0], embeddings=batch[1], documents=batch[3], metadatas=batch[2], ) else: self._collection.update( ids=ids, embeddings=embeddings, documents=text, metadatas=metadata, ) @classmethod def from_texts( cls: Type[Chroma], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, client: Optional[chromadb.Client] = None, collection_metadata: Optional[Dict] = None, **kwargs: Any, ) -> Chroma: """Create a Chroma vectorstore from a raw documents. If a persist_directory is specified, the collection will be persisted there. Otherwise, the data will be ephemeral in-memory. Args: texts (List[str]): List of texts to add to the collection. collection_name (str): Name of the collection to create. persist_directory (Optional[str]): Directory to persist the collection. embedding (Optional[Embeddings]): Embedding function. Defaults to None. metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. ids (Optional[List[str]]): List of document IDs. Defaults to None. client_settings (Optional[chromadb.config.Settings]): Chroma client settings collection_metadata (Optional[Dict]): Collection configurations. Defaults to None. Returns: Chroma: Chroma vectorstore. """ chroma_collection = cls( collection_name=collection_name, embedding_function=embedding, persist_directory=persist_directory, client_settings=client_settings, client=client, collection_metadata=collection_metadata, **kwargs, ) if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if hasattr( chroma_collection._client, "max_batch_size" ): # for Chroma 0.4.10 and above from chromadb.utils.batch_utils import create_batches for batch in create_batches( api=chroma_collection._client, ids=ids, metadatas=metadatas, documents=texts, ): chroma_collection.add_texts( texts=batch[3] if batch[3] else [], metadatas=batch[2] if batch[2] else None, ids=batch[0], ) else: chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids) return chroma_collection @classmethod def from_documents( cls: Type[Chroma], documents: List[Document], embedding: Optional[Embeddings] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, client: Optional[chromadb.Client] = None, # Add this line collection_metadata: Optional[Dict] = None, **kwargs: Any, ) -> Chroma: """Create a Chroma vectorstore from a list of documents. If a persist_directory is specified, the collection will be persisted there. Otherwise, the data will be ephemeral in-memory. Args: collection_name (str): Name of the collection to create. persist_directory (Optional[str]): Directory to persist the collection. ids (Optional[List[str]]): List of document IDs. Defaults to None. documents (List[Document]): List of documents to add to the vectorstore. embedding (Optional[Embeddings]): Embedding function. Defaults to None. 
client_settings (Optional[chromadb.config.Settings]): Chroma client settings collection_metadata (Optional[Dict]): Collection configurations. Defaults to None. Returns: Chroma: Chroma vectorstore. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts( texts=texts, embedding=embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, persist_directory=persist_directory, client_settings=client_settings, client=client, collection_metadata=collection_metadata, **kwargs, ) def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. """ self._collection.delete(ids=ids)
[]
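A short, hedged sketch for the Chroma store above; it assumes chromadb is installed and OPENAI_API_KEY is set, and the texts and collection name are placeholders.

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma

# Ephemeral in-memory collection; pass persist_directory to keep it on disk.
vectorstore = Chroma.from_texts(
    ["chroma stores embeddings", "langchain wraps vector stores"],
    OpenAIEmbeddings(),
    collection_name="demo",
)
docs = vectorstore.similarity_search("what stores embeddings?", k=1)
print(docs[0].page_content)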
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~chat_message_histories~test_streamlit.py
"""Unit tests for StreamlitChatMessageHistory functionality.""" import pytest test_script = """ import json import streamlit as st from langchain.memory import ConversationBufferMemory from langchain_community.chat_message_histories import StreamlitChatMessageHistory from libs.core.langchain_core.messages import message_to_dict message_history = StreamlitChatMessageHistory() memory = ConversationBufferMemory(chat_memory=message_history, return_messages=True) # Add some messages if st.checkbox("add initial messages", value=True): memory.chat_memory.add_ai_message("This is me, the AI") memory.chat_memory.add_user_message("This is me, the human") else: st.markdown("Skipped add") # Clear messages if checked if st.checkbox("clear messages"): st.markdown("Cleared!") memory.chat_memory.clear() # Write the output to st.code as a json blob for inspection messages = memory.chat_memory.messages messages_json = json.dumps([message_to_dict(msg) for msg in messages]) st.text(messages_json) """ @pytest.mark.requires("streamlit") def test_memory_with_message_store() -> None: try: from streamlit.testing.script_interactions import InteractiveScriptTests except ModuleNotFoundError: pytest.skip("Incorrect version of Streamlit installed") test_handler = InteractiveScriptTests() test_handler.setUp() try: sr = test_handler.script_from_string(test_script).run() except TypeError: # Earlier version expected 2 arguments sr = test_handler.script_from_string("memory_test.py", test_script).run() # Initial run should write two messages messages_json = sr.get("text")[-1].value assert "This is me, the AI" in messages_json assert "This is me, the human" in messages_json # Uncheck the initial write, they should persist in session_state sr = sr.get("checkbox")[0].uncheck().run() assert sr.get("markdown")[0].value == "Skipped add" messages_json = sr.get("text")[-1].value assert "This is me, the AI" in messages_json assert "This is me, the human" in messages_json # Clear the message history sr = sr.get("checkbox")[1].check().run() assert sr.get("markdown")[1].value == "Cleared!" messages_json = sr.get("text")[-1].value assert messages_json == "[]"
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~human.py
from typing import Any, Callable, List, Mapping, Optional from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.pydantic_v1 import Field from langchain_community.llms.utils import enforce_stop_tokens def _display_prompt(prompt: str) -> None: """Displays the given prompt to the user.""" print(f"\n{prompt}") def _collect_user_input( separator: Optional[str] = None, stop: Optional[List[str]] = None ) -> str: """Collects and returns user input as a single string.""" separator = separator or "\n" lines = [] while True: line = input() if not line: break lines.append(line) if stop and any(seq in line for seq in stop): break # Combine all lines into a single string multi_line_input = separator.join(lines) return multi_line_input class HumanInputLLM(LLM): """ It returns user input as the response. """ input_func: Callable = Field(default_factory=lambda: _collect_user_input) prompt_func: Callable[[str], None] = Field(default_factory=lambda: _display_prompt) separator: str = "\n" input_kwargs: Mapping[str, Any] = {} prompt_kwargs: Mapping[str, Any] = {} @property def _identifying_params(self) -> Mapping[str, Any]: """ Returns an empty dictionary as there are no identifying parameters. """ return {} @property def _llm_type(self) -> str: """Returns the type of LLM.""" return "human-input" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """ Displays the prompt to the user and returns their input as a response. Args: prompt (str): The prompt to be displayed to the user. stop (Optional[List[str]]): A list of stop strings. run_manager (Optional[CallbackManagerForLLMRun]): Currently not used. Returns: str: The user's input as a response. """ self.prompt_func(prompt, **self.prompt_kwargs) user_input = self.input_func( separator=self.separator, stop=stop, **self.input_kwargs ) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the human themselves user_input = enforce_stop_tokens(user_input, stop) return user_input
[ "{}" ]
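A minimal sketch of the human-in-the-loop LLM above; running it blocks on terminal input, and the prompt text is illustrative.

from langchain_community.llms import HumanInputLLM

llm = HumanInputLLM()
# The prompt is printed to the terminal and the typed reply becomes the "completion".
answer = llm.invoke("You are the LLM now - please answer: what is 2 + 2?")
print(answer)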
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~mlflow.py
from __future__ import annotations from typing import Any, Iterator, List from urllib.parse import urlparse from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import BaseModel, PrivateAttr def _chunk(texts: List[str], size: int) -> Iterator[List[str]]: for i in range(0, len(texts), size): yield texts[i : i + size] class MlflowEmbeddings(Embeddings, BaseModel): """Wrapper around embeddings LLMs in MLflow. To use, you should have the `mlflow[genai]` python package installed. For more information, see https://mlflow.org/docs/latest/llms/deployments/server.html. Example: .. code-block:: python from langchain_community.embeddings import MlflowEmbeddings embeddings = MlflowEmbeddings( target_uri="http://localhost:5000", endpoint="embeddings", ) """ endpoint: str """The endpoint to use.""" target_uri: str """The target URI to use.""" _client: Any = PrivateAttr() def __init__(self, **kwargs: Any): super().__init__(**kwargs) self._validate_uri() try: from mlflow.deployments import get_deploy_client self._client = get_deploy_client(self.target_uri) except ImportError as e: raise ImportError( "Failed to create the client. " f"Please run `pip install mlflow{self._mlflow_extras}` to install " "required dependencies." ) from e @property def _mlflow_extras(self) -> str: return "[genai]" def _validate_uri(self) -> None: if self.target_uri == "databricks": return allowed = ["http", "https", "databricks"] if urlparse(self.target_uri).scheme not in allowed: raise ValueError( f"Invalid target URI: {self.target_uri}. " f"The scheme must be one of {allowed}." ) def embed_documents(self, texts: List[str]) -> List[List[float]]: embeddings: List[List[float]] = [] for txt in _chunk(texts, 20): resp = self._client.predict(endpoint=self.endpoint, inputs={"input": txt}) embeddings.extend(r["embedding"] for r in resp["data"]) return embeddings def embed_query(self, text: str) -> List[float]: return self.embed_documents([text])[0]
[]
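A hedged sketch mirroring the docstring above; it assumes an MLflow deployments server at the placeholder URI exposing an "embeddings" endpoint and mlflow[genai] installed.

from langchain_community.embeddings import MlflowEmbeddings

embeddings = MlflowEmbeddings(
    target_uri="http://localhost:5000",  # placeholder server URI
    endpoint="embeddings",
)
doc_vectors = embeddings.embed_documents(["hello world", "goodbye world"])
query_vector = embeddings.embed_query("hello")
print(len(doc_vectors), len(query_vector))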
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~azlyrics.py
from typing import List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.web_base import WebBaseLoader class AZLyricsLoader(WebBaseLoader): """Load `AZLyrics` webpages.""" def load(self) -> List[Document]: """Load webpages into Documents.""" soup = self.scrape() title = soup.title.text lyrics = soup.find_all("div", {"class": ""})[2].text text = title + lyrics metadata = {"source": self.web_path} return [Document(page_content=text, metadata=metadata)]
[]
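A brief, hedged sketch for the loader above; the URL is a placeholder for any AZLyrics song page and network access is assumed.

from langchain_community.document_loaders import AZLyricsLoader

loader = AZLyricsLoader("https://www.azlyrics.com/lyrics/artist/song.html")  # placeholder URL
docs = loader.load()
print(docs[0].metadata["source"])
print(docs[0].page_content[:200])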
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~browserless.py
from typing import Iterator, List, Union import requests from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class BrowserlessLoader(BaseLoader): """Load webpages with `Browserless` /content endpoint.""" def __init__( self, api_token: str, urls: Union[str, List[str]], text_content: bool = True ): """Initialize with API token and the URLs to scrape""" self.api_token = api_token """Browserless API token.""" self.urls = urls """List of URLs to scrape.""" self.text_content = text_content def lazy_load(self) -> Iterator[Document]: """Lazy load Documents from URLs.""" for url in self.urls: if self.text_content: response = requests.post( "https://chrome.browserless.io/scrape", params={ "token": self.api_token, }, json={ "url": url, "elements": [ { "selector": "body", } ], }, ) yield Document( page_content=response.json()["data"][0]["results"][0]["text"], metadata={ "source": url, }, ) else: response = requests.post( "https://chrome.browserless.io/content", params={ "token": self.api_token, }, json={ "url": url, }, ) yield Document( page_content=response.text, metadata={ "source": url, }, ) def load(self) -> List[Document]: """Load Documents from URLs.""" return list(self.lazy_load())
[]
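A short, hedged sketch for the loader above; the API token is a placeholder and the example URL is illustrative.

from langchain_community.document_loaders import BrowserlessLoader

loader = BrowserlessLoader(
    api_token="YOUR_BROWSERLESS_API_TOKEN",  # placeholder token
    urls=["https://example.com"],
    text_content=True,  # use the /scrape endpoint and return extracted text
)
for doc in loader.lazy_load():
    print(doc.metadata["source"], len(doc.page_content))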
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~agents~output_parsers~test_xml.py
from libs.core.langchain_core.agents import AgentAction, AgentFinish

from langchain.agents.output_parsers.xml import XMLAgentOutputParser


def test_tool_usage() -> None:
    parser = XMLAgentOutputParser()
    # Test when final closing </tool_input> is included
    _input = """<tool>search</tool><tool_input>foo</tool_input>"""
    output = parser.invoke(_input)
    expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
    assert output == expected_output
    # Test when final closing </tool_input> is NOT included
    # This happens when it's used as a stop token
    _input = """<tool>search</tool><tool_input>foo"""
    output = parser.invoke(_input)
    expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
    assert output == expected_output


def test_finish() -> None:
    parser = XMLAgentOutputParser()
    # Test when final closing </final_answer> is included
    _input = """<final_answer>bar</final_answer>"""
    output = parser.invoke(_input)
    expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
    assert output == expected_output
    # Test when final closing </final_answer> is NOT included
    # This happens when it's used as a stop token
    _input = """<final_answer>bar"""
    output = parser.invoke(_input)
    expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
    assert output == expected_output
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~utilities~apify.py
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env if TYPE_CHECKING: from langchain_community.document_loaders import ApifyDatasetLoader class ApifyWrapper(BaseModel): """Wrapper around Apify. To use, you should have the ``apify-client`` python package installed, and the environment variable ``APIFY_API_TOKEN`` set with your API key, or pass `apify_api_token` as a named parameter to the constructor. """ apify_client: Any apify_client_async: Any @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate environment. Validate that an Apify API token is set and the apify-client Python package exists in the current environment. """ apify_api_token = get_from_dict_or_env( values, "apify_api_token", "APIFY_API_TOKEN" ) try: from apify_client import ApifyClient, ApifyClientAsync values["apify_client"] = ApifyClient(apify_api_token) values["apify_client_async"] = ApifyClientAsync(apify_api_token) except ImportError: raise ImportError( "Could not import apify-client Python package. " "Please install it with `pip install apify-client`." ) return values def call_actor( self, actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None, ) -> "ApifyDatasetLoader": """Run an Actor on the Apify platform and wait for results to be ready. Args: actor_id (str): The ID or name of the Actor on the Apify platform. run_input (Dict): The input object of the Actor that you're trying to run. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. build (str, optional): Optionally specifies the actor build to run. It can be either a build tag or build number. memory_mbytes (int, optional): Optional memory limit for the run, in megabytes. timeout_secs (int, optional): Optional timeout for the run, in seconds. Returns: ApifyDatasetLoader: A loader that will fetch the records from the Actor run's default dataset. """ from langchain_community.document_loaders import ApifyDatasetLoader actor_call = self.apify_client.actor(actor_id).call( run_input=run_input, build=build, memory_mbytes=memory_mbytes, timeout_secs=timeout_secs, ) return ApifyDatasetLoader( dataset_id=actor_call["defaultDatasetId"], dataset_mapping_function=dataset_mapping_function, ) async def acall_actor( self, actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None, ) -> "ApifyDatasetLoader": """Run an Actor on the Apify platform and wait for results to be ready. Args: actor_id (str): The ID or name of the Actor on the Apify platform. run_input (Dict): The input object of the Actor that you're trying to run. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. build (str, optional): Optionally specifies the actor build to run. It can be either a build tag or build number. memory_mbytes (int, optional): Optional memory limit for the run, in megabytes. timeout_secs (int, optional): Optional timeout for the run, in seconds. 
Returns: ApifyDatasetLoader: A loader that will fetch the records from the Actor run's default dataset. """ from langchain_community.document_loaders import ApifyDatasetLoader actor_call = await self.apify_client_async.actor(actor_id).call( run_input=run_input, build=build, memory_mbytes=memory_mbytes, timeout_secs=timeout_secs, ) return ApifyDatasetLoader( dataset_id=actor_call["defaultDatasetId"], dataset_mapping_function=dataset_mapping_function, ) def call_actor_task( self, task_id: str, task_input: Dict, dataset_mapping_function: Callable[[Dict], Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None, ) -> "ApifyDatasetLoader": """Run a saved Actor task on Apify and wait for results to be ready. Args: task_id (str): The ID or name of the task on the Apify platform. task_input (Dict): The input object of the task that you're trying to run. Overrides the task's saved input. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. build (str, optional): Optionally specifies the actor build to run. It can be either a build tag or build number. memory_mbytes (int, optional): Optional memory limit for the run, in megabytes. timeout_secs (int, optional): Optional timeout for the run, in seconds. Returns: ApifyDatasetLoader: A loader that will fetch the records from the task run's default dataset. """ from langchain_community.document_loaders import ApifyDatasetLoader task_call = self.apify_client.task(task_id).call( task_input=task_input, build=build, memory_mbytes=memory_mbytes, timeout_secs=timeout_secs, ) return ApifyDatasetLoader( dataset_id=task_call["defaultDatasetId"], dataset_mapping_function=dataset_mapping_function, ) async def acall_actor_task( self, task_id: str, task_input: Dict, dataset_mapping_function: Callable[[Dict], Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None, ) -> "ApifyDatasetLoader": """Run a saved Actor task on Apify and wait for results to be ready. Args: task_id (str): The ID or name of the task on the Apify platform. task_input (Dict): The input object of the task that you're trying to run. Overrides the task's saved input. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. build (str, optional): Optionally specifies the actor build to run. It can be either a build tag or build number. memory_mbytes (int, optional): Optional memory limit for the run, in megabytes. timeout_secs (int, optional): Optional timeout for the run, in seconds. Returns: ApifyDatasetLoader: A loader that will fetch the records from the task run's default dataset. """ from langchain_community.document_loaders import ApifyDatasetLoader task_call = await self.apify_client_async.task(task_id).call( task_input=task_input, build=build, memory_mbytes=memory_mbytes, timeout_secs=timeout_secs, ) return ApifyDatasetLoader( dataset_id=task_call["defaultDatasetId"], dataset_mapping_function=dataset_mapping_function, )
[]
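A hedged sketch of the wrapper above, following the pattern of its call_actor signature; it assumes APIFY_API_TOKEN is set, and the actor ID, start URL, and dataset field names are placeholders.

from libs.core.langchain_core.documents import Document

from langchain_community.utilities import ApifyWrapper

apify = ApifyWrapper()
loader = apify.call_actor(
    actor_id="apify/website-content-crawler",  # placeholder actor
    run_input={"startUrls": [{"url": "https://python.langchain.com/"}]},
    dataset_mapping_function=lambda item: Document(
        page_content=item.get("text", ""), metadata={"source": item.get("url", "")}
    ),
)
docs = loader.load()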
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~elasticsearch.py
from __future__ import annotations from typing import TYPE_CHECKING, List, Optional from libs.core.langchain_core.utils import get_from_env if TYPE_CHECKING: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient from libs.core.langchain_core.embeddings import Embeddings class ElasticsearchEmbeddings(Embeddings): """Elasticsearch embedding models. This class provides an interface to generate embeddings using a model deployed in an Elasticsearch cluster. It requires an Elasticsearch connection object and the model_id of the model deployed in the cluster. In Elasticsearch you need to have an embedding model loaded and deployed. - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html """ # noqa: E501 def __init__( self, client: MlClient, model_id: str, *, input_field: str = "text_field", ): """ Initialize the ElasticsearchEmbeddings instance. Args: client (MlClient): An Elasticsearch ML client object. model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. """ self.client = client self.model_id = model_id self.input_field = input_field @classmethod def from_credentials( cls, model_id: str, *, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """Instantiate embeddings from Elasticsearch credentials. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to. es_user: (str, optional): Elasticsearch username. es_password: (str, optional): Elasticsearch password. Example: .. code-block:: python from langchain_community.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Credentials can be passed in two ways. Either set the env vars # ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically # pulled in, or pass them in directly as kwargs. 
embeddings = ElasticsearchEmbeddings.from_credentials( model_id, input_field=input_field, # es_cloud_id="foo", # es_user="bar", # es_password="baz", ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] embeddings_generator.embed_documents(documents) """ try: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient except ImportError: raise ImportError( "elasticsearch package not found, please install with 'pip install " "elasticsearch'" ) es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID") es_user = es_user or get_from_env("es_user", "ES_USER") es_password = es_password or get_from_env("es_password", "ES_PASSWORD") # Connect to Elasticsearch es_connection = Elasticsearch( cloud_id=es_cloud_id, basic_auth=(es_user, es_password) ) client = MlClient(es_connection) return cls(client, model_id, input_field=input_field) @classmethod def from_es_connection( cls, model_id: str, es_connection: Elasticsearch, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """ Instantiate embeddings from an existing Elasticsearch connection. This method provides a way to create an instance of the ElasticsearchEmbeddings class using an existing Elasticsearch connection. The connection object is used to create an MlClient, which is then used to initialize the ElasticsearchEmbeddings instance. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch connection object. input_field (str, optional): The name of the key for the input text field in the document. Defaults to 'text_field'. Returns: ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class. Example: .. code-block:: python from elasticsearch import Elasticsearch from langchain_community.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Create Elasticsearch connection es_connection = Elasticsearch( hosts=["localhost:9200"], http_auth=("user", "password") ) # Instantiate ElasticsearchEmbeddings using the existing connection embeddings = ElasticsearchEmbeddings.from_es_connection( model_id, es_connection, input_field=input_field, ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] embeddings_generator.embed_documents(documents) """ # Importing MlClient from elasticsearch.client within the method to # avoid unnecessary import if the method is not used from elasticsearch.client import MlClient # Create an MlClient from the given Elasticsearch connection client = MlClient(es_connection) # Return a new instance of the ElasticsearchEmbeddings class with # the MlClient, model_id, and input_field return cls(client, model_id, input_field=input_field) def _embedding_func(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for the given texts using the Elasticsearch model. Args: texts (List[str]): A list of text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each text in the input list. 
""" response = self.client.infer_trained_model( model_id=self.model_id, docs=[{self.input_field: text} for text in texts] ) embeddings = [doc["predicted_value"] for doc in response["inference_results"]] return embeddings def embed_documents(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for a list of documents. Args: texts (List[str]): A list of document text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each document in the input list. """ return self._embedding_func(texts) def embed_query(self, text: str) -> List[float]: """ Generate an embedding for a single query text. Args: text (str): The query text to generate an embedding for. Returns: List[float]: The embedding for the input query text. """ return self._embedding_func([text])[0]
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~schema~exceptions.py
from libs.core.langchain_core.exceptions import LangChainException __all__ = ["LangChainException"]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~javelin_ai_gateway.py
from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra # Ignoring type because below is valid pydantic code # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class Params(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Parameters for the Javelin AI Gateway LLM.""" temperature: float = 0.0 stop: Optional[List[str]] = None max_tokens: Optional[int] = None class JavelinAIGateway(LLM): """Javelin AI Gateway LLMs. To use, you should have the ``javelin_sdk`` python package installed. For more information, see https://docs.getjavelin.io Example: .. code-block:: python from langchain_community.llms import JavelinAIGateway completions = JavelinAIGateway( gateway_uri="<your-javelin-ai-gateway-uri>", route="<your-javelin-ai-gateway-completions-route>", params={ "temperature": 0.1 } ) """ route: str """The route to use for the Javelin AI Gateway API.""" client: Optional[Any] = None """The Javelin AI Gateway client.""" gateway_uri: Optional[str] = None """The URI of the Javelin AI Gateway API.""" params: Optional[Params] = None """Parameters for the Javelin AI Gateway API.""" javelin_api_key: Optional[str] = None """The API key for the Javelin AI Gateway API.""" def __init__(self, **kwargs: Any): try: from javelin_sdk import ( JavelinClient, UnauthorizedError, ) except ImportError: raise ImportError( "Could not import javelin_sdk python package. " "Please install it with `pip install javelin_sdk`." ) super().__init__(**kwargs) if self.gateway_uri: try: self.client = JavelinClient( base_url=self.gateway_uri, api_key=self.javelin_api_key ) except UnauthorizedError as e: raise ValueError("Javelin: Incorrect API Key.") from e @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Javelin AI Gateway API.""" params: Dict[str, Any] = { "gateway_uri": self.gateway_uri, "route": self.route, "javelin_api_key": self.javelin_api_key, **(self.params.dict() if self.params else {}), } return params @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return self._default_params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the Javelin AI Gateway API.""" data: Dict[str, Any] = { "prompt": prompt, **(self.params.dict() if self.params else {}), } if s := (stop or (self.params.stop if self.params else None)): data["stop"] = s if self.client is not None: resp = self.client.query_route(self.route, query_body=data) else: raise ValueError("Javelin client is not initialized.") resp_dict = resp.dict() try: return resp_dict["llm_response"]["choices"][0]["text"] except KeyError: return "" async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call async the Javelin AI Gateway API.""" data: Dict[str, Any] = { "prompt": prompt, **(self.params.dict() if self.params else {}), } if s := (stop or (self.params.stop if self.params else None)): data["stop"] = s if self.client is not None: resp = await self.client.aquery_route(self.route, query_body=data) else: raise ValueError("Javelin client is not initialized.") resp_dict = resp.dict() try: 
return resp_dict["llm_response"]["choices"][0]["text"] except KeyError: return "" @property def _llm_type(self) -> str: """Return type of llm.""" return "javelin-ai-gateway"
[]
2024-01-10
mth93/langchain
libs~community~tests~unit_tests~chat_models~test_bedrock.py
"""Test Anthropic Chat API wrapper.""" from typing import List from unittest.mock import MagicMock import pytest from libs.core.langchain_core.messages import ( AIMessage, BaseMessage, HumanMessage, SystemMessage, ) from langchain_community.chat_models import BedrockChat from langchain_community.chat_models.meta import convert_messages_to_prompt_llama @pytest.mark.parametrize( ("messages", "expected"), [ ([HumanMessage(content="Hello")], "[INST] Hello [/INST]"), ( [HumanMessage(content="Hello"), AIMessage(content="Answer:")], "[INST] Hello [/INST]\nAnswer:", ), ( [ SystemMessage(content="You're an assistant"), HumanMessage(content="Hello"), AIMessage(content="Answer:"), ], "<<SYS>> You're an assistant <</SYS>>\n[INST] Hello [/INST]\nAnswer:", ), ], ) def test_formatting(messages: List[BaseMessage], expected: str) -> None: result = convert_messages_to_prompt_llama(messages) assert result == expected def test_anthropic_bedrock() -> None: client = MagicMock() respbody = MagicMock( read=MagicMock( return_value=MagicMock( decode=MagicMock(return_value=b'{"completion":"Hi back"}') ) ) ) client.invoke_model.return_value = {"body": respbody} model = BedrockChat(model_id="anthropic.claude-v2", client=client) # should not throw an error model.invoke("hello there")
[ "You're an assistant", "Answer:", "Hello" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~figma.py
import json import urllib.request from typing import Any, List from libs.core.langchain_core.documents import Document from libs.core.langchain_core.utils import stringify_dict from langchain_community.document_loaders.base import BaseLoader class FigmaFileLoader(BaseLoader): """Load `Figma` file.""" def __init__(self, access_token: str, ids: str, key: str): """Initialize with access token, ids, and key. Args: access_token: The access token for the Figma REST API. ids: The ids of the Figma file. key: The key for the Figma file """ self.access_token = access_token self.ids = ids self.key = key def _construct_figma_api_url(self) -> str: api_url = "https://api.figma.com/v1/files/%s/nodes?ids=%s" % ( self.key, self.ids, ) return api_url def _get_figma_file(self) -> Any: """Get Figma file from Figma REST API.""" headers = {"X-Figma-Token": self.access_token} request = urllib.request.Request( self._construct_figma_api_url(), headers=headers ) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) return json_data def load(self) -> List[Document]: """Load file""" data = self._get_figma_file() text = stringify_dict(data) metadata = {"source": self._construct_figma_api_url()} return [Document(page_content=text, metadata=metadata)]
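

# --- Illustrative usage sketch added for this listing; not part of the original module. ---
# The access token, node ids, and file key below are placeholders; a real Figma file and a
# personal access token with read access are needed for the HTTP request to succeed.
if __name__ == "__main__":
    loader = FigmaFileLoader(
        access_token="figd_xxx",  # placeholder token
        ids="1:2,1:3",  # placeholder node ids
        key="FILE_KEY",  # placeholder file key
    )
    docs = loader.load()
    print(docs[0].metadata["source"], len(docs[0].page_content))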
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~memory~token_buffer.py
from typing import Any, Dict, List from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.messages import BaseMessage, get_buffer_string from langchain.memory.chat_memory import BaseChatMemory class ConversationTokenBufferMemory(BaseChatMemory): """Conversation chat memory with token limit.""" human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel memory_key: str = "history" max_token_limit: int = 2000 @property def buffer(self) -> Any: """String buffer of memory.""" return self.buffer_as_messages if self.return_messages else self.buffer_as_str @property def buffer_as_str(self) -> str: """Exposes the buffer as a string in case return_messages is False.""" return get_buffer_string( self.chat_memory.messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) @property def buffer_as_messages(self) -> List[BaseMessage]: """Exposes the buffer as a list of messages in case return_messages is True.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" return {self.memory_key: self.buffer} def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer. Pruned.""" super().save_context(inputs, outputs) # Prune buffer if it exceeds max token limit buffer = self.chat_memory.messages curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) if curr_buffer_length > self.max_token_limit: pruned_memory = [] while curr_buffer_length > self.max_token_limit: pruned_memory.append(buffer.pop(0)) curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
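

# --- Illustrative usage sketch added for this listing; not part of the original module. ---
# Any BaseLanguageModel can supply the token counter; ChatOpenAI is used here only as an
# example and needs OPENAI_API_KEY to be constructed. The 60-token limit is arbitrary.
if __name__ == "__main__":
    from langchain_community.chat_models import ChatOpenAI

    memory = ConversationTokenBufferMemory(llm=ChatOpenAI(), max_token_limit=60)
    memory.save_context(
        {"input": "Hi, my name is Ada."}, {"output": "Nice to meet you, Ada!"}
    )
    memory.save_context(
        {"input": "What did I just tell you?"}, {"output": "Your name is Ada."}
    )
    # Older turns are pruned once the buffer exceeds max_token_limit.
    print(memory.load_memory_variables({}))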
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~agents~initialize.py
"""Load agent.""" from typing import Any, Optional, Sequence from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.tools import BaseTool from langchain.agents.agent import AgentExecutor from langchain.agents.agent_types import AgentType from langchain.agents.loading import AGENT_TO_CLASS, load_agent from langchain.callbacks.base import BaseCallbackManager def initialize_agent( tools: Sequence[BaseTool], llm: BaseLanguageModel, agent: Optional[AgentType] = None, callback_manager: Optional[BaseCallbackManager] = None, agent_path: Optional[str] = None, agent_kwargs: Optional[dict] = None, *, tags: Optional[Sequence[str]] = None, **kwargs: Any, ) -> AgentExecutor: """Load an agent executor given tools and LLM. Args: tools: List of tools this agent has access to. llm: Language model to use as the agent. agent: Agent type to use. If None and agent_path is also None, will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. callback_manager: CallbackManager to use. Global callback manager is used if not provided. Defaults to None. agent_path: Path to serialized agent to use. agent_kwargs: Additional keyword arguments to pass to the underlying agent tags: Tags to apply to the traced runs. **kwargs: Additional keyword arguments passed to the agent executor Returns: An agent executor """ tags_ = list(tags) if tags else [] if agent is None and agent_path is None: agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION if agent is not None and agent_path is not None: raise ValueError( "Both `agent` and `agent_path` are specified, " "but at most only one should be." ) if agent is not None: if agent not in AGENT_TO_CLASS: raise ValueError( f"Got unknown agent type: {agent}. " f"Valid types are: {AGENT_TO_CLASS.keys()}." ) tags_.append(agent.value if isinstance(agent, AgentType) else agent) agent_cls = AGENT_TO_CLASS[agent] agent_kwargs = agent_kwargs or {} agent_obj = agent_cls.from_llm_and_tools( llm, tools, callback_manager=callback_manager, **agent_kwargs ) elif agent_path is not None: agent_obj = load_agent( agent_path, llm=llm, tools=tools, callback_manager=callback_manager ) try: # TODO: Add tags from the serialized object directly. tags_.append(agent_obj._agent_type) except NotImplementedError: pass else: raise ValueError( "Somehow both `agent` and `agent_path` are None, " "this should never happen." ) return AgentExecutor.from_agent_and_tools( agent=agent_obj, tools=tools, callback_manager=callback_manager, tags=tags_, **kwargs, )
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~gigachat.py
import logging from typing import Any, AsyncIterator, Iterator, List, Optional from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.chat_models import ( BaseChatModel, agenerate_from_stream, generate_from_stream, ) from libs.core.langchain_core.messages import ( AIMessage, AIMessageChunk, BaseMessage, ChatMessage, HumanMessage, SystemMessage, ) from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from langchain_community.llms.gigachat import _BaseGigaChat logger = logging.getLogger(__name__) def _convert_dict_to_message(message: Any) -> BaseMessage: from gigachat.models import MessagesRole if message.role == MessagesRole.SYSTEM: return SystemMessage(content=message.content) elif message.role == MessagesRole.USER: return HumanMessage(content=message.content) elif message.role == MessagesRole.ASSISTANT: return AIMessage(content=message.content) else: raise TypeError(f"Got unknown role {message.role} {message}") def _convert_message_to_dict(message: BaseMessage) -> Any: from gigachat.models import Messages, MessagesRole if isinstance(message, SystemMessage): return Messages(role=MessagesRole.SYSTEM, content=message.content) elif isinstance(message, HumanMessage): return Messages(role=MessagesRole.USER, content=message.content) elif isinstance(message, AIMessage): return Messages(role=MessagesRole.ASSISTANT, content=message.content) elif isinstance(message, ChatMessage): return Messages(role=MessagesRole(message.role), content=message.content) else: raise TypeError(f"Got unknown type {message}") class GigaChat(_BaseGigaChat, BaseChatModel): """`GigaChat` large language models API. To use, you should pass login and password to access GigaChat API or use token. Example: .. 
code-block:: python from langchain_community.chat_models import GigaChat giga = GigaChat(credentials=..., verify_ssl_certs=False) """ def _build_payload(self, messages: List[BaseMessage]) -> Any: from gigachat.models import Chat payload = Chat( messages=[_convert_message_to_dict(m) for m in messages], profanity_check=self.profanity, ) if self.temperature is not None: payload.temperature = self.temperature if self.max_tokens is not None: payload.max_tokens = self.max_tokens if self.verbose: logger.info("Giga request: %s", payload.dict()) return payload def _create_chat_result(self, response: Any) -> ChatResult: generations = [] for res in response.choices: message = _convert_dict_to_message(res.message) finish_reason = res.finish_reason gen = ChatGeneration( message=message, generation_info={"finish_reason": finish_reason}, ) generations.append(gen) if finish_reason != "stop": logger.warning( "Giga generation stopped with reason: %s", finish_reason, ) if self.verbose: logger.info("Giga response: %s", message.content) llm_output = {"token_usage": response.usage, "model_name": response.model} return ChatResult(generations=generations, llm_output=llm_output) def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> ChatResult: should_stream = stream if stream is not None else self.streaming if should_stream: stream_iter = self._stream( messages, stop=stop, run_manager=run_manager, **kwargs ) return generate_from_stream(stream_iter) payload = self._build_payload(messages) response = self._client.chat(payload) return self._create_chat_result(response) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> ChatResult: should_stream = stream if stream is not None else self.streaming if should_stream: stream_iter = self._astream( messages, stop=stop, run_manager=run_manager, **kwargs ) return await agenerate_from_stream(stream_iter) payload = self._build_payload(messages) response = await self._client.achat(payload) return self._create_chat_result(response) def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: payload = self._build_payload(messages) for chunk in self._client.stream(payload): if chunk.choices: content = chunk.choices[0].delta.content yield ChatGenerationChunk(message=AIMessageChunk(content=content)) if run_manager: run_manager.on_llm_new_token(content) async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: payload = self._build_payload(messages) async for chunk in self._client.astream(payload): if chunk.choices: content = chunk.choices[0].delta.content yield ChatGenerationChunk(message=AIMessageChunk(content=content)) if run_manager: await run_manager.on_llm_new_token(content) def get_num_tokens(self, text: str) -> int: """Count approximate number of tokens""" return round(len(text) / 4.6)
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~sharepoint.py
"""Loader that loads data from Sharepoint Document Library""" from __future__ import annotations from typing import Iterator, List, Optional, Sequence from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import Field from langchain_community.document_loaders.base_o365 import ( O365BaseLoader, _FileType, ) from langchain_community.document_loaders.parsers.registry import get_parser class SharePointLoader(O365BaseLoader): """Load from `SharePoint`.""" document_library_id: str = Field(...) """ The ID of the SharePoint document library to load data from.""" folder_path: Optional[str] = None """ The path to the folder to load data from.""" object_ids: Optional[List[str]] = None """ The IDs of the objects to load data from.""" @property def _file_types(self) -> Sequence[_FileType]: """Return supported file types.""" return _FileType.DOC, _FileType.DOCX, _FileType.PDF @property def _scopes(self) -> List[str]: """Return required scopes.""" return ["sharepoint", "basic"] def lazy_load(self) -> Iterator[Document]: """Load documents lazily. Use this when working at a large scale.""" try: from O365.drive import Drive, Folder except ImportError: raise ImportError( "O365 package not found, please install it with `pip install o365`" ) drive = self._auth().storage().get_drive(self.document_library_id) if not isinstance(drive, Drive): raise ValueError(f"There isn't a Drive with id {self.document_library_id}.") blob_parser = get_parser("default") if self.folder_path: target_folder = drive.get_item_by_path(self.folder_path) if not isinstance(target_folder, Folder): raise ValueError(f"There isn't a folder with path {self.folder_path}.") for blob in self._load_from_folder(target_folder): yield from blob_parser.lazy_parse(blob) if self.object_ids: for blob in self._load_from_object_ids(drive, self.object_ids): yield from blob_parser.lazy_parse(blob) def load(self) -> List[Document]: """Load all documents.""" return list(self.lazy_load())
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~supabase.py
from __future__ import annotations import uuid from itertools import repeat from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type, Union, ) import numpy as np from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: import supabase class SupabaseVectorStore(VectorStore): """`Supabase Postgres` vector store. It assumes you have the `pgvector` extension installed and a `match_documents` (or similar) function. For more details: https://integrations.langchain.com/vectorstores?integration_name=SupabaseVectorStore You can implement your own `match_documents` function in order to limit the search space to a subset of documents based on your own authorization or business logic. Note that the Supabase Python client does not yet support async operations. If you'd like to use `max_marginal_relevance_search`, please review the instructions below on modifying the `match_documents` function to return matched embeddings. Examples: .. code-block:: python from langchain_community.embeddings.openai import OpenAIEmbeddings from libs.core.langchain_core.documents import Document from langchain_community.vectorstores import SupabaseVectorStore from supabase.client import create_client docs = [ Document(page_content="foo", metadata={"id": 1}), ] embeddings = OpenAIEmbeddings() supabase_client = create_client("my_supabase_url", "my_supabase_key") vector_store = SupabaseVectorStore.from_documents( docs, embeddings, client=supabase_client, table_name="documents", query_name="match_documents", chunk_size=500, ) To load from an existing table: .. code-block:: python from langchain_community.embeddings.openai import OpenAIEmbeddings from langchain_community.vectorstores import SupabaseVectorStore from supabase.client import create_client embeddings = OpenAIEmbeddings() supabase_client = create_client("my_supabase_url", "my_supabase_key") vector_store = SupabaseVectorStore( client=supabase_client, embedding=embeddings, table_name="documents", query_name="match_documents", ) """ def __init__( self, client: supabase.client.Client, embedding: Embeddings, table_name: str, chunk_size: int = 500, query_name: Union[str, None] = None, ) -> None: """Initialize with supabase client.""" try: import supabase # noqa: F401 except ImportError: raise ImportError( "Could not import supabase python package. " "Please install it with `pip install supabase`." ) self._client = client self._embedding: Embeddings = embedding self.table_name = table_name or "documents" self.query_name = query_name or "match_documents" self.chunk_size = chunk_size or 500 # According to the SupabaseVectorStore JS implementation, the best chunk size # is 500. Though for large datasets it can be too large so it is configurable. 
@property def embeddings(self) -> Embeddings: return self._embedding def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: ids = ids or [str(uuid.uuid4()) for _ in texts] docs = self._texts_to_documents(texts, metadatas) vectors = self._embedding.embed_documents(list(texts)) return self.add_vectors(vectors, docs, ids) @classmethod def from_texts( cls: Type["SupabaseVectorStore"], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Optional[supabase.client.Client] = None, table_name: Optional[str] = "documents", query_name: Union[str, None] = "match_documents", chunk_size: int = 500, ids: Optional[List[str]] = None, **kwargs: Any, ) -> "SupabaseVectorStore": """Return VectorStore initialized from texts and embeddings.""" if not client: raise ValueError("Supabase client is required.") if not table_name: raise ValueError("Supabase document table_name is required.") embeddings = embedding.embed_documents(texts) ids = [str(uuid.uuid4()) for _ in texts] docs = cls._texts_to_documents(texts, metadatas) cls._add_vectors(client, table_name, embeddings, docs, ids, chunk_size) return cls( client=client, embedding=embedding, table_name=table_name, query_name=query_name, chunk_size=chunk_size, ) def add_vectors( self, vectors: List[List[float]], documents: List[Document], ids: List[str], ) -> List[str]: return self._add_vectors( self._client, self.table_name, vectors, documents, ids, self.chunk_size ) def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: vector = self._embedding.embed_query(query) return self.similarity_search_by_vector(vector, k=k, filter=filter, **kwargs) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: result = self.similarity_search_by_vector_with_relevance_scores( embedding, k=k, filter=filter, **kwargs ) documents = [doc for doc, _ in result] return documents def similarity_search_with_relevance_scores( self, query: str, k: int = 4, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: vector = self._embedding.embed_query(query) return self.similarity_search_by_vector_with_relevance_scores( vector, k=k, filter=filter ) def match_args( self, query: List[float], filter: Optional[Dict[str, Any]] ) -> Dict[str, Any]: ret: Dict[str, Any] = dict(query_embedding=query) if filter: ret["filter"] = filter return ret def similarity_search_by_vector_with_relevance_scores( self, query: List[float], k: int, filter: Optional[Dict[str, Any]] = None, postgrest_filter: Optional[str] = None, ) -> List[Tuple[Document, float]]: match_documents_params = self.match_args(query, filter) query_builder = self._client.rpc(self.query_name, match_documents_params) if postgrest_filter: query_builder.params = query_builder.params.set( "and", f"({postgrest_filter})" ) query_builder.params = query_builder.params.set("limit", k) res = query_builder.execute() match_result = [ ( Document( metadata=search.get("metadata", {}), # type: ignore page_content=search.get("content", ""), ), search.get("similarity", 0.0), ) for search in res.data if search.get("content") ] return match_result def similarity_search_by_vector_returning_embeddings( self, query: List[float], k: int, filter: Optional[Dict[str, Any]] = None, postgrest_filter: Optional[str] = None, ) -> 
List[Tuple[Document, float, np.ndarray[np.float32, Any]]]:
        match_documents_params = self.match_args(query, filter)
        query_builder = self._client.rpc(self.query_name, match_documents_params)

        if postgrest_filter:
            query_builder.params = query_builder.params.set(
                "and", f"({postgrest_filter})"
            )

        query_builder.params = query_builder.params.set("limit", k)

        res = query_builder.execute()

        match_result = [
            (
                Document(
                    metadata=search.get("metadata", {}),  # type: ignore
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
                # Supabase returns a vector type as its string representation (!).
                # This is a hack to convert the string to numpy array.
                np.fromstring(
                    search.get("embedding", "").strip("[]"), np.float32, sep=","
                ),
            )
            for search in res.data
            if search.get("content")
        ]

        return match_result

    @staticmethod
    def _texts_to_documents(
        texts: Iterable[str],
        metadatas: Optional[Iterable[Dict[Any, Any]]] = None,
    ) -> List[Document]:
        """Return list of Documents from list of texts and metadatas."""
        if metadatas is None:
            metadatas = repeat({})

        docs = [
            Document(page_content=text, metadata=metadata)
            for text, metadata in zip(texts, metadatas)
        ]

        return docs

    @staticmethod
    def _add_vectors(
        client: supabase.client.Client,
        table_name: str,
        vectors: List[List[float]],
        documents: List[Document],
        ids: List[str],
        chunk_size: int,
    ) -> List[str]:
        """Add vectors to Supabase table."""

        rows: List[Dict[str, Any]] = [
            {
                "id": ids[idx],
                "content": documents[idx].page_content,
                "embedding": embedding,
                "metadata": documents[idx].metadata,  # type: ignore
            }
            for idx, embedding in enumerate(vectors)
        ]

        id_list: List[str] = []
        for i in range(0, len(rows), chunk_size):
            chunk = rows[i : i + chunk_size]

            result = client.from_(table_name).upsert(chunk).execute()  # type: ignore

            if len(result.data) == 0:
                raise Exception("Error inserting: No rows added")

            # VectorStore.add_vectors returns ids as strings
            ids = [str(i.get("id")) for i in result.data if i.get("id")]

            id_list.extend(ids)

        return id_list

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        result = self.similarity_search_by_vector_returning_embeddings(
            embedding, fetch_k
        )

        matched_documents = [doc_tuple[0] for doc_tuple in result]
        matched_embeddings = [doc_tuple[2] for doc_tuple in result]

        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            matched_embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )

        filtered_documents = [matched_documents[i] for i in mmr_selected]

        return filtered_documents

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. `max_marginal_relevance_search` requires that `query_name` returns matched embeddings alongside the match documents. The following function demonstrates how to do this: ```sql CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536), match_count int) RETURNS TABLE( id uuid, content text, metadata jsonb, embedding vector(1536), similarity float) LANGUAGE plpgsql AS $$ # variable_conflict use_column BEGIN RETURN query SELECT id, content, metadata, embedding, 1 -(docstore.embedding <=> query_embedding) AS similarity FROM docstore ORDER BY docstore.embedding <=> query_embedding LIMIT match_count; END; $$; ``` """ embedding = self._embedding.embed_query(query) docs = self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult=lambda_mult ) return docs def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. """ if ids is None: raise ValueError("No ids provided to delete.") rows: List[Dict[str, Any]] = [ { "id": id, } for id in ids ] # TODO: Check if this can be done in bulk for row in rows: self._client.from_(self.table_name).delete().eq("id", row["id"]).execute()
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~parsers~msword.py
from typing import Iterator from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob class MsWordParser(BaseBlobParser): """Parse the Microsoft Word documents from a blob.""" def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Parse a Microsoft Word document into the Document iterator. Args: blob: The blob to parse. Returns: An iterator of Documents. """ try: from unstructured.partition.doc import partition_doc from unstructured.partition.docx import partition_docx except ImportError as e: raise ImportError( "Could not import unstructured, please install with `pip install " "unstructured`." ) from e mime_type_parser = { "application/msword": partition_doc, "application/vnd.openxmlformats-officedocument.wordprocessingml.document": ( partition_docx ), } if blob.mimetype not in ( "application/msword", "application/vnd.openxmlformats-officedocument.wordprocessingml.document", ): raise ValueError("This blob type is not supported for this parser.") with blob.as_bytes_io() as word_document: elements = mime_type_parser[blob.mimetype](file=word_document) text = "\n\n".join([str(el) for el in elements]) metadata = {"source": blob.source} yield Document(page_content=text, metadata=metadata)
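

# --- Illustrative usage sketch added for this listing; not part of the original module. ---
# Assumes the `unstructured` package is installed and that "example.docx" (a placeholder
# path) exists locally. The explicit mime_type matters because the parser dispatches on it.
if __name__ == "__main__":
    blob = Blob.from_path(
        "example.docx",
        mime_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    )
    parser = MsWordParser()
    for doc in parser.lazy_parse(blob):
        print(doc.metadata["source"], doc.page_content[:80])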
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~retrievers~document_compressors~embeddings_filter.py
from typing import Callable, Dict, Optional, Sequence

import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import root_validator

from langchain.callbacks.manager import Callbacks
from langchain.document_transformers.embeddings_redundant_filter import (
    _get_embeddings_from_stateful_docs,
    get_stateful_documents,
)
from langchain.retrievers.document_compressors.base import (
    BaseDocumentCompressor,
)
from langchain.utils.math import cosine_similarity


class EmbeddingsFilter(BaseDocumentCompressor):
    """Document compressor that uses embeddings to drop documents
    unrelated to the query."""

    embeddings: Embeddings
    """Embeddings to use for embedding document contents and queries."""
    similarity_fn: Callable = cosine_similarity
    """Similarity function for comparing documents. Function expected to take as input
    two matrices (List[List[float]]) and return a matrix of scores where higher values
    indicate greater similarity."""
    k: Optional[int] = 20
    """The number of relevant documents to return. Can be set to None, in which case
    `similarity_threshold` must be specified. Defaults to 20."""
    similarity_threshold: Optional[float]
    """Threshold for determining when a document is similar enough to the query to be
    returned. Defaults to None, must be specified if `k` is set to None."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @root_validator()
    def validate_params(cls, values: Dict) -> Dict:
        """Validate similarity parameters."""
        if values["k"] is None and values["similarity_threshold"] is None:
            raise ValueError("Must specify one of `k` or `similarity_threshold`.")
        return values

    def compress_documents(
        self,
        documents: Sequence[Document],
        query: str,
        callbacks: Optional[Callbacks] = None,
    ) -> Sequence[Document]:
        """Filter documents based on similarity of their embeddings to the query."""
        stateful_documents = get_stateful_documents(documents)
        embedded_documents = _get_embeddings_from_stateful_docs(
            self.embeddings, stateful_documents
        )
        embedded_query = self.embeddings.embed_query(query)
        similarity = self.similarity_fn([embedded_query], embedded_documents)[0]
        included_idxs = np.arange(len(embedded_documents))
        if self.k is not None:
            included_idxs = np.argsort(similarity)[::-1][: self.k]
        if self.similarity_threshold is not None:
            similar_enough = np.where(
                similarity[included_idxs] > self.similarity_threshold
            )
            included_idxs = included_idxs[similar_enough]
        for i in included_idxs:
            stateful_documents[i].state["query_similarity_score"] = similarity[i]
        return [stateful_documents[i] for i in included_idxs]
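

# --- Illustrative usage sketch added for this listing; not part of the original module. ---
# A tiny deterministic Embeddings implementation is used so the example runs without any
# external model; in practice a real Embeddings class (e.g. OpenAIEmbeddings) is passed in.
if __name__ == "__main__":
    from typing import List

    class _ToyEmbeddings(Embeddings):
        def _embed(self, text: str) -> List[float]:
            # Two crude features: mentions of "cat" and mentions of "dog".
            return [float(text.count("cat")), float(text.count("dog"))]

        def embed_documents(self, texts: List[str]) -> List[List[float]]:
            return [self._embed(t) for t in texts]

        def embed_query(self, text: str) -> List[float]:
            return self._embed(text)

    docs = [
        Document(page_content="the cat sat on the mat"),
        Document(page_content="dogs love to fetch"),
        Document(page_content="a cat chased another cat"),
    ]
    doc_filter = EmbeddingsFilter(embeddings=_ToyEmbeddings(), k=2)
    kept = doc_filter.compress_documents(docs, query="tell me about cats")
    print([d.page_content for d in kept])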
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~test_baiducloud_vector_search.py
"""Test BESVectorStore functionality.""" from typing import List, Optional from libs.core.langchain_core.documents import Document from langchain_community.vectorstores import BESVectorStore from tests.integration_tests.vectorstores.fake_embeddings import ( FakeEmbeddings, fake_texts, ) def _bes_vector_db_from_texts( metadatas: Optional[List[dict]] = None, drop: bool = True ) -> BESVectorStore: return BESVectorStore.from_texts( fake_texts, FakeEmbeddings(), metadatas=metadatas, bes_url="http://10.0.X.X", ) def test_bes_vector_db() -> None: """Test end to end construction and search.""" docsearch = _bes_vector_db_from_texts() output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo")]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~elasticsearch.py
import logging import uuid from abc import ABC, abstractmethod from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Literal, Optional, Tuple, Union, ) import numpy as np from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import ( DistanceStrategy, maximal_marginal_relevance, ) if TYPE_CHECKING: from elasticsearch import Elasticsearch logger = logging.getLogger(__name__) class BaseRetrievalStrategy(ABC): """Base class for `Elasticsearch` retrieval strategies.""" @abstractmethod def query( self, query_vector: Union[List[float], None], query: Union[str, None], *, k: int, fetch_k: int, vector_query_field: str, text_field: str, filter: List[dict], similarity: Union[DistanceStrategy, None], ) -> Dict: """ Executes when a search is performed on the store. Args: query_vector: The query vector, or None if not using vector-based query. query: The text query, or None if not using text-based query. k: The total number of results to retrieve. fetch_k: The number of results to fetch initially. vector_query_field: The field containing the vector representations in the index. text_field: The field containing the text data in the index. filter: List of filter clauses to apply to the query. similarity: The similarity strategy to use, or None if not using one. Returns: Dict: The Elasticsearch query body. """ @abstractmethod def index( self, dims_length: Union[int, None], vector_query_field: str, similarity: Union[DistanceStrategy, None], ) -> Dict: """ Executes when the index is created. Args: dims_length: Numeric length of the embedding vectors, or None if not using vector-based query. vector_query_field: The field containing the vector representations in the index. similarity: The similarity strategy to use, or None if not using one. Returns: Dict: The Elasticsearch settings and mappings for the strategy. """ def before_index_setup( self, client: "Elasticsearch", text_field: str, vector_query_field: str ) -> None: """ Executes before the index is created. Used for setting up any required Elasticsearch resources like a pipeline. Args: client: The Elasticsearch client. text_field: The field containing the text data in the index. vector_query_field: The field containing the vector representations in the index. """ def require_inference(self) -> bool: """ Returns whether or not the strategy requires inference to be performed on the text before it is added to the index. Returns: bool: Whether or not the strategy requires inference to be performed on the text before it is added to the index. 
""" return True class ApproxRetrievalStrategy(BaseRetrievalStrategy): """Approximate retrieval strategy using the `HNSW` algorithm.""" def __init__( self, query_model_id: Optional[str] = None, hybrid: Optional[bool] = False, rrf: Optional[Union[dict, bool]] = True, ): self.query_model_id = query_model_id self.hybrid = hybrid # RRF has two optional parameters # 'rank_constant', 'window_size' # https://www.elastic.co/guide/en/elasticsearch/reference/current/rrf.html self.rrf = rrf def query( self, query_vector: Union[List[float], None], query: Union[str, None], k: int, fetch_k: int, vector_query_field: str, text_field: str, filter: List[dict], similarity: Union[DistanceStrategy, None], ) -> Dict: knn = { "filter": filter, "field": vector_query_field, "k": k, "num_candidates": fetch_k, } # Embedding provided via the embedding function if query_vector and not self.query_model_id: knn["query_vector"] = query_vector # Case 2: Used when model has been deployed to # Elasticsearch and can infer the query vector from the query text elif query and self.query_model_id: knn["query_vector_builder"] = { "text_embedding": { "model_id": self.query_model_id, # use 'model_id' argument "model_text": query, # use 'query' argument } } else: raise ValueError( "You must provide an embedding function or a" " query_model_id to perform a similarity search." ) # If hybrid, add a query to the knn query # RRF is used to even the score from the knn query and text query # RRF has two optional parameters: {'rank_constant':int, 'window_size':int} # https://www.elastic.co/guide/en/elasticsearch/reference/current/rrf.html if self.hybrid: query_body = { "knn": knn, "query": { "bool": { "must": [ { "match": { text_field: { "query": query, } } } ], "filter": filter, } }, } if isinstance(self.rrf, dict): query_body["rank"] = {"rrf": self.rrf} elif isinstance(self.rrf, bool) and self.rrf is True: query_body["rank"] = {"rrf": {}} return query_body else: return {"knn": knn} def index( self, dims_length: Union[int, None], vector_query_field: str, similarity: Union[DistanceStrategy, None], ) -> Dict: """Create the mapping for the Elasticsearch index.""" if similarity is DistanceStrategy.COSINE: similarityAlgo = "cosine" elif similarity is DistanceStrategy.EUCLIDEAN_DISTANCE: similarityAlgo = "l2_norm" elif similarity is DistanceStrategy.DOT_PRODUCT: similarityAlgo = "dot_product" else: raise ValueError(f"Similarity {similarity} not supported.") return { "mappings": { "properties": { vector_query_field: { "type": "dense_vector", "dims": dims_length, "index": True, "similarity": similarityAlgo, }, } } } class ExactRetrievalStrategy(BaseRetrievalStrategy): """Exact retrieval strategy using the `script_score` query.""" def query( self, query_vector: Union[List[float], None], query: Union[str, None], k: int, fetch_k: int, vector_query_field: str, text_field: str, filter: Union[List[dict], None], similarity: Union[DistanceStrategy, None], ) -> Dict: if similarity is DistanceStrategy.COSINE: similarityAlgo = ( f"cosineSimilarity(params.query_vector, '{vector_query_field}') + 1.0" ) elif similarity is DistanceStrategy.EUCLIDEAN_DISTANCE: similarityAlgo = ( f"1 / (1 + l2norm(params.query_vector, '{vector_query_field}'))" ) elif similarity is DistanceStrategy.DOT_PRODUCT: similarityAlgo = f""" double value = dotProduct(params.query_vector, '{vector_query_field}'); return sigmoid(1, Math.E, -value); """ else: raise ValueError(f"Similarity {similarity} not supported.") queryBool: Dict = {"match_all": {}} if filter: queryBool = {"bool": 
{"filter": filter}} return { "query": { "script_score": { "query": queryBool, "script": { "source": similarityAlgo, "params": {"query_vector": query_vector}, }, }, } } def index( self, dims_length: Union[int, None], vector_query_field: str, similarity: Union[DistanceStrategy, None], ) -> Dict: """Create the mapping for the Elasticsearch index.""" return { "mappings": { "properties": { vector_query_field: { "type": "dense_vector", "dims": dims_length, "index": False, }, } } } class SparseRetrievalStrategy(BaseRetrievalStrategy): """Sparse retrieval strategy using the `text_expansion` processor.""" def __init__(self, model_id: Optional[str] = None): self.model_id = model_id or ".elser_model_1" def query( self, query_vector: Union[List[float], None], query: Union[str, None], k: int, fetch_k: int, vector_query_field: str, text_field: str, filter: List[dict], similarity: Union[DistanceStrategy, None], ) -> Dict: return { "query": { "bool": { "must": [ { "text_expansion": { f"{vector_query_field}.tokens": { "model_id": self.model_id, "model_text": query, } } } ], "filter": filter, } } } def _get_pipeline_name(self) -> str: return f"{self.model_id}_sparse_embedding" def before_index_setup( self, client: "Elasticsearch", text_field: str, vector_query_field: str ) -> None: # If model_id is provided, create a pipeline for the model if self.model_id: client.ingest.put_pipeline( id=self._get_pipeline_name(), description="Embedding pipeline for langchain vectorstore", processors=[ { "inference": { "model_id": self.model_id, "target_field": vector_query_field, "field_map": {text_field: "text_field"}, "inference_config": { "text_expansion": {"results_field": "tokens"} }, } } ], ) def index( self, dims_length: Union[int, None], vector_query_field: str, similarity: Union[DistanceStrategy, None], ) -> Dict: return { "mappings": { "properties": { vector_query_field: { "properties": {"tokens": {"type": "rank_features"}} } } }, "settings": {"default_pipeline": self._get_pipeline_name()}, } def require_inference(self) -> bool: return False class ElasticsearchStore(VectorStore): """`Elasticsearch` vector store. Example: .. code-block:: python from langchain_community.vectorstores import ElasticsearchStore from langchain_community.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = ElasticsearchStore( embedding=OpenAIEmbeddings(), index_name="langchain-demo", es_url="http://localhost:9200" ) Args: index_name: Name of the Elasticsearch index to create. es_url: URL of the Elasticsearch instance to connect to. cloud_id: Cloud ID of the Elasticsearch instance to connect to. es_user: Username to use when connecting to Elasticsearch. es_password: Password to use when connecting to Elasticsearch. es_api_key: API key to use when connecting to Elasticsearch. es_connection: Optional pre-existing Elasticsearch connection. vector_query_field: Optional. Name of the field to store the embedding vectors in. query_field: Optional. Name of the field to store the texts in. strategy: Optional. Retrieval strategy to use when searching the index. Defaults to ApproxRetrievalStrategy. Can be one of ExactRetrievalStrategy, ApproxRetrievalStrategy, or SparseRetrievalStrategy. distance_strategy: Optional. Distance strategy to use when searching the index. Defaults to COSINE. Can be one of COSINE, EUCLIDEAN_DISTANCE, or DOT_PRODUCT. If you want to use a cloud hosted Elasticsearch instance, you can pass in the cloud_id argument instead of the es_url argument. Example: .. 
code-block:: python

            from langchain_community.vectorstores import ElasticsearchStore
            from langchain_community.embeddings.openai import OpenAIEmbeddings

            vectorstore = ElasticsearchStore(
                embedding=OpenAIEmbeddings(),
                index_name="langchain-demo",
                es_cloud_id="<cloud_id>",
                es_user="elastic",
                es_password="<password>"
            )

    You can also connect to an existing Elasticsearch instance by passing in a
    pre-existing Elasticsearch connection via the es_connection argument.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import ElasticsearchStore
            from langchain_community.embeddings.openai import OpenAIEmbeddings

            from elasticsearch import Elasticsearch

            es_connection = Elasticsearch("http://localhost:9200")

            vectorstore = ElasticsearchStore(
                embedding=OpenAIEmbeddings(),
                index_name="langchain-demo",
                es_connection=es_connection
            )

    ElasticsearchStore by default uses the ApproxRetrievalStrategy, which uses the
    HNSW algorithm to perform approximate nearest neighbor search. This is the
    fastest and most memory efficient algorithm.

    If you want to use the Brute force / Exact strategy for searching vectors, you
    can pass in the ExactRetrievalStrategy to the ElasticsearchStore constructor.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import ElasticsearchStore
            from langchain_community.embeddings.openai import OpenAIEmbeddings

            vectorstore = ElasticsearchStore(
                embedding=OpenAIEmbeddings(),
                index_name="langchain-demo",
                es_url="http://localhost:9200",
                strategy=ElasticsearchStore.ExactRetrievalStrategy()
            )

    Both strategies require that you know the similarity metric you want to use
    when creating the index. The default is cosine similarity, but you can also
    use dot product or euclidean distance.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import ElasticsearchStore
            from langchain_community.embeddings.openai import OpenAIEmbeddings
            from langchain_community.vectorstores.utils import DistanceStrategy

            vectorstore = ElasticsearchStore(
                embedding=OpenAIEmbeddings(),
                index_name="langchain-demo",
                es_url="http://localhost:9200",
                distance_strategy="DOT_PRODUCT"
            )

    """

    def __init__(
        self,
        index_name: str,
        *,
        embedding: Optional[Embeddings] = None,
        es_connection: Optional["Elasticsearch"] = None,
        es_url: Optional[str] = None,
        es_cloud_id: Optional[str] = None,
        es_user: Optional[str] = None,
        es_api_key: Optional[str] = None,
        es_password: Optional[str] = None,
        vector_query_field: str = "vector",
        query_field: str = "text",
        distance_strategy: Optional[
            Literal[
                DistanceStrategy.COSINE,
                DistanceStrategy.DOT_PRODUCT,
                DistanceStrategy.EUCLIDEAN_DISTANCE,
            ]
        ] = None,
        strategy: BaseRetrievalStrategy = ApproxRetrievalStrategy(),
    ):
        self.embedding = embedding
        self.index_name = index_name
        self.query_field = query_field
        self.vector_query_field = vector_query_field
        self.distance_strategy = (
            DistanceStrategy.COSINE
            if distance_strategy is None
            else DistanceStrategy[distance_strategy]
        )
        self.strategy = strategy

        if es_connection is not None:
            self.client = es_connection.options(
                headers={"user-agent": self.get_user_agent()}
            )
        elif es_url is not None or es_cloud_id is not None:
            self.client = ElasticsearchStore.connect_to_elasticsearch(
                es_url=es_url,
                username=es_user,
                password=es_password,
                cloud_id=es_cloud_id,
                api_key=es_api_key,
            )
        else:
            raise ValueError(
                """Either provide a pre-existing Elasticsearch connection, \
or valid credentials for creating a new connection."""
            )

    @staticmethod
    def get_user_agent() -> str:
        from langchain_community import __version__

        return
f"langchain-py-vs/{__version__}" @staticmethod def connect_to_elasticsearch( *, es_url: Optional[str] = None, cloud_id: Optional[str] = None, api_key: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, ) -> "Elasticsearch": try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) if es_url and cloud_id: raise ValueError( "Both es_url and cloud_id are defined. Please provide only one." ) connection_params: Dict[str, Any] = {} if es_url: connection_params["hosts"] = [es_url] elif cloud_id: connection_params["cloud_id"] = cloud_id else: raise ValueError("Please provide either elasticsearch_url or cloud_id.") if api_key: connection_params["api_key"] = api_key elif username and password: connection_params["basic_auth"] = (username, password) es_client = elasticsearch.Elasticsearch( **connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}, ) try: es_client.info() except Exception as e: logger.error(f"Error connecting to Elasticsearch: {e}") raise e return es_client @property def embeddings(self) -> Optional[Embeddings]: return self.embedding def similarity_search( self, query: str, k: int = 4, fetch_k: int = 50, filter: Optional[List[dict]] = None, **kwargs: Any, ) -> List[Document]: """Return Elasticsearch documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to knn num_candidates. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the query, in descending order of similarity. """ results = self._search( query=query, k=k, fetch_k=fetch_k, filter=filter, **kwargs ) return [doc for doc, _ in results] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, fields: Optional[List[str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. fields: Other fields to get from elasticsearch source. These fields will be added to the document metadata. Returns: List[Document]: A list of Documents selected by maximal marginal relevance. 
""" if self.embedding is None: raise ValueError("You must provide an embedding function to perform MMR") remove_vector_query_field_from_metadata = True if fields is None: fields = [self.vector_query_field] elif self.vector_query_field not in fields: fields.append(self.vector_query_field) else: remove_vector_query_field_from_metadata = False # Embed the query query_embedding = self.embedding.embed_query(query) # Fetch the initial documents got_docs = self._search( query_vector=query_embedding, k=fetch_k, fields=fields, **kwargs ) # Get the embeddings for the fetched documents got_embeddings = [doc.metadata[self.vector_query_field] for doc, _ in got_docs] # Select documents using maximal marginal relevance selected_indices = maximal_marginal_relevance( np.array(query_embedding), got_embeddings, lambda_mult=lambda_mult, k=k ) selected_docs = [got_docs[i][0] for i in selected_indices] if remove_vector_query_field_from_metadata: for doc in selected_docs: del doc.metadata[self.vector_query_field] return selected_docs def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[List[dict]] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return Elasticsearch documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the query and score for each """ return self._search(query=query, k=k, filter=filter, **kwargs) def similarity_search_by_vector_with_relevance_scores( self, embedding: List[float], k: int = 4, filter: Optional[List[Dict]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return Elasticsearch documents most similar to query, along with scores. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the embedding and score for each """ return self._search(query_vector=embedding, k=k, filter=filter, **kwargs) def _search( self, query: Optional[str] = None, k: int = 4, query_vector: Union[List[float], None] = None, fetch_k: int = 50, fields: Optional[List[str]] = None, filter: Optional[List[dict]] = None, custom_query: Optional[Callable[[Dict, Union[str, None]], Dict]] = None, doc_builder: Optional[Callable[[Dict], Document]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return Elasticsearch documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. query_vector: Embedding to look up documents similar to. fetch_k: Number of candidates to fetch from each shard. Defaults to 50. fields: List of fields to return from Elasticsearch. Defaults to only returning the text field. filter: Array of Elasticsearch filter clauses to apply to the query. custom_query: Function to modify the Elasticsearch query body before it is sent to Elasticsearch. 
Returns: List of Documents most similar to the query and score for each """ if fields is None: fields = [] if "metadata" not in fields: fields.append("metadata") if self.query_field not in fields: fields.append(self.query_field) if self.embedding and query is not None: query_vector = self.embedding.embed_query(query) query_body = self.strategy.query( query_vector=query_vector, query=query, k=k, fetch_k=fetch_k, vector_query_field=self.vector_query_field, text_field=self.query_field, filter=filter or [], similarity=self.distance_strategy, ) logger.debug(f"Query body: {query_body}") if custom_query is not None: query_body = custom_query(query_body, query) logger.debug(f"Calling custom_query, Query body now: {query_body}") # Perform the kNN search on the Elasticsearch index and return the results. response = self.client.search( index=self.index_name, **query_body, size=k, source=fields, ) def default_doc_builder(hit: Dict) -> Document: return Document( page_content=hit["_source"].get(self.query_field, ""), metadata=hit["_source"]["metadata"], ) doc_builder = doc_builder or default_doc_builder docs_and_scores = [] for hit in response["hits"]["hits"]: for field in fields: if field in hit["_source"] and field not in [ "metadata", self.query_field, ]: if "metadata" not in hit["_source"]: hit["_source"]["metadata"] = {} hit["_source"]["metadata"][field] = hit["_source"][field] docs_and_scores.append( ( doc_builder(hit), hit["_score"], ) ) return docs_and_scores def delete( self, ids: Optional[List[str]] = None, refresh_indices: Optional[bool] = True, **kwargs: Any, ) -> Optional[bool]: """Delete documents from the Elasticsearch index. Args: ids: List of ids of documents to delete. refresh_indices: Whether to refresh the index after deleting documents. Defaults to True. """ try: from elasticsearch.helpers import BulkIndexError, bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) body = [] if ids is None: raise ValueError("ids must be provided.") for _id in ids: body.append({"_op_type": "delete", "_index": self.index_name, "_id": _id}) if len(body) > 0: try: bulk(self.client, body, refresh=refresh_indices, ignore_status=404) logger.debug(f"Deleted {len(body)} texts from index") return True except BulkIndexError as e: logger.error(f"Error deleting texts: {e}") firstError = e.errors[0].get("index", {}).get("error", {}) logger.error(f"First error reason: {firstError.get('reason')}") raise e else: logger.debug("No texts to delete from index") return False def _create_index_if_not_exists( self, index_name: str, dims_length: Optional[int] = None ) -> None: """Create the Elasticsearch index if it doesn't already exist. Args: index_name: Name of the Elasticsearch index to create. dims_length: Length of the embedding vectors. """ if self.client.indices.exists(index=index_name): logger.debug(f"Index {index_name} already exists. Skipping creation.") else: if dims_length is None and self.strategy.require_inference(): raise ValueError( "Cannot create index without specifying dims_length " "when the index doesn't already exist. We infer " "dims_length from the first embedding. Check that " "you have provided an embedding function." 
) self.strategy.before_index_setup( client=self.client, text_field=self.query_field, vector_query_field=self.vector_query_field, ) indexSettings = self.strategy.index( vector_query_field=self.vector_query_field, dims_length=dims_length, similarity=self.distance_strategy, ) logger.debug( f"Creating index {index_name} with mappings {indexSettings['mappings']}" ) self.client.indices.create(index=index_name, **indexSettings) def __add( self, texts: Iterable[str], embeddings: Optional[List[List[float]]], metadatas: Optional[List[Dict[Any, Any]]] = None, ids: Optional[List[str]] = None, refresh_indices: bool = True, create_index_if_not_exists: bool = True, bulk_kwargs: Optional[Dict] = None, **kwargs: Any, ) -> List[str]: try: from elasticsearch.helpers import BulkIndexError, bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) bulk_kwargs = bulk_kwargs or {} ids = ids or [str(uuid.uuid4()) for _ in texts] requests = [] if create_index_if_not_exists: if embeddings: dims_length = len(embeddings[0]) else: dims_length = None self._create_index_if_not_exists( index_name=self.index_name, dims_length=dims_length ) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} request = { "_op_type": "index", "_index": self.index_name, self.query_field: text, "metadata": metadata, "_id": ids[i], } if embeddings: request[self.vector_query_field] = embeddings[i] requests.append(request) if len(requests) > 0: try: success, failed = bulk( self.client, requests, stats_only=True, refresh=refresh_indices, **bulk_kwargs, ) logger.debug( f"Added {success} and failed to add {failed} texts to index" ) logger.debug(f"added texts {ids} to index") return ids except BulkIndexError as e: logger.error(f"Error adding texts: {e}") firstError = e.errors[0].get("index", {}).get("error", {}) logger.error(f"First error reason: {firstError.get('reason')}") raise e else: logger.debug("No texts to add to index") return [] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]] = None, ids: Optional[List[str]] = None, refresh_indices: bool = True, create_index_if_not_exists: bool = True, bulk_kwargs: Optional[Dict] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. refresh_indices: Whether to refresh the Elasticsearch indices after adding the texts. create_index_if_not_exists: Whether to create the Elasticsearch index if it doesn't already exist. *bulk_kwargs: Additional arguments to pass to Elasticsearch bulk. - chunk_size: Optional. Number of texts to add to the index at a time. Defaults to 500. Returns: List of ids from adding the texts into the vectorstore. """ if self.embedding is not None: # If no search_type requires inference, we use the provided # embedding function to embed the texts. embeddings = self.embedding.embed_documents(list(texts)) else: # the search_type doesn't require inference, so we don't need to # embed the texts. 
embeddings = None return self.__add( texts, embeddings, metadatas=metadatas, ids=ids, refresh_indices=refresh_indices, create_index_if_not_exists=create_index_if_not_exists, bulk_kwargs=bulk_kwargs, kwargs=kwargs, ) def add_embeddings( self, text_embeddings: Iterable[Tuple[str, List[float]]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, refresh_indices: bool = True, create_index_if_not_exists: bool = True, bulk_kwargs: Optional[Dict] = None, **kwargs: Any, ) -> List[str]: """Add the given texts and embeddings to the vectorstore. Args: text_embeddings: Iterable pairs of string and embedding to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. refresh_indices: Whether to refresh the Elasticsearch indices after adding the texts. create_index_if_not_exists: Whether to create the Elasticsearch index if it doesn't already exist. *bulk_kwargs: Additional arguments to pass to Elasticsearch bulk. - chunk_size: Optional. Number of texts to add to the index at a time. Defaults to 500. Returns: List of ids from adding the texts into the vectorstore. """ texts, embeddings = zip(*text_embeddings) return self.__add( list(texts), list(embeddings), metadatas=metadatas, ids=ids, refresh_indices=refresh_indices, create_index_if_not_exists=create_index_if_not_exists, bulk_kwargs=bulk_kwargs, kwargs=kwargs, ) @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[Dict[str, Any]]] = None, bulk_kwargs: Optional[Dict] = None, **kwargs: Any, ) -> "ElasticsearchStore": """Construct ElasticsearchStore wrapper from raw documents. Example: .. code-block:: python from langchain_community.vectorstores import ElasticsearchStore from langchain_community.embeddings.openai import OpenAIEmbeddings db = ElasticsearchStore.from_texts( texts, // embeddings optional if using // a strategy that doesn't require inference embeddings, index_name="langchain-demo", es_url="http://localhost:9200" ) Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. es_url: URL of the Elasticsearch instance to connect to. cloud_id: Cloud ID of the Elasticsearch instance to connect to. es_user: Username to use when connecting to Elasticsearch. es_password: Password to use when connecting to Elasticsearch. es_api_key: API key to use when connecting to Elasticsearch. es_connection: Optional pre-existing Elasticsearch connection. vector_query_field: Optional. Name of the field to store the embedding vectors in. query_field: Optional. Name of the field to store the texts in. distance_strategy: Optional. Name of the distance strategy to use. Defaults to "COSINE". can be one of "COSINE", "EUCLIDEAN_DISTANCE", "DOT_PRODUCT". bulk_kwargs: Optional. Additional arguments to pass to Elasticsearch bulk. """ elasticsearchStore = ElasticsearchStore._create_cls_from_kwargs( embedding=embedding, **kwargs ) # Encode the provided texts and add them to the newly created index. 
elasticsearchStore.add_texts( texts, metadatas=metadatas, bulk_kwargs=bulk_kwargs ) return elasticsearchStore @staticmethod def _create_cls_from_kwargs( embedding: Optional[Embeddings] = None, **kwargs: Any ) -> "ElasticsearchStore": index_name = kwargs.get("index_name") if index_name is None: raise ValueError("Please provide an index_name.") es_connection = kwargs.get("es_connection") es_cloud_id = kwargs.get("es_cloud_id") es_url = kwargs.get("es_url") es_user = kwargs.get("es_user") es_password = kwargs.get("es_password") es_api_key = kwargs.get("es_api_key") vector_query_field = kwargs.get("vector_query_field") query_field = kwargs.get("query_field") distance_strategy = kwargs.get("distance_strategy") strategy = kwargs.get("strategy", ElasticsearchStore.ApproxRetrievalStrategy()) optional_args = {} if vector_query_field is not None: optional_args["vector_query_field"] = vector_query_field if query_field is not None: optional_args["query_field"] = query_field return ElasticsearchStore( index_name=index_name, embedding=embedding, es_url=es_url, es_connection=es_connection, es_cloud_id=es_cloud_id, es_user=es_user, es_password=es_password, es_api_key=es_api_key, strategy=strategy, distance_strategy=distance_strategy, **optional_args, ) @classmethod def from_documents( cls, documents: List[Document], embedding: Optional[Embeddings] = None, bulk_kwargs: Optional[Dict] = None, **kwargs: Any, ) -> "ElasticsearchStore": """Construct ElasticsearchStore wrapper from documents. Example: .. code-block:: python from langchain_community.vectorstores import ElasticsearchStore from langchain_community.embeddings.openai import OpenAIEmbeddings db = ElasticsearchStore.from_documents( texts, embeddings, index_name="langchain-demo", es_url="http://localhost:9200" ) Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. Do not provide if using a strategy that doesn't require inference. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. es_url: URL of the Elasticsearch instance to connect to. cloud_id: Cloud ID of the Elasticsearch instance to connect to. es_user: Username to use when connecting to Elasticsearch. es_password: Password to use when connecting to Elasticsearch. es_api_key: API key to use when connecting to Elasticsearch. es_connection: Optional pre-existing Elasticsearch connection. vector_query_field: Optional. Name of the field to store the embedding vectors in. query_field: Optional. Name of the field to store the texts in. bulk_kwargs: Optional. Additional arguments to pass to Elasticsearch bulk. """ elasticsearchStore = ElasticsearchStore._create_cls_from_kwargs( embedding=embedding, **kwargs ) # Encode the provided texts and add them to the newly created index. elasticsearchStore.add_documents(documents, bulk_kwargs=bulk_kwargs) return elasticsearchStore @staticmethod def ExactRetrievalStrategy() -> "ExactRetrievalStrategy": """Used to perform brute force / exact nearest neighbor search via script_score.""" return ExactRetrievalStrategy() @staticmethod def ApproxRetrievalStrategy( query_model_id: Optional[str] = None, hybrid: Optional[bool] = False, rrf: Optional[Union[dict, bool]] = True, ) -> "ApproxRetrievalStrategy": """Used to perform approximate nearest neighbor search using the HNSW algorithm. At build index time, this strategy will create a dense vector field in the index and store the embedding vectors in the index. 
At query time, the text will either be embedded using the provided
        embedding function or the query_model_id will be used to embed the
        text using the model deployed to Elasticsearch.

        If query_model_id is used, do not provide an embedding function.

        Args:
            query_model_id: Optional. ID of the model to use to embed the query
                text within the stack. Requires the embedding model to be
                deployed to Elasticsearch.
            hybrid: Optional. If True, will perform a hybrid search using both
                the knn query and a text query. Defaults to False.
            rrf: Optional. Controls Reciprocal Rank Fusion when `hybrid` is True:
                if `rrf` is True, an empty ``rrf: {}`` clause is sent;
                if `rrf` is False, the rrf clause is omitted;
                if `rrf` is a dict, its values are passed through, which allows
                adjusting 'rank_constant' and 'window_size'.
        """
        return ApproxRetrievalStrategy(
            query_model_id=query_model_id, hybrid=hybrid, rrf=rrf
        )

    @staticmethod
    def SparseVectorRetrievalStrategy(
        model_id: Optional[str] = None,
    ) -> "SparseRetrievalStrategy":
        """Used to perform sparse vector search via text_expansion.
        Use this when you want to use the ELSER model to perform document search.

        At build index time, this strategy will create a pipeline that
        will embed the text using the ELSER model and store the
        resulting tokens in the index.

        At query time, the text will be embedded using the ELSER
        model and the resulting tokens will be used to
        perform a text_expansion query.

        Args:
            model_id: Optional. Default is ".elser_model_1".
                ID of the model to use to embed the query text
                within the stack. Requires embedding model to be
                deployed to Elasticsearch.
        """
        return SparseRetrievalStrategy(model_id=model_id)
[]
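Not part of the source record: a minimal usage sketch for the ElasticsearchStore search methods defined above. It assumes an Elasticsearch instance is reachable at http://localhost:9200; FakeEmbeddings is used only so the example is self-contained, and the index name is a placeholder.

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import ElasticsearchStore

# Build a small index with the default approximate (kNN) retrieval strategy.
store = ElasticsearchStore.from_texts(
    ["foo", "bar", "baz"],
    FakeEmbeddings(size=1536),
    index_name="langchain-demo",
    es_url="http://localhost:9200",
)

# Scored similarity search and MMR re-ranking over the same index.
hits = store.similarity_search_with_score("foo", k=2)
diverse = store.max_marginal_relevance_search("foo", k=2, fetch_k=10)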
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~mhtml.py
import email
import logging
from typing import Dict, List, Union

from libs.core.langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class MHTMLLoader(BaseLoader):
    """Parse `MHTML` files with `BeautifulSoup`."""

    def __init__(
        self,
        file_path: str,
        open_encoding: Union[str, None] = None,
        bs_kwargs: Union[dict, None] = None,
        get_text_separator: str = "",
    ) -> None:
        """Initialize with a file path and, optionally, the file encoding to use
        and any kwargs to pass to the BeautifulSoup object.

        Args:
            file_path: Path to file to load.
            open_encoding: The encoding to use when opening the file.
            bs_kwargs: Any kwargs to pass to the BeautifulSoup object.
            get_text_separator: The separator to use when getting the text
                from the soup.
        """
        try:
            import bs4  # noqa:F401
        except ImportError:
            raise ImportError(
                "beautifulsoup4 package not found, please install it with "
                "`pip install beautifulsoup4`"
            )

        self.file_path = file_path
        self.open_encoding = open_encoding
        if bs_kwargs is None:
            bs_kwargs = {"features": "lxml"}
        self.bs_kwargs = bs_kwargs
        self.get_text_separator = get_text_separator

    def load(self) -> List[Document]:
        """Load MHTML document into document objects."""
        from bs4 import BeautifulSoup

        with open(self.file_path, "r", encoding=self.open_encoding) as f:
            message = email.message_from_string(f.read())
            parts = message.get_payload()

            if not isinstance(parts, list):
                parts = [message]

            for part in parts:
                if part.get_content_type() == "text/html":
                    html = part.get_payload(decode=True).decode()

                    soup = BeautifulSoup(html, **self.bs_kwargs)
                    text = soup.get_text(self.get_text_separator)

                    if soup.title:
                        title = str(soup.title.string)
                    else:
                        title = ""

                    metadata: Dict[str, Union[str, None]] = {
                        "source": self.file_path,
                        "title": title,
                    }
                    return [Document(page_content=text, metadata=metadata)]
        return []
[]
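A minimal usage sketch for the MHTMLLoader above (not part of the source record). "page.mht" is a hypothetical path to an MHTML page saved from a browser, and beautifulsoup4 plus lxml are assumed to be installed.

from langchain_community.document_loaders import MHTMLLoader

# Load a saved MHTML page into a single Document with source/title metadata.
loader = MHTMLLoader(file_path="page.mht", get_text_separator=" ")
docs = loader.load()
print(docs[0].metadata["title"], len(docs[0].page_content))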
2024-01-10
mth93/langchain
libs~langchain~langchain~memory~buffer.py
from typing import Any, Dict, List, Optional from libs.core.langchain_core.messages import BaseMessage, get_buffer_string from libs.core.langchain_core.pydantic_v1 import root_validator from langchain.memory.chat_memory import BaseChatMemory, BaseMemory from langchain.memory.utils import get_prompt_input_key class ConversationBufferMemory(BaseChatMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" #: :meta private: @property def buffer(self) -> Any: """String buffer of memory.""" return self.buffer_as_messages if self.return_messages else self.buffer_as_str @property def buffer_as_str(self) -> str: """Exposes the buffer as a string in case return_messages is True.""" return get_buffer_string( self.chat_memory.messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) @property def buffer_as_messages(self) -> List[BaseMessage]: """Exposes the buffer as a list of messages in case return_messages is False.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" return {self.memory_key: self.buffer} class ConversationStringBufferMemory(BaseMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" """Prefix to use for AI generated responses.""" buffer: str = "" output_key: Optional[str] = None input_key: Optional[str] = None memory_key: str = "history" #: :meta private: @root_validator() def validate_chains(cls, values: Dict) -> Dict: """Validate that return messages is not True.""" if values.get("return_messages", False): raise ValueError( "return_messages must be False for ConversationStringBufferMemory" ) return values @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return history buffer.""" return {self.memory_key: self.buffer} def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") output_key = list(outputs.keys())[0] else: output_key = self.output_key human = f"{self.human_prefix}: " + inputs[prompt_input_key] ai = f"{self.ai_prefix}: " + outputs[output_key] self.buffer += "\n" + "\n".join([human, ai]) def clear(self) -> None: """Clear memory contents.""" self.buffer = ""
[]
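Illustrative only: a short round trip through ConversationBufferMemory above, showing how save_context populates the buffer that load_memory_variables returns.

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()
memory.save_context({"input": "Hi there"}, {"output": "Hello, how can I help?"})
print(memory.load_memory_variables({}))
# {'history': 'Human: Hi there\nAI: Hello, how can I help?'}

# With return_messages=True the same history comes back as message objects.
chat_memory = ConversationBufferMemory(return_messages=True)
chat_memory.save_context({"input": "Hi"}, {"output": "Hello"})
print(chat_memory.load_memory_variables({})["history"])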
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~jinachat.py
"""JinaChat wrapper.""" from __future__ import annotations import logging from typing import ( Any, AsyncIterator, Callable, Dict, Iterator, List, Mapping, Optional, Tuple, Type, Union, ) from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.chat_models import ( BaseChatModel, agenerate_from_stream, generate_from_stream, ) from libs.core.langchain_core.messages import ( AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ChatMessage, ChatMessageChunk, FunctionMessage, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ) from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from libs.core.langchain_core.pydantic_v1 import Field, SecretStr, root_validator from libs.core.langchain_core.utils import ( convert_to_secret_str, get_from_dict_or_env, get_pydantic_field_names, ) from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) logger = logging.getLogger(__name__) def _create_retry_decorator(llm: JinaChat) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) async def acompletion_with_retry(llm: JinaChat, **kwargs: Any) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) def _convert_delta_to_message_chunk( _dict: Mapping[str, Any], default_class: Type[BaseMessageChunk] ) -> BaseMessageChunk: role = _dict.get("role") content = _dict.get("content") or "" if role == "user" or default_class == HumanMessageChunk: return HumanMessageChunk(content=content) elif role == "assistant" or default_class == AIMessageChunk: return AIMessageChunk(content=content) elif role == "system" or default_class == SystemMessageChunk: return SystemMessageChunk(content=content) elif role or default_class == ChatMessageChunk: return ChatMessageChunk(content=content, role=role) else: return default_class(content=content) def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: role = _dict["role"] if role == "user": return HumanMessage(content=_dict["content"]) elif role == "assistant": content = _dict["content"] or "" return AIMessage(content=content) elif role == "system": return SystemMessage(content=_dict["content"]) else: return ChatMessage(content=_dict["content"], role=role) def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = 
{"role": "assistant", "content": message.content} elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} elif isinstance(message, FunctionMessage): message_dict = { "role": "function", "name": message.name, "content": message.content, } else: raise ValueError(f"Got unknown type {message}") if "name" in message.additional_kwargs: message_dict["name"] = message.additional_kwargs["name"] return message_dict class JinaChat(BaseChatModel): """`Jina AI` Chat models API. To use, you should have the ``openai`` python package installed, and the environment variable ``JINACHAT_API_KEY`` set to your API key, which you can generate at https://chat.jina.ai/api. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain_community.chat_models import JinaChat chat = JinaChat() """ @property def lc_secrets(self) -> Dict[str, str]: return {"jinachat_api_key": "JINACHAT_API_KEY"} @classmethod def is_lc_serializable(cls) -> bool: """Return whether this model can be serialized by Langchain.""" return False client: Any #: :meta private: temperature: float = 0.7 """What sampling temperature to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" jinachat_api_key: Optional[SecretStr] = None """Base URL path for API requests, leave blank if not using a proxy or service emulator.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to JinaChat completion API. Default is 600 seconds.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" max_tokens: Optional[int] = None """Maximum number of tokens to generate.""" class Config: """Configuration for this pydantic object.""" allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["jinachat_api_key"] = convert_to_secret_str( get_from_dict_or_env(values, "jinachat_api_key", "JINACHAT_API_KEY") ) try: import openai except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. 
Try upgrading it " "with `pip install --upgrade openai`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling JinaChat API.""" return { "request_timeout": self.request_timeout, "max_tokens": self.max_tokens, "stream": self.streaming, "temperature": self.temperature, **self.model_kwargs, } def _create_retry_decorator(self) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(self.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(self, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = self._create_retry_decorator() @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return self.client.create(**kwargs) return _completion_with_retry(**kwargs) def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: overall_token_usage: dict = {} for output in llm_outputs: if output is None: # Happens in streaming continue token_usage = output["token_usage"] for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v else: overall_token_usage[k] = v return {"token_usage": overall_token_usage} def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs, "stream": True} default_chunk_class = AIMessageChunk for chunk in self.completion_with_retry(messages=message_dicts, **params): delta = chunk["choices"][0]["delta"] chunk = _convert_delta_to_message_chunk(delta, default_chunk_class) default_chunk_class = chunk.__class__ yield ChatGenerationChunk(message=chunk) if run_manager: run_manager.on_llm_new_token(chunk.content) def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: if self.streaming: stream_iter = self._stream( messages=messages, stop=stop, run_manager=run_manager, **kwargs ) return generate_from_stream(stream_iter) message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs} response = self.completion_with_retry(messages=message_dicts, **params) return self._create_chat_result(response) def _create_message_dicts( self, messages: List[BaseMessage], stop: Optional[List[str]] ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: params = dict(self._invocation_params) if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop message_dicts = [_convert_message_to_dict(m) for m in messages] return message_dicts, params def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: generations = [] for res in response["choices"]: message = _convert_dict_to_message(res["message"]) gen = ChatGeneration(message=message) 
generations.append(gen) llm_output = {"token_usage": response["usage"]} return ChatResult(generations=generations, llm_output=llm_output) async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs, "stream": True} default_chunk_class = AIMessageChunk async for chunk in await acompletion_with_retry( self, messages=message_dicts, **params ): delta = chunk["choices"][0]["delta"] chunk = _convert_delta_to_message_chunk(delta, default_chunk_class) default_chunk_class = chunk.__class__ yield ChatGenerationChunk(message=chunk) if run_manager: await run_manager.on_llm_new_token(chunk.content) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: if self.streaming: stream_iter = self._astream( messages=messages, stop=stop, run_manager=run_manager, **kwargs ) return await agenerate_from_stream(stream_iter) message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs} response = await acompletion_with_retry(self, messages=message_dicts, **params) return self._create_chat_result(response) @property def _invocation_params(self) -> Mapping[str, Any]: """Get the parameters used to invoke the model.""" jinachat_creds: Dict[str, Any] = { "api_key": self.jinachat_api_key and self.jinachat_api_key.get_secret_value(), "api_base": "https://api.chat.jina.ai/v1", "model": "jinachat", } return {**jinachat_creds, **self._default_params} @property def _llm_type(self) -> str: """Return type of chat model.""" return "jinachat"
[ "content" ]
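A hypothetical invocation sketch for the JinaChat wrapper above; it assumes JINACHAT_API_KEY is set in the environment and the pre-1.0 openai package is installed. The message imports follow this repository's vendored libs.core layout.

from langchain_community.chat_models import JinaChat
from libs.core.langchain_core.messages import HumanMessage, SystemMessage

chat = JinaChat(temperature=0.3)
messages = [
    SystemMessage(content="You translate English to French."),
    HumanMessage(content="I love programming."),
]
# invoke() runs a single chat completion and returns an AIMessage.
print(chat.invoke(messages).content)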
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~gradient_ai.py
import asyncio import logging from concurrent.futures import ThreadPoolExecutor from typing import Any, Dict, List, Mapping, Optional, Sequence, TypedDict import aiohttp import requests from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.llms import BaseLLM from libs.core.langchain_core.outputs import Generation, LLMResult from libs.core.langchain_core.pydantic_v1 import Extra, Field, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env from langchain_community.llms.utils import enforce_stop_tokens class TrainResult(TypedDict): """Train result.""" loss: float class GradientLLM(BaseLLM): """Gradient.ai LLM Endpoints. GradientLLM is a class to interact with LLMs on gradient.ai To use, set the environment variable ``GRADIENT_ACCESS_TOKEN`` with your API token and ``GRADIENT_WORKSPACE_ID`` for your gradient workspace, or alternatively provide them as keywords to the constructor of this class. Example: .. code-block:: python from langchain_community.llms import GradientLLM GradientLLM( model="99148c6d-c2a0-4fbe-a4a7-e7c05bdb8a09_base_ml_model", model_kwargs={ "max_generated_token_count": 128, "temperature": 0.75, "top_p": 0.95, "top_k": 20, "stop": [], }, gradient_workspace_id="12345614fc0_workspace", gradient_access_token="gradientai-access_token", ) """ model_id: str = Field(alias="model", min_length=2) "Underlying gradient.ai model id (base or fine-tuned)." gradient_workspace_id: Optional[str] = None "Underlying gradient.ai workspace_id." gradient_access_token: Optional[str] = None """gradient.ai API Token, which can be generated by going to https://auth.gradient.ai/select-workspace and selecting "Access tokens" under the profile drop-down. 
""" model_kwargs: Optional[dict] = None """Keyword arguments to pass to the model.""" gradient_api_url: str = "https://api.gradient.ai/api" """Endpoint URL to use.""" aiosession: Optional[aiohttp.ClientSession] = None #: :meta private: """ClientSession, private, subject to change in upcoming releases.""" # LLM call kwargs class Config: """Configuration for this pydantic object.""" allow_population_by_field_name = True extra = Extra.forbid @root_validator(allow_reuse=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["gradient_access_token"] = get_from_dict_or_env( values, "gradient_access_token", "GRADIENT_ACCESS_TOKEN" ) values["gradient_workspace_id"] = get_from_dict_or_env( values, "gradient_workspace_id", "GRADIENT_WORKSPACE_ID" ) if ( values["gradient_access_token"] is None or len(values["gradient_access_token"]) < 10 ): raise ValueError("env variable `GRADIENT_ACCESS_TOKEN` must be set") if ( values["gradient_workspace_id"] is None or len(values["gradient_access_token"]) < 3 ): raise ValueError("env variable `GRADIENT_WORKSPACE_ID` must be set") if values["model_kwargs"]: kw = values["model_kwargs"] if not 0 <= kw.get("temperature", 0.5) <= 1: raise ValueError("`temperature` must be in the range [0.0, 1.0]") if not 0 <= kw.get("top_p", 0.5) <= 1: raise ValueError("`top_p` must be in the range [0.0, 1.0]") if 0 >= kw.get("top_k", 0.5): raise ValueError("`top_k` must be positive") if 0 >= kw.get("max_generated_token_count", 1): raise ValueError("`max_generated_token_count` must be positive") values["gradient_api_url"] = get_from_dict_or_env( values, "gradient_api_url", "GRADIENT_API_URL" ) try: import gradientai # noqa except ImportError: logging.warning( "DeprecationWarning: `GradientLLM` will use " "`pip install gradientai` in future releases of langchain." 
) except Exception: pass return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"gradient_api_url": self.gradient_api_url}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "gradient" def _kwargs_post_fine_tune_request( self, inputs: Sequence[str], kwargs: Mapping[str, Any] ) -> Mapping[str, Any]: """Build the kwargs for the Post request, used by sync Args: prompt (str): prompt used in query kwargs (dict): model kwargs in payload Returns: Dict[str, Union[str,dict]]: _description_ """ _model_kwargs = self.model_kwargs or {} _params = {**_model_kwargs, **kwargs} multipliers = _params.get("multipliers", None) return dict( url=f"{self.gradient_api_url}/models/{self.model_id}/fine-tune", headers={ "authorization": f"Bearer {self.gradient_access_token}", "x-gradient-workspace-id": f"{self.gradient_workspace_id}", "accept": "application/json", "content-type": "application/json", }, json=dict( samples=tuple( { "inputs": input, } for input in inputs ) if multipliers is None else tuple( { "inputs": input, "fineTuningParameters": { "multiplier": multiplier, }, } for input, multiplier in zip(inputs, multipliers) ), ), ) def _kwargs_post_request( self, prompt: str, kwargs: Mapping[str, Any] ) -> Mapping[str, Any]: """Build the kwargs for the Post request, used by sync Args: prompt (str): prompt used in query kwargs (dict): model kwargs in payload Returns: Dict[str, Union[str,dict]]: _description_ """ _model_kwargs = self.model_kwargs or {} _params = {**_model_kwargs, **kwargs} return dict( url=f"{self.gradient_api_url}/models/{self.model_id}/complete", headers={ "authorization": f"Bearer {self.gradient_access_token}", "x-gradient-workspace-id": f"{self.gradient_workspace_id}", "accept": "application/json", "content-type": "application/json", }, json=dict( query=prompt, maxGeneratedTokenCount=_params.get("max_generated_token_count", None), temperature=_params.get("temperature", None), topK=_params.get("top_k", None), topP=_params.get("top_p", None), ), ) def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call to Gradients API `model/{id}/complete`. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. """ try: response = requests.post(**self._kwargs_post_request(prompt, kwargs)) if response.status_code != 200: raise Exception( f"Gradient returned an unexpected response with status " f"{response.status_code}: {response.text}" ) except requests.exceptions.RequestException as e: raise Exception(f"RequestException while calling Gradient Endpoint: {e}") text = response.json()["generatedOutput"] if stop is not None: # Apply stop tokens when making calls to Gradient text = enforce_stop_tokens(text, stop) return text async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Async Call to Gradients API `model/{id}/complete`. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. 
""" if not self.aiosession: async with aiohttp.ClientSession() as session: async with session.post( **self._kwargs_post_request(prompt=prompt, kwargs=kwargs) ) as response: if response.status != 200: raise Exception( f"Gradient returned an unexpected response with status " f"{response.status}: {response.text}" ) text = (await response.json())["generatedOutput"] else: async with self.aiosession.post( **self._kwargs_post_request(prompt=prompt, kwargs=kwargs) ) as response: if response.status != 200: raise Exception( f"Gradient returned an unexpected response with status " f"{response.status}: {response.text}" ) text = (await response.json())["generatedOutput"] if stop is not None: # Apply stop tokens when making calls to Gradient text = enforce_stop_tokens(text, stop) return text def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # same thing with threading def _inner_generate(prompt: str) -> List[Generation]: return [ Generation( text=self._call( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs ) ) ] if len(prompts) <= 1: generations = list(map(_inner_generate, prompts)) else: with ThreadPoolExecutor(min(8, len(prompts))) as p: generations = list(p.map(_inner_generate, prompts)) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" generations = [] for generation in asyncio.gather( [self._acall(prompt, stop=stop, run_manager=run_manager, **kwargs)] for prompt in prompts ): generations.append([Generation(text=generation)]) return LLMResult(generations=generations) def train_unsupervised( self, inputs: Sequence[str], **kwargs: Any, ) -> TrainResult: try: response = requests.post( **self._kwargs_post_fine_tune_request(inputs, kwargs) ) if response.status_code != 200: raise Exception( f"Gradient returned an unexpected response with status " f"{response.status_code}: {response.text}" ) except requests.exceptions.RequestException as e: raise Exception(f"RequestException while calling Gradient Endpoint: {e}") response_json = response.json() loss = response_json["sumLoss"] / response_json["numberOfTrainableTokens"] return TrainResult(loss=loss) async def atrain_unsupervised( self, inputs: Sequence[str], **kwargs: Any, ) -> TrainResult: if not self.aiosession: async with aiohttp.ClientSession() as session: async with session.post( **self._kwargs_post_fine_tune_request(inputs, kwargs) ) as response: if response.status != 200: raise Exception( f"Gradient returned an unexpected response with status " f"{response.status}: {response.text}" ) response_json = await response.json() loss = ( response_json["sumLoss"] / response_json["numberOfTrainableTokens"] ) else: async with self.aiosession.post( **self._kwargs_post_fine_tune_request(inputs, kwargs) ) as response: if response.status != 200: raise Exception( f"Gradient returned an unexpected response with status " f"{response.status}: {response.text}" ) response_json = await response.json() loss = ( response_json["sumLoss"] / response_json["numberOfTrainableTokens"] ) return TrainResult(loss=loss)
[ "application/json" ]
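Sketch only: calling the GradientLLM class above for completion and unsupervised fine-tuning. It assumes GRADIENT_ACCESS_TOKEN and GRADIENT_WORKSPACE_ID are set in the environment; the model id is a placeholder.

from langchain_community.llms import GradientLLM

llm = GradientLLM(
    model="99148c6d-c2a0-4fbe-a4a7-e7c05bdb8a09_base_ml_model",  # placeholder id
    model_kwargs={"max_generated_token_count": 128, "temperature": 0.7},
)

# Single completion against the /complete endpoint.
print(llm.invoke("### Instruction: What is a large language model?\n\n### Response:"))

# Unsupervised fine-tuning over raw samples returns the training loss.
result = llm.train_unsupervised(["Alice lives in Berlin.", "Bob lives in Paris."])
print(result["loss"])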
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~pinecone.py
from __future__ import annotations import logging import uuid import warnings from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional, Tuple, Union import numpy as np from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.utils.iter import batch_iterate from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import ( DistanceStrategy, maximal_marginal_relevance, ) if TYPE_CHECKING: from pinecone import Index logger = logging.getLogger(__name__) class Pinecone(VectorStore): """`Pinecone` vector store. To use, you should have the ``pinecone-client`` python package installed. Example: .. code-block:: python from langchain_community.vectorstores import Pinecone from langchain_community.embeddings.openai import OpenAIEmbeddings import pinecone # The environment should be the one specified next to the API key # in your Pinecone console pinecone.init(api_key="***", environment="...") index = pinecone.Index("langchain-demo") embeddings = OpenAIEmbeddings() vectorstore = Pinecone(index, embeddings.embed_query, "text") """ def __init__( self, index: Any, embedding: Union[Embeddings, Callable], text_key: str, namespace: Optional[str] = None, distance_strategy: Optional[DistanceStrategy] = DistanceStrategy.COSINE, ): """Initialize with Pinecone client.""" try: import pinecone except ImportError: raise ImportError( "Could not import pinecone python package. " "Please install it with `pip install pinecone-client`." ) if not isinstance(embedding, Embeddings): warnings.warn( "Passing in `embedding` as a Callable is deprecated. Please pass in an" " Embeddings object instead." ) if not isinstance(index, pinecone.index.Index): raise ValueError( f"client should be an instance of pinecone.index.Index, " f"got {type(index)}" ) self._index = index self._embedding = embedding self._text_key = text_key self._namespace = namespace self.distance_strategy = distance_strategy @property def embeddings(self) -> Optional[Embeddings]: """Access the query embedding object if available.""" if isinstance(self._embedding, Embeddings): return self._embedding return None def _embed_documents(self, texts: Iterable[str]) -> List[List[float]]: """Embed search docs.""" if isinstance(self._embedding, Embeddings): return self._embedding.embed_documents(list(texts)) return [self._embedding(t) for t in texts] def _embed_query(self, text: str) -> List[float]: """Embed query text.""" if isinstance(self._embedding, Embeddings): return self._embedding.embed_query(text) return self._embedding(text) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, namespace: Optional[str] = None, batch_size: int = 32, embedding_chunk_size: int = 1000, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Upsert optimization is done by chunking the embeddings and upserting them. This is done to avoid memory issues and optimize using HTTP based embeddings. For OpenAI embeddings, use pool_threads>4 when constructing the pinecone.Index, embedding_chunk_size>1000 and batch_size~64 for best performance. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. namespace: Optional pinecone namespace to add the texts to. 
batch_size: Batch size to use when adding the texts to the vectorstore. embedding_chunk_size: Chunk size to use when embedding the texts. Returns: List of ids from adding the texts into the vectorstore. """ if namespace is None: namespace = self._namespace texts = list(texts) ids = ids or [str(uuid.uuid4()) for _ in texts] metadatas = metadatas or [{} for _ in texts] for metadata, text in zip(metadatas, texts): metadata[self._text_key] = text # For loops to avoid memory issues and optimize when using HTTP based embeddings # The first loop runs the embeddings, it benefits when using OpenAI embeddings # The second loops runs the pinecone upsert asynchronously. for i in range(0, len(texts), embedding_chunk_size): chunk_texts = texts[i : i + embedding_chunk_size] chunk_ids = ids[i : i + embedding_chunk_size] chunk_metadatas = metadatas[i : i + embedding_chunk_size] embeddings = self._embed_documents(chunk_texts) async_res = [ self._index.upsert( vectors=batch, namespace=namespace, async_req=True, **kwargs, ) for batch in batch_iterate( batch_size, zip(chunk_ids, embeddings, chunk_metadatas) ) ] [res.get() for res in async_res] return ids def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None, ) -> List[Tuple[Document, float]]: """Return pinecone documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Dictionary of argument(s) to filter on metadata namespace: Namespace to search in. Default will search in '' namespace. Returns: List of Documents most similar to the query and score for each """ return self.similarity_search_by_vector_with_score( self._embed_query(query), k=k, filter=filter, namespace=namespace ) def similarity_search_by_vector_with_score( self, embedding: List[float], *, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None, ) -> List[Tuple[Document, float]]: """Return pinecone documents most similar to embedding, along with scores.""" if namespace is None: namespace = self._namespace docs = [] results = self._index.query( [embedding], top_k=k, include_metadata=True, namespace=namespace, filter=filter, ) for res in results["matches"]: metadata = res["metadata"] if self._text_key in metadata: text = metadata.pop(self._text_key) score = res["score"] docs.append((Document(page_content=text, metadata=metadata), score)) else: logger.warning( f"Found document with no `{self._text_key}` key. Skipping." ) return docs def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return pinecone documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Dictionary of argument(s) to filter on metadata namespace: Namespace to search in. Default will search in '' namespace. Returns: List of Documents most similar to the query and score for each """ docs_and_scores = self.similarity_search_with_score( query, k=k, filter=filter, namespace=namespace, **kwargs ) return [doc for doc, _ in docs_and_scores] def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. 
""" if self.distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return self._euclidean_relevance_score_fn else: raise ValueError( "Unknown distance strategy, must be cosine, max_inner_product " "(dot product), or euclidean" ) @staticmethod def _cosine_relevance_score_fn(score: float) -> float: """Pinecone returns cosine similarity scores between [-1,1]""" return (score + 1) / 2 def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ if namespace is None: namespace = self._namespace results = self._index.query( [embedding], top_k=fetch_k, include_values=True, include_metadata=True, namespace=namespace, filter=filter, ) mmr_selected = maximal_marginal_relevance( np.array([embedding], dtype=np.float32), [item["values"] for item in results["matches"]], k=k, lambda_mult=lambda_mult, ) selected = [results["matches"][i]["metadata"] for i in mmr_selected] return [ Document(page_content=metadata.pop((self._text_key)), metadata=metadata) for metadata in selected ] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult, filter, namespace ) @classmethod def get_pinecone_index( cls, index_name: Optional[str], pool_threads: int = 4, ) -> Index: """Return a Pinecone Index instance. Args: index_name: Name of the index to use. pool_threads: Number of threads to use for index upsert. Returns: Pinecone Index instance.""" try: import pinecone except ImportError: raise ValueError( "Could not import pinecone python package. " "Please install it with `pip install pinecone-client`." 
) indexes = pinecone.list_indexes() # checks if provided index exists if index_name in indexes: index = pinecone.Index(index_name, pool_threads=pool_threads) elif len(indexes) == 0: raise ValueError( "No active indexes found in your Pinecone project, " "are you sure you're using the right Pinecone API key and Environment? " "Please double check your Pinecone dashboard." ) else: raise ValueError( f"Index '{index_name}' not found in your Pinecone project. " f"Did you mean one of the following indexes: {', '.join(indexes)}" ) return index @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 32, text_key: str = "text", namespace: Optional[str] = None, index_name: Optional[str] = None, upsert_kwargs: Optional[dict] = None, pool_threads: int = 4, embeddings_chunk_size: int = 1000, **kwargs: Any, ) -> Pinecone: """Construct Pinecone wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Pinecone index This is intended to be a quick way to get started. The `pool_threads` affects the speed of the upsert operations. Example: .. code-block:: python from langchain_community.vectorstores import Pinecone from langchain_community.embeddings import OpenAIEmbeddings import pinecone # The environment should be the one specified next to the API key # in your Pinecone console pinecone.init(api_key="***", environment="...") embeddings = OpenAIEmbeddings() pinecone = Pinecone.from_texts( texts, embeddings, index_name="langchain-demo" ) """ pinecone_index = cls.get_pinecone_index(index_name, pool_threads) pinecone = cls(pinecone_index, embedding, text_key, namespace, **kwargs) pinecone.add_texts( texts, metadatas=metadatas, ids=ids, namespace=namespace, batch_size=batch_size, embedding_chunk_size=embeddings_chunk_size, **(upsert_kwargs or {}), ) return pinecone @classmethod def from_existing_index( cls, index_name: str, embedding: Embeddings, text_key: str = "text", namespace: Optional[str] = None, pool_threads: int = 4, ) -> Pinecone: """Load pinecone vectorstore from index name.""" pinecone_index = cls.get_pinecone_index(index_name, pool_threads) return cls(pinecone_index, embedding, text_key, namespace) def delete( self, ids: Optional[List[str]] = None, delete_all: Optional[bool] = None, namespace: Optional[str] = None, filter: Optional[dict] = None, **kwargs: Any, ) -> None: """Delete by vector IDs or filter. Args: ids: List of ids to delete. filter: Dictionary of conditions to filter vectors to delete. """ if namespace is None: namespace = self._namespace if delete_all: self._index.delete(delete_all=True, namespace=namespace, **kwargs) elif ids is not None: chunk_size = 1000 for i in range(0, len(ids), chunk_size): chunk = ids[i : i + chunk_size] self._index.delete(ids=chunk, namespace=namespace, **kwargs) elif filter is not None: self._index.delete(filter=filter, namespace=namespace, **kwargs) else: raise ValueError("Either ids, delete_all, or filter must be provided.") return None
[]
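Not from the source: a brief sketch of metadata-filtered search and deletion with the Pinecone wrapper above. It assumes pinecone.init(...) has already been called, "langchain-demo" is an existing index, and FakeEmbeddings stands in for a real embedding model of matching dimensionality.

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Pinecone

vectorstore = Pinecone.from_existing_index(
    "langchain-demo", FakeEmbeddings(size=1536)
)
# Filtered similarity search, then deletion of specific vector ids.
docs = vectorstore.similarity_search("foo", k=4, filter={"source": "notes"})
vectorstore.delete(ids=["id-1", "id-2"])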
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~graph_qa~falkordb.py
"""Question answering over a graph.""" from __future__ import annotations import re from typing import Any, Dict, List, Optional from libs.core.langchain_core.prompts import BasePromptTemplate from libs.core.langchain_core.pydantic_v1 import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs import FalkorDBGraph INTERMEDIATE_STEPS_KEY = "intermediate_steps" def extract_cypher(text: str) -> str: """ Extract Cypher code from a text. Args: text: Text to extract Cypher code from. Returns: Cypher code extracted from the text. """ # The pattern to find Cypher code enclosed in triple backticks pattern = r"```(.*?)```" # Find all matches in the input text matches = re.findall(pattern, text, re.DOTALL) return matches[0] if matches else text class FalkorDBQAChain(Chain): """Chain for question-answering against a graph by generating Cypher statements. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ graph: FalkorDBGraph = Field(exclude=True) cypher_generation_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: top_k: int = 10 """Number of results to return from the query""" return_intermediate_steps: bool = False """Whether or not to return the intermediate steps along with the final answer.""" return_direct: bool = False """Whether or not to return the result of querying the graph directly.""" @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. 
:meta private: """ _output_keys = [self.output_key] return _output_keys @property def _chain_type(self) -> str: return "graph_cypher_chain" @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, cypher_prompt: BasePromptTemplate = CYPHER_GENERATION_PROMPT, **kwargs: Any, ) -> FalkorDBQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt) return cls( qa_chain=qa_chain, cypher_generation_chain=cypher_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] intermediate_steps: List = [] generated_cypher = self.cypher_generation_chain.run( {"question": question, "schema": self.graph.schema}, callbacks=callbacks ) # Extract Cypher code if it is wrapped in backticks generated_cypher = extract_cypher(generated_cypher) _run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_cypher, color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"query": generated_cypher}) # Retrieve and limit the number of results context = self.graph.query(generated_cypher)[: self.top_k] if self.return_direct: final_result = context else: _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"context": context}) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) final_result = result[self.qa_chain.output_key] chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~llm_requests.py
"""Chain that hits a URL and then uses an LLM to parse results.""" from __future__ import annotations from typing import Any, Dict, List, Optional from libs.core.langchain_core.pydantic_v1 import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains import LLMChain from langchain.chains.base import Chain from langchain.utilities.requests import TextRequestsWrapper DEFAULT_HEADERS = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" # noqa: E501 } class LLMRequestsChain(Chain): """Chain that requests a URL and then uses an LLM to parse results. **Security Note**: This chain can make GET requests to arbitrary URLs, including internal URLs. Control access to who can run this chain and what network access this chain has. See https://python.langchain.com/docs/security for more information. """ llm_chain: LLMChain requests_wrapper: TextRequestsWrapper = Field( default_factory=lambda: TextRequestsWrapper(headers=DEFAULT_HEADERS), exclude=True, ) text_length: int = 8000 requests_key: str = "requests_result" #: :meta private: input_key: str = "url" #: :meta private: output_key: str = "output" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Will be whatever keys the prompt expects. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Will always return text key. :meta private: """ return [self.output_key] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError: raise ImportError( "Could not import bs4 python package. " "Please install it with `pip install bs4`." ) return values def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: from bs4 import BeautifulSoup _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() # Other keys are assumed to be needed for LLM prediction other_keys = {k: v for k, v in inputs.items() if k != self.input_key} url = inputs[self.input_key] res = self.requests_wrapper.get(url) # extract the text from the html soup = BeautifulSoup(res, "html.parser") other_keys[self.requests_key] = soup.get_text()[: self.text_length] result = self.llm_chain.predict( callbacks=_run_manager.get_child(), **other_keys ) return {self.output_key: result} @property def _chain_type(self) -> str: return "llm_requests_chain"
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~evernote.py
"""Load documents from Evernote. https://gist.github.com/foxmask/7b29c43a161e001ff04afdb2f181e31c """ import hashlib import logging from base64 import b64decode from time import strptime from typing import Any, Dict, Iterator, List, Optional from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) class EverNoteLoader(BaseLoader): """Load from `EverNote`. Loads an EverNote notebook export file e.g. my_notebook.enex into Documents. Instructions on producing this file can be found at https://help.evernote.com/hc/en-us/articles/209005557-Export-notes-and-notebooks-as-ENEX-or-HTML Currently only the plain text in the note is extracted and stored as the contents of the Document, any non content metadata (e.g. 'author', 'created', 'updated' etc. but not 'content-raw' or 'resource') tags on the note will be extracted and stored as metadata on the Document. Args: file_path (str): The path to the notebook export with a .enex extension load_single_document (bool): Whether or not to concatenate the content of all notes into a single long Document. If this is set to True (default) then the only metadata on the document will be the 'source' which contains the file name of the export. """ # noqa: E501 def __init__(self, file_path: str, load_single_document: bool = True): """Initialize with file path.""" self.file_path = file_path self.load_single_document = load_single_document def load(self) -> List[Document]: """Load documents from EverNote export file.""" documents = [ Document( page_content=note["content"], metadata={ **{ key: value for key, value in note.items() if key not in ["content", "content-raw", "resource"] }, **{"source": self.file_path}, }, ) for note in self._parse_note_xml(self.file_path) if note.get("content") is not None ] if not self.load_single_document: return documents return [ Document( page_content="".join([document.page_content for document in documents]), metadata={"source": self.file_path}, ) ] @staticmethod def _parse_content(content: str) -> str: try: import html2text return html2text.html2text(content).strip() except ImportError as e: raise ImportError( "Could not import `html2text`. Although it is not a required package " "to use Langchain, using the EverNote loader requires `html2text`. " "Please install `html2text` via `pip install html2text` and try again." 
) from e @staticmethod def _parse_resource(resource: list) -> dict: rsc_dict: Dict[str, Any] = {} for elem in resource: if elem.tag == "data": # Sometimes elem.text is None rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b"" rsc_dict["hash"] = hashlib.md5(rsc_dict[elem.tag]).hexdigest() else: rsc_dict[elem.tag] = elem.text return rsc_dict @staticmethod def _parse_note(note: List, prefix: Optional[str] = None) -> dict: note_dict: Dict[str, Any] = {} resources = [] def add_prefix(element_tag: str) -> str: if prefix is None: return element_tag return f"{prefix}.{element_tag}" for elem in note: if elem.tag == "content": note_dict[elem.tag] = EverNoteLoader._parse_content(elem.text) # A copy of original content note_dict["content-raw"] = elem.text elif elem.tag == "resource": resources.append(EverNoteLoader._parse_resource(elem)) elif elem.tag == "created" or elem.tag == "updated": note_dict[elem.tag] = strptime(elem.text, "%Y%m%dT%H%M%SZ") elif elem.tag == "note-attributes": additional_attributes = EverNoteLoader._parse_note( elem, elem.tag ) # Recursively enter the note-attributes tag note_dict.update(additional_attributes) else: note_dict[elem.tag] = elem.text if len(resources) > 0: note_dict["resource"] = resources return {add_prefix(key): value for key, value in note_dict.items()} @staticmethod def _parse_note_xml(xml_file: str) -> Iterator[Dict[str, Any]]: """Parse Evernote xml.""" # Without huge_tree set to True, parser may complain about huge text node # Try to recover, because there may be "&nbsp;", which will cause # "XMLSyntaxError: Entity 'nbsp' not defined" try: from lxml import etree except ImportError as e: logger.error( "Could not import `lxml`. Although it is not a required package to use " "Langchain, using the EverNote loader requires `lxml`. Please install " "`lxml` via `pip install lxml` and try again." ) raise e context = etree.iterparse( xml_file, encoding="utf-8", strip_cdata=False, huge_tree=True, recover=True ) for action, elem in context: if elem.tag == "note": yield EverNoteLoader._parse_note(elem)
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~retrievers~wikipedia.py
from typing import List from libs.core.langchain_core.callbacks import CallbackManagerForRetrieverRun from libs.core.langchain_core.documents import Document from libs.core.langchain_core.retrievers import BaseRetriever from langchain_community.utilities.wikipedia import WikipediaAPIWrapper class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper): """`Wikipedia API` retriever. It wraps load() to get_relevant_documents(). It uses all WikipediaAPIWrapper arguments without any change. """ def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: return self.load(query=query)
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_transformers~doctran_text_qa.py
from typing import Any, Optional, Sequence from libs.core.langchain_core.documents import BaseDocumentTransformer, Document from libs.core.langchain_core.utils import get_from_env class DoctranQATransformer(BaseDocumentTransformer): """Extract QA from text documents using doctran. Arguments: openai_api_key: OpenAI API key. Can also be specified via environment variable ``OPENAI_API_KEY``. Example: .. code-block:: python from langchain_community.document_transformers import DoctranQATransformer # Pass in openai_api_key or set env var OPENAI_API_KEY qa_transformer = DoctranQATransformer() transformed_document = await qa_transformer.atransform_documents(documents) """ def __init__( self, openai_api_key: Optional[str] = None, openai_api_model: Optional[str] = None, ) -> None: self.openai_api_key = openai_api_key or get_from_env( "openai_api_key", "OPENAI_API_KEY" ) self.openai_api_model = openai_api_model or get_from_env( "openai_api_model", "OPENAI_API_MODEL" ) async def atransform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: raise NotImplementedError def transform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: """Extracts QA from text documents using doctran.""" try: from doctran import Doctran doctran = Doctran( openai_api_key=self.openai_api_key, openai_model=self.openai_api_model ) except ImportError: raise ImportError( "Install doctran to use this parser. (pip install doctran)" ) for d in documents: doctran_doc = doctran.parse(content=d.page_content).interrogate().execute() questions_and_answers = doctran_doc.extracted_properties.get( "questions_and_answers" ) d.metadata["questions_and_answers"] = questions_and_answers return documents
[]
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~load~test_load.py
"""Test for Serializable base class""" import pytest from libs.core.langchain_core.load.dump import dumpd, dumps from libs.core.langchain_core.load.load import load, loads from libs.core.langchain_core.prompts.prompt import PromptTemplate from langchain.chains.llm import LLMChain from langchain.llms.openai import OpenAI class NotSerializable: pass @pytest.mark.requires("openai") def test_loads_openai_llm() -> None: llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") llm_string = dumps(llm) llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"}) assert llm2 == llm assert dumps(llm2) == llm_string assert isinstance(llm2, OpenAI) @pytest.mark.requires("openai") def test_loads_llmchain() -> None: llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) chain_string = dumps(chain) chain2 = loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"}) assert chain2 == chain assert dumps(chain2) == chain_string assert isinstance(chain2, LLMChain) assert isinstance(chain2.llm, OpenAI) assert isinstance(chain2.prompt, PromptTemplate) @pytest.mark.requires("openai") def test_loads_llmchain_env() -> None: import os has_env = "OPENAI_API_KEY" in os.environ if not has_env: os.environ["OPENAI_API_KEY"] = "env_variable" llm = OpenAI(model="davinci", temperature=0.5) prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) chain_string = dumps(chain) chain2 = loads(chain_string) assert chain2 == chain assert dumps(chain2) == chain_string assert isinstance(chain2, LLMChain) assert isinstance(chain2.llm, OpenAI) assert isinstance(chain2.prompt, PromptTemplate) if not has_env: del os.environ["OPENAI_API_KEY"] @pytest.mark.requires("openai") def test_loads_llmchain_with_non_serializable_arg() -> None: llm = OpenAI( model="davinci", temperature=0.5, openai_api_key="hello", http_client=NotSerializable, ) prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) chain_string = dumps(chain, pretty=True) with pytest.raises(NotImplementedError): loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"}) @pytest.mark.requires("openai") def test_load_openai_llm() -> None: llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") llm_obj = dumpd(llm) llm2 = load(llm_obj, secrets_map={"OPENAI_API_KEY": "hello"}) assert llm2 == llm assert dumpd(llm2) == llm_obj assert isinstance(llm2, OpenAI) @pytest.mark.requires("openai") def test_load_llmchain() -> None: llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) chain_obj = dumpd(chain) chain2 = load(chain_obj, secrets_map={"OPENAI_API_KEY": "hello"}) assert chain2 == chain assert dumpd(chain2) == chain_obj assert isinstance(chain2, LLMChain) assert isinstance(chain2.llm, OpenAI) assert isinstance(chain2.prompt, PromptTemplate) @pytest.mark.requires("openai") def test_load_llmchain_env() -> None: import os has_env = "OPENAI_API_KEY" in os.environ if not has_env: os.environ["OPENAI_API_KEY"] = "env_variable" llm = OpenAI(model="davinci", temperature=0.5) prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) chain_obj = dumpd(chain) chain2 = load(chain_obj) assert chain2 == chain assert dumpd(chain2) == chain_obj assert isinstance(chain2, LLMChain) assert isinstance(chain2.llm, OpenAI) assert 
isinstance(chain2.prompt, PromptTemplate) if not has_env: del os.environ["OPENAI_API_KEY"] @pytest.mark.requires("openai") def test_load_llmchain_with_non_serializable_arg() -> None: llm = OpenAI( model="davinci", temperature=0.5, openai_api_key="hello", http_client=NotSerializable, ) prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) chain_obj = dumpd(chain) with pytest.raises(NotImplementedError): load(chain_obj, secrets_map={"OPENAI_API_KEY": "hello"})
[ "hello {name}!" ]
2024-01-10
mth93/langchain
libs~langchain~langchain~__init__.py
# ruff: noqa: E402 """Main entrypoint into package.""" import warnings from importlib import metadata from typing import Any, Optional from libs.core.langchain_core._api.deprecation import surface_langchain_deprecation_warnings verbose = False try: __version__ = metadata.version(__package__) except metadata.PackageNotFoundError: # Case where package metadata is not available. __version__ = "" del metadata # optional, avoids polluting the results of dir(__package__) def _is_interactive_env() -> bool: """Determine if running within IPython or Jupyter.""" import sys return hasattr(sys, "ps2") def _warn_on_import(name: str, replacement: Optional[str] = None) -> None: """Warn on import of deprecated module.""" if _is_interactive_env(): # No warnings for interactive environments. # This is done to avoid polluting the output of interactive environments # where users rely on auto-complete and may trigger this warning # even if they are not using any deprecated modules return if replacement: warnings.warn( f"Importing {name} from langchain root module is no longer supported. " f"Please use {replacement} instead." ) else: warnings.warn( f"Importing {name} from langchain root module is no longer supported." ) # Surfaces Deprecation and Pending Deprecation warnings from langchain. surface_langchain_deprecation_warnings() def __getattr__(name: str) -> Any: if name == "MRKLChain": from langchain.agents import MRKLChain _warn_on_import(name, replacement="langchain.agents.MRKLChain") return MRKLChain elif name == "ReActChain": from langchain.agents import ReActChain _warn_on_import(name, replacement="langchain.agents.ReActChain") return ReActChain elif name == "SelfAskWithSearchChain": from langchain.agents import SelfAskWithSearchChain _warn_on_import(name, replacement="langchain.agents.SelfAskWithSearchChain") return SelfAskWithSearchChain elif name == "ConversationChain": from langchain.chains import ConversationChain _warn_on_import(name, replacement="langchain.chains.ConversationChain") return ConversationChain elif name == "LLMBashChain": raise ImportError( "This module has been moved to langchain-experimental. " "For more details: " "https://github.com/langchain-ai/langchain/discussions/11352." "To access this code, install it with `pip install langchain-experimental`." 
"`from langchain_experimental.llm_bash.base " "import LLMBashChain`" ) elif name == "LLMChain": from langchain.chains import LLMChain _warn_on_import(name, replacement="langchain.chains.LLMChain") return LLMChain elif name == "LLMCheckerChain": from langchain.chains import LLMCheckerChain _warn_on_import(name, replacement="langchain.chains.LLMCheckerChain") return LLMCheckerChain elif name == "LLMMathChain": from langchain.chains import LLMMathChain _warn_on_import(name, replacement="langchain.chains.LLMMathChain") return LLMMathChain elif name == "QAWithSourcesChain": from langchain.chains import QAWithSourcesChain _warn_on_import(name, replacement="langchain.chains.QAWithSourcesChain") return QAWithSourcesChain elif name == "VectorDBQA": from langchain.chains import VectorDBQA _warn_on_import(name, replacement="langchain.chains.VectorDBQA") return VectorDBQA elif name == "VectorDBQAWithSourcesChain": from langchain.chains import VectorDBQAWithSourcesChain _warn_on_import(name, replacement="langchain.chains.VectorDBQAWithSourcesChain") return VectorDBQAWithSourcesChain elif name == "InMemoryDocstore": from langchain.docstore import InMemoryDocstore _warn_on_import(name, replacement="langchain.docstore.InMemoryDocstore") return InMemoryDocstore elif name == "Wikipedia": from langchain.docstore import Wikipedia _warn_on_import(name, replacement="langchain.docstore.Wikipedia") return Wikipedia elif name == "Anthropic": from langchain.llms import Anthropic _warn_on_import(name, replacement="langchain.llms.Anthropic") return Anthropic elif name == "Banana": from langchain.llms import Banana _warn_on_import(name, replacement="langchain.llms.Banana") return Banana elif name == "CerebriumAI": from langchain.llms import CerebriumAI _warn_on_import(name, replacement="langchain.llms.CerebriumAI") return CerebriumAI elif name == "Cohere": from langchain.llms import Cohere _warn_on_import(name, replacement="langchain.llms.Cohere") return Cohere elif name == "ForefrontAI": from langchain.llms import ForefrontAI _warn_on_import(name, replacement="langchain.llms.ForefrontAI") return ForefrontAI elif name == "GooseAI": from langchain.llms import GooseAI _warn_on_import(name, replacement="langchain.llms.GooseAI") return GooseAI elif name == "HuggingFaceHub": from langchain.llms import HuggingFaceHub _warn_on_import(name, replacement="langchain.llms.HuggingFaceHub") return HuggingFaceHub elif name == "HuggingFaceTextGenInference": from langchain.llms import HuggingFaceTextGenInference _warn_on_import(name, replacement="langchain.llms.HuggingFaceTextGenInference") return HuggingFaceTextGenInference elif name == "LlamaCpp": from langchain.llms import LlamaCpp _warn_on_import(name, replacement="langchain.llms.LlamaCpp") return LlamaCpp elif name == "Modal": from langchain.llms import Modal _warn_on_import(name, replacement="langchain.llms.Modal") return Modal elif name == "OpenAI": from langchain.llms import OpenAI _warn_on_import(name, replacement="langchain.llms.OpenAI") return OpenAI elif name == "Petals": from langchain.llms import Petals _warn_on_import(name, replacement="langchain.llms.Petals") return Petals elif name == "PipelineAI": from langchain.llms import PipelineAI _warn_on_import(name, replacement="langchain.llms.PipelineAI") return PipelineAI elif name == "SagemakerEndpoint": from langchain.llms import SagemakerEndpoint _warn_on_import(name, replacement="langchain.llms.SagemakerEndpoint") return SagemakerEndpoint elif name == "StochasticAI": from langchain.llms import StochasticAI 
_warn_on_import(name, replacement="langchain.llms.StochasticAI") return StochasticAI elif name == "Writer": from langchain.llms import Writer _warn_on_import(name, replacement="langchain.llms.Writer") return Writer elif name == "HuggingFacePipeline": from langchain.llms.huggingface_pipeline import HuggingFacePipeline _warn_on_import( name, replacement="langchain.llms.huggingface_pipeline.HuggingFacePipeline" ) return HuggingFacePipeline elif name == "FewShotPromptTemplate": from libs.core.langchain_core.prompts import FewShotPromptTemplate _warn_on_import(name, replacement="langchain.prompts.FewShotPromptTemplate") return FewShotPromptTemplate elif name == "Prompt": from langchain.prompts import Prompt _warn_on_import(name, replacement="langchain.prompts.Prompt") return Prompt elif name == "PromptTemplate": from libs.core.langchain_core.prompts import PromptTemplate _warn_on_import(name, replacement="langchain.prompts.PromptTemplate") return PromptTemplate elif name == "BasePromptTemplate": from libs.core.langchain_core.prompts import BasePromptTemplate _warn_on_import( name, replacement="langchain.schema.prompt_template.BasePromptTemplate" ) return BasePromptTemplate elif name == "ArxivAPIWrapper": from langchain.utilities import ArxivAPIWrapper _warn_on_import(name, replacement="langchain.utilities.ArxivAPIWrapper") return ArxivAPIWrapper elif name == "GoldenQueryAPIWrapper": from langchain.utilities import GoldenQueryAPIWrapper _warn_on_import(name, replacement="langchain.utilities.GoldenQueryAPIWrapper") return GoldenQueryAPIWrapper elif name == "GoogleSearchAPIWrapper": from langchain.utilities import GoogleSearchAPIWrapper _warn_on_import(name, replacement="langchain.utilities.GoogleSearchAPIWrapper") return GoogleSearchAPIWrapper elif name == "GoogleSerperAPIWrapper": from langchain.utilities import GoogleSerperAPIWrapper _warn_on_import(name, replacement="langchain.utilities.GoogleSerperAPIWrapper") return GoogleSerperAPIWrapper elif name == "PowerBIDataset": from langchain.utilities import PowerBIDataset _warn_on_import(name, replacement="langchain.utilities.PowerBIDataset") return PowerBIDataset elif name == "SearxSearchWrapper": from langchain.utilities import SearxSearchWrapper _warn_on_import(name, replacement="langchain.utilities.SearxSearchWrapper") return SearxSearchWrapper elif name == "WikipediaAPIWrapper": from langchain.utilities import WikipediaAPIWrapper _warn_on_import(name, replacement="langchain.utilities.WikipediaAPIWrapper") return WikipediaAPIWrapper elif name == "WolframAlphaAPIWrapper": from langchain.utilities import WolframAlphaAPIWrapper _warn_on_import(name, replacement="langchain.utilities.WolframAlphaAPIWrapper") return WolframAlphaAPIWrapper elif name == "SQLDatabase": from langchain.utilities import SQLDatabase _warn_on_import(name, replacement="langchain.utilities.SQLDatabase") return SQLDatabase elif name == "FAISS": from langchain.vectorstores import FAISS _warn_on_import(name, replacement="langchain.vectorstores.FAISS") return FAISS elif name == "ElasticVectorSearch": from langchain.vectorstores import ElasticVectorSearch _warn_on_import(name, replacement="langchain.vectorstores.ElasticVectorSearch") return ElasticVectorSearch # For backwards compatibility elif name == "SerpAPIChain" or name == "SerpAPIWrapper": from langchain.utilities import SerpAPIWrapper _warn_on_import(name, replacement="langchain.utilities.SerpAPIWrapper") return SerpAPIWrapper elif name == "verbose": from langchain.globals import _verbose _warn_on_import( name, replacement=( 
"langchain.globals.set_verbose() / langchain.globals.get_verbose()" ), ) return _verbose elif name == "debug": from langchain.globals import _debug _warn_on_import( name, replacement=( "langchain.globals.set_debug() / langchain.globals.get_debug()" ), ) return _debug elif name == "llm_cache": from langchain.globals import _llm_cache _warn_on_import( name, replacement=( "langchain.globals.set_llm_cache() / langchain.globals.get_llm_cache()" ), ) return _llm_cache else: raise AttributeError(f"Could not find: {name}") __all__ = [ "LLMChain", "LLMCheckerChain", "LLMMathChain", "ArxivAPIWrapper", "GoldenQueryAPIWrapper", "SelfAskWithSearchChain", "SerpAPIWrapper", "SerpAPIChain", "SearxSearchWrapper", "GoogleSearchAPIWrapper", "GoogleSerperAPIWrapper", "WolframAlphaAPIWrapper", "WikipediaAPIWrapper", "Anthropic", "Banana", "CerebriumAI", "Cohere", "ForefrontAI", "GooseAI", "Modal", "OpenAI", "Petals", "PipelineAI", "StochasticAI", "Writer", "BasePromptTemplate", "Prompt", "FewShotPromptTemplate", "PromptTemplate", "ReActChain", "Wikipedia", "HuggingFaceHub", "SagemakerEndpoint", "HuggingFacePipeline", "SQLDatabase", "PowerBIDataset", "FAISS", "MRKLChain", "VectorDBQA", "ElasticVectorSearch", "InMemoryDocstore", "ConversationChain", "VectorDBQAWithSourcesChain", "QAWithSourcesChain", "LlamaCpp", "HuggingFaceTextGenInference", ]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~notion.py
from pathlib import Path from typing import List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class NotionDirectoryLoader(BaseLoader): """Load `Notion directory` dump.""" def __init__(self, path: str, *, encoding: str = "utf-8") -> None: """Initialize with a file path.""" self.file_path = path self.encoding = encoding def load(self) -> List[Document]: """Load documents.""" paths = list(Path(self.file_path).glob("**/*.md")) docs = [] for p in paths: with open(p, encoding=self.encoding) as f: text = f.read() metadata = {"source": str(p)} docs.append(Document(page_content=text, metadata=metadata)) return docs
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~gpt4all.py
from typing import Any, Dict, List from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator class GPT4AllEmbeddings(BaseModel, Embeddings): """GPT4All embedding models. To use, you should have the gpt4all python package installed Example: .. code-block:: python from langchain_community.embeddings import GPT4AllEmbeddings embeddings = GPT4AllEmbeddings() """ client: Any #: :meta private: @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that GPT4All library is installed.""" try: from gpt4all import Embed4All values["client"] = Embed4All() except ImportError: raise ImportError( "Could not import gpt4all library. " "Please install the gpt4all library to " "use this embedding model: pip install gpt4all" ) return values def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed a list of documents using GPT4All. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = [self.client.embed(text) for text in texts] return [list(map(float, e)) for e in embeddings] def embed_query(self, text: str) -> List[float]: """Embed a query using GPT4All. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed_documents([text])[0]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~assemblyai.py
from __future__ import annotations from enum import Enum from typing import TYPE_CHECKING, List, Optional from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader if TYPE_CHECKING: import assemblyai class TranscriptFormat(Enum): """Transcript format to use for the document loader.""" TEXT = "text" """One document with the transcription text""" SENTENCES = "sentences" """Multiple documents, splits the transcription by each sentence""" PARAGRAPHS = "paragraphs" """Multiple documents, splits the transcription by each paragraph""" SUBTITLES_SRT = "subtitles_srt" """One document with the transcript exported in SRT subtitles format""" SUBTITLES_VTT = "subtitles_vtt" """One document with the transcript exported in VTT subtitles format""" class AssemblyAIAudioTranscriptLoader(BaseLoader): """ Loader for AssemblyAI audio transcripts. It uses the AssemblyAI API to transcribe audio files and loads the transcribed text into one or more Documents, depending on the specified format. To use, you should have the ``assemblyai`` python package installed, and the environment variable ``ASSEMBLYAI_API_KEY`` set with your API key. Alternatively, the API key can also be passed as an argument. Audio files can be specified via an URL or a local file path. """ def __init__( self, file_path: str, *, transcript_format: TranscriptFormat = TranscriptFormat.TEXT, config: Optional[assemblyai.TranscriptionConfig] = None, api_key: Optional[str] = None, ): """ Initializes the AssemblyAI AudioTranscriptLoader. Args: file_path: An URL or a local file path. transcript_format: Transcript format to use. See class ``TranscriptFormat`` for more info. config: Transcription options and features. If ``None`` is given, the Transcriber's default configuration will be used. api_key: AssemblyAI API key. """ try: import assemblyai except ImportError: raise ImportError( "Could not import assemblyai python package. " "Please install it with `pip install assemblyai`." ) if api_key is not None: assemblyai.settings.api_key = api_key self.file_path = file_path self.transcript_format = transcript_format self.transcriber = assemblyai.Transcriber(config=config) def load(self) -> List[Document]: """Transcribes the audio file and loads the transcript into documents. It uses the AssemblyAI API to transcribe the audio file and blocks until the transcription is finished. """ transcript = self.transcriber.transcribe(self.file_path) # This will raise a ValueError if no API key is set. if transcript.error: raise ValueError(f"Could not transcribe file: {transcript.error}") if self.transcript_format == TranscriptFormat.TEXT: return [ Document( page_content=transcript.text, metadata=transcript.json_response ) ] elif self.transcript_format == TranscriptFormat.SENTENCES: sentences = transcript.get_sentences() return [ Document(page_content=s.text, metadata=s.dict(exclude={"text"})) for s in sentences ] elif self.transcript_format == TranscriptFormat.PARAGRAPHS: paragraphs = transcript.get_paragraphs() return [ Document(page_content=p.text, metadata=p.dict(exclude={"text"})) for p in paragraphs ] elif self.transcript_format == TranscriptFormat.SUBTITLES_SRT: return [Document(page_content=transcript.export_subtitles_srt())] elif self.transcript_format == TranscriptFormat.SUBTITLES_VTT: return [Document(page_content=transcript.export_subtitles_vtt())] else: raise ValueError("Unknown transcript format.")
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~bedrock.py
from __future__ import annotations import json import warnings from abc import ABC from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Mapping, Optional from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.outputs import GenerationChunk from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env from langchain_community.llms.utils import enforce_stop_tokens from langchain_community.utilities.anthropic import ( get_num_tokens_anthropic, get_token_ids_anthropic, ) if TYPE_CHECKING: from botocore.config import Config HUMAN_PROMPT = "\n\nHuman:" ASSISTANT_PROMPT = "\n\nAssistant:" ALTERNATION_ERROR = ( "Error: Prompt must alternate between '\n\nHuman:' and '\n\nAssistant:'." ) def _add_newlines_before_ha(input_text: str) -> str: new_text = input_text for word in ["Human:", "Assistant:"]: new_text = new_text.replace(word, "\n\n" + word) for i in range(2): new_text = new_text.replace("\n\n\n" + word, "\n\n" + word) return new_text def _human_assistant_format(input_text: str) -> str: if input_text.count("Human:") == 0 or ( input_text.find("Human:") > input_text.find("Assistant:") and "Assistant:" in input_text ): input_text = HUMAN_PROMPT + " " + input_text # SILENT CORRECTION if input_text.count("Assistant:") == 0: input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION if input_text[: len("Human:")] == "Human:": input_text = "\n\n" + input_text input_text = _add_newlines_before_ha(input_text) count = 0 # track alternation for i in range(len(input_text)): if input_text[i : i + len(HUMAN_PROMPT)] == HUMAN_PROMPT: if count % 2 == 0: count += 1 else: warnings.warn(ALTERNATION_ERROR + f" Received {input_text}") if input_text[i : i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT: if count % 2 == 1: count += 1 else: warnings.warn(ALTERNATION_ERROR + f" Received {input_text}") if count % 2 == 1: # Only saw Human, no Assistant input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION return input_text class LLMInputOutputAdapter: """Adapter class to prepare the inputs from Langchain to a format that LLM model expects. 
It also provides helper function to extract the generated text from the model response.""" provider_to_output_key_map = { "anthropic": "completion", "amazon": "outputText", "cohere": "text", "meta": "generation", } @classmethod def prepare_input( cls, provider: str, prompt: str, model_kwargs: Dict[str, Any] ) -> Dict[str, Any]: input_body = {**model_kwargs} if provider == "anthropic": input_body["prompt"] = _human_assistant_format(prompt) elif provider in ("ai21", "cohere", "meta"): input_body["prompt"] = prompt elif provider == "amazon": input_body = dict() input_body["inputText"] = prompt input_body["textGenerationConfig"] = {**model_kwargs} else: input_body["inputText"] = prompt if provider == "anthropic" and "max_tokens_to_sample" not in input_body: input_body["max_tokens_to_sample"] = 256 return input_body @classmethod def prepare_output(cls, provider: str, response: Any) -> str: if provider == "anthropic": response_body = json.loads(response.get("body").read().decode()) return response_body.get("completion") else: response_body = json.loads(response.get("body").read()) if provider == "ai21": return response_body.get("completions")[0].get("data").get("text") elif provider == "cohere": return response_body.get("generations")[0].get("text") elif provider == "meta": return response_body.get("generation") else: return response_body.get("results")[0].get("outputText") @classmethod def prepare_output_stream( cls, provider: str, response: Any, stop: Optional[List[str]] = None ) -> Iterator[GenerationChunk]: stream = response.get("body") if not stream: return if provider not in cls.provider_to_output_key_map: raise ValueError( f"Unknown streaming response output key for provider: {provider}" ) for event in stream: chunk = event.get("chunk") if chunk: chunk_obj = json.loads(chunk.get("bytes").decode()) if provider == "cohere" and ( chunk_obj["is_finished"] or chunk_obj[cls.provider_to_output_key_map[provider]] == "<EOS_TOKEN>" ): return # chunk obj format varies with provider yield GenerationChunk( text=chunk_obj[cls.provider_to_output_key_map[provider]] ) class BedrockBase(BaseModel, ABC): """Base class for Bedrock models.""" client: Any = Field(exclude=True) #: :meta private: region_name: Optional[str] = None """The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here. """ credentials_profile_name: Optional[str] = Field(default=None, exclude=True) """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. 
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ config: Optional[Config] = None """An optional botocore.config.Config instance to pass to the client.""" model_id: str """Id of the model to call, e.g., amazon.titan-text-express-v1, this is equivalent to the modelId property in the list-foundation-models api""" model_kwargs: Optional[Dict] = None """Keyword arguments to pass to the model.""" endpoint_url: Optional[str] = None """Needed if you don't want to default to us-east-1 endpoint""" streaming: bool = False """Whether to stream the results.""" provider_stop_sequence_key_name_map: Mapping[str, str] = { "anthropic": "stop_sequences", "amazon": "stopSequences", "ai21": "stop_sequences", "cohere": "stop_sequences", } @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" # Skip creating new client if passed in constructor if values["client"] is not None: return values try: import boto3 if values["credentials_profile_name"] is not None: session = boto3.Session(profile_name=values["credentials_profile_name"]) else: # use default credentials session = boto3.Session() values["region_name"] = get_from_dict_or_env( values, "region_name", "AWS_DEFAULT_REGION", default=session.region_name, ) client_params = {} if values["region_name"]: client_params["region_name"] = values["region_name"] if values["endpoint_url"]: client_params["endpoint_url"] = values["endpoint_url"] if values["config"]: client_params["config"] = values["config"] values["client"] = session.client("bedrock-runtime", **client_params) except ImportError: raise ModuleNotFoundError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"model_kwargs": _model_kwargs}, } def _get_provider(self) -> str: return self.model_id.split(".")[0] @property def _model_is_anthropic(self) -> bool: return self._get_provider() == "anthropic" def _prepare_input_and_invoke( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: _model_kwargs = self.model_kwargs or {} provider = self._get_provider() params = {**_model_kwargs, **kwargs} input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params) body = json.dumps(input_body) accept = "application/json" contentType = "application/json" try: response = self.client.invoke_model( body=body, modelId=self.model_id, accept=accept, contentType=contentType ) text = LLMInputOutputAdapter.prepare_output(provider, response) except Exception as e: raise ValueError(f"Error raised by bedrock service: {e}") if stop is not None: text = enforce_stop_tokens(text, stop) return text def _prepare_input_and_invoke_stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: _model_kwargs = self.model_kwargs or {} provider = self._get_provider() if stop: if provider not in self.provider_stop_sequence_key_name_map: raise ValueError( f"Stop sequence key name for {provider} is not supported." 
) # stop sequence from _generate() overrides # stop sequences in the class attribute _model_kwargs[self.provider_stop_sequence_key_name_map.get(provider)] = stop if provider == "cohere": _model_kwargs["stream"] = True params = {**_model_kwargs, **kwargs} input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params) body = json.dumps(input_body) try: response = self.client.invoke_model_with_response_stream( body=body, modelId=self.model_id, accept="application/json", contentType="application/json", ) except Exception as e: raise ValueError(f"Error raised by bedrock service: {e}") for chunk in LLMInputOutputAdapter.prepare_output_stream( provider, response, stop ): yield chunk if run_manager is not None: run_manager.on_llm_new_token(chunk.text, chunk=chunk) class Bedrock(LLM, BedrockBase): """Bedrock models. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Bedrock service. """ """ Example: .. code-block:: python from bedrock_langchain.bedrock_llm import BedrockLLM llm = BedrockLLM( credentials_profile_name="default", model_id="amazon.titan-text-express-v1", streaming=True ) """ @property def _llm_type(self) -> str: """Return type of llm.""" return "amazon_bedrock" @classmethod def is_lc_serializable(cls) -> bool: """Return whether this model can be serialized by Langchain.""" return True @classmethod def get_lc_namespace(cls) -> List[str]: """Get the namespace of the langchain object.""" return ["langchain", "llms", "bedrock"] @property def lc_attributes(self) -> Dict[str, Any]: attributes: Dict[str, Any] = {} if self.region_name: attributes["region_name"] = self.region_name return attributes class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Call out to Bedrock service with streaming. Args: prompt (str): The prompt to pass into the model stop (Optional[List[str]], optional): Stop sequences. These will override any stop sequences in the `model_kwargs` attribute. Defaults to None. run_manager (Optional[CallbackManagerForLLMRun], optional): Callback run managers used to process the output. Defaults to None. Returns: Iterator[GenerationChunk]: Generator that yields the streamed responses. Yields: Iterator[GenerationChunk]: Responses from the model. """ return self._prepare_input_and_invoke_stream( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs ) def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Bedrock service model. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. 
code-block:: python response = llm("Tell me a joke.") """ if self.streaming: completion = "" for chunk in self._stream( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs ): completion += chunk.text return completion return self._prepare_input_and_invoke(prompt=prompt, stop=stop, **kwargs) def get_num_tokens(self, text: str) -> int: if self._model_is_anthropic: return get_num_tokens_anthropic(text) else: return super().get_num_tokens(text) def get_token_ids(self, text: str) -> List[int]: if self._model_is_anthropic: return get_token_ids_anthropic(text) else: return super().get_token_ids(text)
[ "\n\nAssistant:", "\n\nHuman:" ]
2024-01-10
mth93/langchain
libs~langchain~tests~integration_tests~cache~test_astradb.py
""" Test AstraDB caches. Requires an Astra DB vector instance. Required to run this test: - a recent `astrapy` Python package available - an Astra DB instance; - the two environment variables set: export ASTRA_DB_API_ENDPOINT="https://<DB-ID>-us-east1.apps.astra.datastax.com" export ASTRA_DB_APPLICATION_TOKEN="AstraCS:........." - optionally this as well (otherwise defaults are used): export ASTRA_DB_KEYSPACE="my_keyspace" """ import os from typing import Iterator import pytest from libs.core.langchain_core.outputs import Generation, LLMResult from langchain.cache import AstraDBCache, AstraDBSemanticCache from langchain.globals import get_llm_cache, set_llm_cache from tests.integration_tests.cache.fake_embeddings import FakeEmbeddings from tests.unit_tests.llms.fake_llm import FakeLLM def _has_env_vars() -> bool: return all( [ "ASTRA_DB_APPLICATION_TOKEN" in os.environ, "ASTRA_DB_API_ENDPOINT" in os.environ, ] ) @pytest.fixture(scope="module") def astradb_cache() -> Iterator[AstraDBCache]: cache = AstraDBCache( collection_name="lc_integration_test_cache", token=os.environ["ASTRA_DB_APPLICATION_TOKEN"], api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"], namespace=os.environ.get("ASTRA_DB_KEYSPACE"), ) yield cache cache.astra_db.delete_collection("lc_integration_test_cache") @pytest.fixture(scope="module") def astradb_semantic_cache() -> Iterator[AstraDBSemanticCache]: fake_embe = FakeEmbeddings() sem_cache = AstraDBSemanticCache( collection_name="lc_integration_test_sem_cache", token=os.environ["ASTRA_DB_APPLICATION_TOKEN"], api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"], namespace=os.environ.get("ASTRA_DB_KEYSPACE"), embedding=fake_embe, ) yield sem_cache sem_cache.astra_db.delete_collection("lc_integration_test_cache") @pytest.mark.requires("astrapy") @pytest.mark.skipif(not _has_env_vars(), reason="Missing Astra DB env. vars") class TestAstraDBCaches: def test_astradb_cache(self, astradb_cache: AstraDBCache) -> None: set_llm_cache(astradb_cache) llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update("foo", llm_string, [Generation(text="fizz")]) output = llm.generate(["foo"]) print(output) expected_output = LLMResult( generations=[[Generation(text="fizz")]], llm_output={}, ) print(expected_output) assert output == expected_output astradb_cache.clear() def test_cassandra_semantic_cache( self, astradb_semantic_cache: AstraDBSemanticCache ) -> None: set_llm_cache(astradb_semantic_cache) llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update("foo", llm_string, [Generation(text="fizz")]) output = llm.generate(["bar"]) # same embedding as 'foo' expected_output = LLMResult( generations=[[Generation(text="fizz")]], llm_output={}, ) assert output == expected_output # clear the cache astradb_semantic_cache.clear() output = llm.generate(["bar"]) # 'fizz' is erased away now assert output != expected_output astradb_semantic_cache.clear()
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~timescalevector.py
"""VectorStore wrapper around a Postgres-TimescaleVector database.""" from __future__ import annotations import enum import logging import uuid from datetime import timedelta from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union, ) from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.utils import get_from_dict_or_env from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import DistanceStrategy if TYPE_CHECKING: from timescale_vector import Predicates DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE ADA_TOKEN_COUNT = 1536 _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain_store" class TimescaleVector(VectorStore): """Timescale Postgres vector store To use, you should have the ``timescale_vector`` python package installed. Args: service_url: Service url on timescale cloud. embedding: Any embedding function implementing `langchain.embeddings.base.Embeddings` interface. collection_name: The name of the collection to use. (default: langchain_store) This will become the table name used for the collection. distance_strategy: The distance strategy to use. (default: COSINE) pre_delete_collection: If True, will delete the collection if it exists. (default: False). Useful for testing. Example: .. code-block:: python from langchain_community.vectorstores import TimescaleVector from langchain_community.embeddings.openai import OpenAIEmbeddings SERVICE_URL = "postgres://tsdbadmin:<password>@<id>.tsdb.cloud.timescale.com:<port>/tsdb?sslmode=require" COLLECTION_NAME = "state_of_the_union_test" embeddings = OpenAIEmbeddings() vectorestore = TimescaleVector.from_documents( embedding=embeddings, documents=docs, collection_name=COLLECTION_NAME, service_url=SERVICE_URL, ) """ # noqa: E501 def __init__( self, service_url: str, embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, num_dimensions: int = ADA_TOKEN_COUNT, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None, relevance_score_fn: Optional[Callable[[float], float]] = None, time_partition_interval: Optional[timedelta] = None, **kwargs: Any, ) -> None: try: from timescale_vector import client except ImportError: raise ImportError( "Could not import timescale_vector python package. " "Please install it with `pip install timescale-vector`." ) self.service_url = service_url self.embedding = embedding self.collection_name = collection_name self.num_dimensions = num_dimensions self._distance_strategy = distance_strategy self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self._time_partition_interval = time_partition_interval self.sync_client = client.Sync( self.service_url, self.collection_name, self.num_dimensions, self._distance_strategy.value.lower(), time_partition_interval=self._time_partition_interval, **kwargs, ) self.async_client = client.Async( self.service_url, self.collection_name, self.num_dimensions, self._distance_strategy.value.lower(), time_partition_interval=self._time_partition_interval, **kwargs, ) self.__post_init__() def __post_init__( self, ) -> None: """ Initialize the store. 
""" self.sync_client.create_tables() if self.pre_delete_collection: self.sync_client.delete_all() @property def embeddings(self) -> Embeddings: return self.embedding def drop_tables(self) -> None: self.sync_client.drop_table() @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, service_url: Optional[str] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: num_dimensions = len(embeddings[0]) if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] if service_url is None: service_url = cls.get_service_url(kwargs) store = cls( service_url=service_url, num_dimensions=num_dimensions, collection_name=collection_name, embedding=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store @classmethod async def __afrom( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, service_url: Optional[str] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: num_dimensions = len(embeddings[0]) if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] if service_url is None: service_url = cls.get_service_url(kwargs) store = cls( service_url=service_url, num_dimensions=num_dimensions, collection_name=collection_name, embedding=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) await store.aadd_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. kwargs: vectorstore specific parameters """ if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] records = list(zip(ids, metadatas, texts, embeddings)) self.sync_client.upsert(records) return ids async def aadd_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. 
kwargs: vectorstore specific parameters """ if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] records = list(zip(ids, metadatas, texts, embeddings)) await self.async_client.upsert(records) return ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding.embed_documents(list(texts)) return self.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) async def aadd_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding.embed_documents(list(texts)) return await self.aadd_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) def _embed_query(self, query: str) -> Optional[List[float]]: # an empty query should not be embedded if query is None or query == "" or query.isspace(): return None else: return self.embedding.embed_query(query) def similarity_search( self, query: str, k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with TimescaleVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self._embed_query(query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs, ) async def asimilarity_search( self, query: str, k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with TimescaleVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self._embed_query(query) return await self.asimilarity_search_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs, ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar to the query and score for each """ embedding = self._embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs, ) return docs async def asimilarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self._embed_query(query) return await self.asimilarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs, ) def date_to_range_filter(self, **kwargs: Any) -> Any: constructor_args = { key: kwargs[key] for key in [ "start_date", "end_date", "time_delta", "start_inclusive", "end_inclusive", ] if key in kwargs } if not constructor_args or len(constructor_args) == 0: return None try: from timescale_vector import client except ImportError: raise ImportError( "Could not import timescale_vector python package. " "Please install it with `pip install timescale-vector`." ) return client.UUIDTimeRange(**constructor_args) def similarity_search_with_score_by_vector( self, embedding: Optional[List[float]], k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: try: from timescale_vector import client except ImportError: raise ImportError( "Could not import timescale_vector python package. " "Please install it with `pip install timescale-vector`." ) results = self.sync_client.search( embedding, limit=k, filter=filter, predicates=predicates, uuid_time_filter=self.date_to_range_filter(**kwargs), ) docs = [ ( Document( page_content=result[client.SEARCH_RESULT_CONTENTS_IDX], metadata=result[client.SEARCH_RESULT_METADATA_IDX], ), result[client.SEARCH_RESULT_DISTANCE_IDX], ) for result in results ] return docs async def asimilarity_search_with_score_by_vector( self, embedding: Optional[List[float]], k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: try: from timescale_vector import client except ImportError: raise ImportError( "Could not import timescale_vector python package. " "Please install it with `pip install timescale-vector`." ) results = await self.async_client.search( embedding, limit=k, filter=filter, predicates=predicates, uuid_time_filter=self.date_to_range_filter(**kwargs), ) docs = [ ( Document( page_content=result[client.SEARCH_RESULT_CONTENTS_IDX], metadata=result[client.SEARCH_RESULT_METADATA_IDX], ), result[client.SEARCH_RESULT_DISTANCE_IDX], ) for result in results ] return docs def similarity_search_by_vector( self, embedding: Optional[List[float]], k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. 
""" docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs ) return [doc for doc, _ in docs_and_scores] async def asimilarity_search_by_vector( self, embedding: Optional[List[float]], k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ docs_and_scores = await self.asimilarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs ) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls: Type[TimescaleVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod async def afrom_texts( cls: Type[TimescaleVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable. """ embeddings = embedding.embed_documents(list(texts)) return await cls.__afrom( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: """Construct TimescaleVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable. Example: .. 
code-block:: python from langchain_community.vectorstores import TimescaleVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) tvs = TimescaleVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod async def afrom_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: """Construct TimescaleVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable. Example: .. code-block:: python from langchain_community.vectorstores import TimescaleVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) tvs = TimescaleVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return await cls.__afrom( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_existing_index( cls: Type[TimescaleVector], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: """ Get instance of an existing TimescaleVector store.This method will return the instance of the store without inserting any new embeddings """ service_url = cls.get_service_url(kwargs) store = cls( service_url=service_url, collection_name=collection_name, embedding=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, ) return store @classmethod def get_service_url(cls, kwargs: Dict[str, Any]) -> str: service_url: str = get_from_dict_or_env( data=kwargs, key="service_url", env_key="TIMESCALE_SERVICE_URL", ) if not service_url: raise ValueError( "Postgres connection string is required" "Either pass it as a parameter" "or set the TIMESCALE_SERVICE_URL environment variable." ) return service_url @classmethod def service_url_from_db_params( cls, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return f"postgresql://{user}:{password}@{host}:{port}/{database}" def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) 
- embedding dimensionality - etc. """ if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn # Default strategy is to rely on distance strategy provided # in vectorstore constructor if self._distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return self._euclidean_relevance_score_fn elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn else: raise ValueError( "No supported normalization function" f" for distance_strategy of {self._distance_strategy}." "Consider providing relevance_score_fn to TimescaleVector constructor." ) def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ if ids is None: raise ValueError("No ids provided to delete.") self.sync_client.delete_by_ids(ids) return True # todo should this be part of delete|()? def delete_by_metadata( self, filter: Union[Dict[str, str], List[Dict[str, str]]], **kwargs: Any ) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ self.sync_client.delete_by_metadata(filter) return True class IndexType(str, enum.Enum): """Enumerator for the supported Index types""" TIMESCALE_VECTOR = "tsv" PGVECTOR_IVFFLAT = "ivfflat" PGVECTOR_HNSW = "hnsw" DEFAULT_INDEX_TYPE = IndexType.TIMESCALE_VECTOR def create_index( self, index_type: Union[IndexType, str] = DEFAULT_INDEX_TYPE, **kwargs: Any ) -> None: try: from timescale_vector import client except ImportError: raise ImportError( "Could not import timescale_vector python package. " "Please install it with `pip install timescale-vector`." ) index_type = ( index_type.value if isinstance(index_type, self.IndexType) else index_type ) if index_type == self.IndexType.PGVECTOR_IVFFLAT.value: self.sync_client.create_embedding_index(client.IvfflatIndex(**kwargs)) if index_type == self.IndexType.PGVECTOR_HNSW.value: self.sync_client.create_embedding_index(client.HNSWIndex(**kwargs)) if index_type == self.IndexType.TIMESCALE_VECTOR.value: self.sync_client.create_embedding_index( client.TimescaleVectorIndex(**kwargs) ) def drop_index(self) -> None: self.sync_client.drop_embedding_index()
[]
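Usage note for the TimescaleVector store above — a minimal sketch, not the canonical example: it assumes a reachable Timescale instance (via the TIMESCALE_SERVICE_URL environment variable read by get_service_url) plus OpenAI credentials, and the collection name and texts are illustrative.

# Sketch: index a few texts and run a similarity search against Timescale Vector.
# Assumes TIMESCALE_SERVICE_URL and OPENAI_API_KEY are set in the environment.
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import TimescaleVector

store = TimescaleVector.from_texts(
    ["foo", "bar", "baz"],
    OpenAIEmbeddings(),
    collection_name="example_collection",  # illustrative collection name
)
docs = store.similarity_search("foo", k=1)
print(docs[0].page_content)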
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~chat_models~test_jinachat.py
"""Test JinaChat wrapper.""" from typing import cast import pytest from libs.core.langchain_core.callbacks import CallbackManager from libs.core.langchain_core.messages import BaseMessage, HumanMessage, SystemMessage from libs.core.langchain_core.outputs import ChatGeneration, LLMResult from libs.core.langchain_core.pydantic_v1 import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.chat_models.jinachat import JinaChat from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_jinachat_api_key_is_secret_string() -> None: llm = JinaChat(jinachat_api_key="secret-api-key") assert isinstance(llm.jinachat_api_key, SecretStr) def test_jinachat_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: """Test initialization with an API key provided via an env variable""" monkeypatch.setenv("JINACHAT_API_KEY", "secret-api-key") llm = JinaChat() print(llm.jinachat_api_key, end="") captured = capsys.readouterr() assert captured.out == "**********" def test_jinachat_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test initialization with an API key provided via the initializer""" llm = JinaChat(jinachat_api_key="secret-api-key") print(llm.jinachat_api_key, end="") captured = capsys.readouterr() assert captured.out == "**********" def test_uses_actual_secret_value_from_secretstr() -> None: """Test that actual secret is retrieved using `.get_secret_value()`.""" llm = JinaChat(jinachat_api_key="secret-api-key") assert cast(SecretStr, llm.jinachat_api_key).get_secret_value() == "secret-api-key" def test_jinachat() -> None: """Test JinaChat wrapper.""" chat = JinaChat(max_tokens=10) message = HumanMessage(content="Hello") response = chat([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_jinachat_system_message() -> None: """Test JinaChat wrapper with system message.""" chat = JinaChat(max_tokens=10) system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") response = chat([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_jinachat_generate() -> None: """Test JinaChat wrapper with generate.""" chat = JinaChat(max_tokens=10) message = HumanMessage(content="Hello") response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content def test_jinachat_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = JinaChat( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True, ) message = HumanMessage(content="Hello") response = chat([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage) async def test_async_jinachat() -> None: """Test async generation.""" chat = JinaChat(max_tokens=102) message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in 
response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content async def test_async_jinachat_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = JinaChat( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True, ) message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert callback_handler.llm_streams > 0 assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content def test_jinachat_extra_kwargs() -> None: """Test extra kwargs to chat openai.""" # Check that foo is saved in extra_kwargs. llm = JinaChat(foo=3, max_tokens=10) assert llm.max_tokens == 10 assert llm.model_kwargs == {"foo": 3} # Test that if extra_kwargs are provided, they are added to it. llm = JinaChat(foo=3, model_kwargs={"bar": 2}) assert llm.model_kwargs == {"foo": 3, "bar": 2} # Test that if provided twice it errors with pytest.raises(ValueError): JinaChat(foo=3, model_kwargs={"foo": 2}) # Test that if explicit param is specified in kwargs it errors with pytest.raises(ValueError): JinaChat(model_kwargs={"temperature": 0.2})
[ "Hello", "You are to chat with the user." ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~github.py
from abc import ABC
from datetime import datetime
from typing import Dict, Iterator, List, Literal, Optional, Union

import requests
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator, validator
from libs.core.langchain_core.utils import get_from_dict_or_env

from langchain_community.document_loaders.base import BaseLoader


class BaseGitHubLoader(BaseLoader, BaseModel, ABC):
    """Load `GitHub` repository Issues."""

    repo: str
    """Name of repository"""
    access_token: str
    """Personal access token - see https://github.com/settings/tokens?type=beta"""
    github_api_url: str = "https://api.github.com"
    """URL of GitHub API"""

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that access token exists in environment."""
        values["access_token"] = get_from_dict_or_env(
            values, "access_token", "GITHUB_PERSONAL_ACCESS_TOKEN"
        )
        return values

    @property
    def headers(self) -> Dict[str, str]:
        return {
            "Accept": "application/vnd.github+json",
            "Authorization": f"Bearer {self.access_token}",
        }


class GitHubIssuesLoader(BaseGitHubLoader):
    """Load issues of a GitHub repository."""

    include_prs: bool = True
    """If True include Pull Requests in results, otherwise ignore them."""
    milestone: Union[int, Literal["*", "none"], None] = None
    """If integer is passed, it should be a milestone's number field.
        If the string '*' is passed, issues with any milestone are accepted.
        If the string 'none' is passed, issues without milestones are returned.
    """
    state: Optional[Literal["open", "closed", "all"]] = None
    """Filter on issue state. Can be one of: 'open', 'closed', 'all'."""
    assignee: Optional[str] = None
    """Filter on assigned user. Pass 'none' for no user and '*' for any user."""
    creator: Optional[str] = None
    """Filter on the user that created the issue."""
    mentioned: Optional[str] = None
    """Filter on a user that's mentioned in the issue."""
    labels: Optional[List[str]] = None
    """Label names to filter on. Example: bug,ui,@high."""
    sort: Optional[Literal["created", "updated", "comments"]] = None
    """What to sort results by. Can be one of: 'created', 'updated', 'comments'.
        Default is 'created'."""
    direction: Optional[Literal["asc", "desc"]] = None
    """The direction to sort the results by. Can be one of: 'asc', 'desc'."""
    since: Optional[str] = None
    """Only show notifications updated after the given time.
        This is a timestamp in ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ."""

    @validator("since")
    def validate_since(cls, v: Optional[str]) -> Optional[str]:
        if v:
            try:
                datetime.strptime(v, "%Y-%m-%dT%H:%M:%SZ")
            except ValueError:
                raise ValueError(
                    "Invalid value for 'since'. Expected a date string in "
                    f"YYYY-MM-DDTHH:MM:SSZ format. Received: {v}"
                )
        return v

    def lazy_load(self) -> Iterator[Document]:
        """
        Get issues of a GitHub repository.

        Returns:
            A list of Documents with attributes:
                - page_content
                - metadata
                    - url
                    - title
                    - creator
                    - created_at
                    - last_update_time
                    - closed_time
                    - number of comments
                    - state
                    - labels
                    - assignee
                    - assignees
                    - milestone
                    - locked
                    - number
                    - is_pull_request
        """
        url: Optional[str] = self.url
        while url:
            response = requests.get(url, headers=self.headers)
            response.raise_for_status()
            issues = response.json()
            for issue in issues:
                doc = self.parse_issue(issue)
                if not self.include_prs and doc.metadata["is_pull_request"]:
                    continue
                yield doc
            if response.links and response.links.get("next"):
                url = response.links["next"]["url"]
            else:
                url = None

    def load(self) -> List[Document]:
        """
        Get issues of a GitHub repository.

        Returns:
            A list of Documents with attributes:
                - page_content
                - metadata
                    - url
                    - title
                    - creator
                    - created_at
                    - last_update_time
                    - closed_time
                    - number of comments
                    - state
                    - labels
                    - assignee
                    - assignees
                    - milestone
                    - locked
                    - number
                    - is_pull_request
        """
        return list(self.lazy_load())

    def parse_issue(self, issue: dict) -> Document:
        """Create Document objects from a list of GitHub issues."""
        metadata = {
            "url": issue["html_url"],
            "title": issue["title"],
            "creator": issue["user"]["login"],
            "created_at": issue["created_at"],
            "comments": issue["comments"],
            "state": issue["state"],
            "labels": [label["name"] for label in issue["labels"]],
            "assignee": issue["assignee"]["login"] if issue["assignee"] else None,
            "milestone": issue["milestone"]["title"] if issue["milestone"] else None,
            "locked": issue["locked"],
            "number": issue["number"],
            "is_pull_request": "pull_request" in issue,
        }
        content = issue["body"] if issue["body"] is not None else ""
        return Document(page_content=content, metadata=metadata)

    @property
    def query_params(self) -> str:
        """Create query parameters for GitHub API."""
        labels = ",".join(self.labels) if self.labels else self.labels
        query_params_dict = {
            "milestone": self.milestone,
            "state": self.state,
            "assignee": self.assignee,
            "creator": self.creator,
            "mentioned": self.mentioned,
            "labels": labels,
            "sort": self.sort,
            "direction": self.direction,
            "since": self.since,
        }
        query_params_list = [
            f"{k}={v}" for k, v in query_params_dict.items() if v is not None
        ]
        query_params = "&".join(query_params_list)
        return query_params

    @property
    def url(self) -> str:
        """Create URL for GitHub API."""
        return f"{self.github_api_url}/repos/{self.repo}/issues?{self.query_params}"
[]
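Usage note for GitHubIssuesLoader above — a sketch assuming GITHUB_PERSONAL_ACCESS_TOKEN is set; the repository name and filters are illustrative.

# Sketch: load open, non-PR issues from a repository as Documents.
# Assumes GITHUB_PERSONAL_ACCESS_TOKEN is set (or pass access_token=...).
from langchain_community.document_loaders.github import GitHubIssuesLoader

loader = GitHubIssuesLoader(
    repo="octocat/Hello-World",  # illustrative repository
    include_prs=False,
    state="open",
)
for doc in loader.lazy_load():
    print(doc.metadata["title"], doc.metadata["url"])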
2024-01-10
mth93/langchain
libs~langchain~tests~integration_tests~retrievers~document_compressors~test_embeddings_filter.py
"""Integration test for embedding-based relevant doc filtering.""" import numpy as np from libs.core.langchain_core.documents import Document from langchain.document_transformers.embeddings_redundant_filter import ( _DocumentWithState, ) from langchain.embeddings import OpenAIEmbeddings from langchain.retrievers.document_compressors import EmbeddingsFilter def test_embeddings_filter() -> None: texts = [ "What happened to all of my cookies?", "I wish there were better Italian restaurants in my neighborhood.", "My favorite color is green", ] docs = [Document(page_content=t) for t in texts] embeddings = OpenAIEmbeddings() relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75) actual = relevant_filter.compress_documents(docs, "What did I say about food?") assert len(actual) == 2 assert len(set(texts[:2]).intersection([d.page_content for d in actual])) == 2 def test_embeddings_filter_with_state() -> None: texts = [ "What happened to all of my cookies?", "I wish there were better Italian restaurants in my neighborhood.", "My favorite color is green", ] query = "What did I say about food?" embeddings = OpenAIEmbeddings() embedded_query = embeddings.embed_query(query) state = {"embedded_doc": np.zeros(len(embedded_query))} docs = [_DocumentWithState(page_content=t, state=state) for t in texts] docs[-1].state = {"embedded_doc": embedded_query} relevant_filter = EmbeddingsFilter( embeddings=embeddings, similarity_threshold=0.75, return_similarity_scores=True ) actual = relevant_filter.compress_documents(docs, query) assert len(actual) == 1 assert texts[-1] == actual[0].page_content
[]
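Usage note for the EmbeddingsFilter exercised above — the same filtering outside a test, as a sketch assuming OpenAI credentials; the documents and query mirror the test.

# Sketch: drop documents that are not similar enough to the query.
# Assumes OPENAI_API_KEY is set.
from libs.core.langchain_core.documents import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import EmbeddingsFilter

docs = [
    Document(page_content="What happened to all of my cookies?"),
    Document(page_content="My favorite color is green"),
]
relevant_filter = EmbeddingsFilter(
    embeddings=OpenAIEmbeddings(), similarity_threshold=0.75
)
kept = relevant_filter.compress_documents(docs, "What did I say about food?")
print([d.page_content for d in kept])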
2024-01-10
mth93/langchain
libs~community~langchain_community~agent_toolkits~spark_sql~toolkit.py
"""Toolkit for interacting with Spark SQL.""" from typing import List from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.pydantic_v1 import Field from langchain_community.agent_toolkits.base import BaseToolkit from langchain_community.tools import BaseTool from langchain_community.tools.spark_sql.tool import ( InfoSparkSQLTool, ListSparkSQLTool, QueryCheckerTool, QuerySparkSQLTool, ) from langchain_community.utilities.spark_sql import SparkSQL class SparkSQLToolkit(BaseToolkit): """Toolkit for interacting with Spark SQL.""" db: SparkSQL = Field(exclude=True) llm: BaseLanguageModel = Field(exclude=True) class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return [ QuerySparkSQLTool(db=self.db), InfoSparkSQLTool(db=self.db), ListSparkSQLTool(db=self.db), QueryCheckerTool(db=self.db, llm=self.llm), ]
[]
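Usage note for SparkSQLToolkit above — a sketch that builds the toolkit and lists its tools; the SparkSQL schema and the ChatOpenAI model are placeholder choices, and a running Spark session plus an OpenAI key are assumed.

# Sketch: bundle the four Spark SQL tools behind one toolkit and inspect them.
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain_community.chat_models import ChatOpenAI
from langchain_community.utilities.spark_sql import SparkSQL

db = SparkSQL(schema="default")  # assumes a local Spark session is available
llm = ChatOpenAI(temperature=0)  # assumes OPENAI_API_KEY is set
toolkit = SparkSQLToolkit(db=db, llm=llm)
for tool in toolkit.get_tools():
    print(tool.name)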
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~chromium.py
import asyncio
import logging
from typing import Iterator, List

from libs.core.langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class AsyncChromiumLoader(BaseLoader):
    """Scrape HTML pages from URLs using a headless instance of Chromium."""

    def __init__(
        self,
        urls: List[str],
    ):
        """
        Initialize the loader with a list of URL paths.

        Args:
            urls (List[str]): A list of URLs to scrape content from.

        Raises:
            ImportError: If the required 'playwright' package is not installed.
        """
        self.urls = urls

        try:
            import playwright  # noqa: F401
        except ImportError:
            raise ImportError(
                "playwright is required for AsyncChromiumLoader. "
                "Please install it with `pip install playwright`."
            )

    async def ascrape_playwright(self, url: str) -> str:
        """
        Asynchronously scrape the content of a given URL using Playwright's async API.

        Args:
            url (str): The URL to scrape.

        Returns:
            str: The scraped HTML content or an error message if an exception occurs.
        """
        from playwright.async_api import async_playwright

        logger.info("Starting scraping...")
        results = ""
        async with async_playwright() as p:
            browser = await p.chromium.launch(headless=True)
            try:
                page = await browser.new_page()
                await page.goto(url)
                results = await page.content()  # Simply get the HTML content
                logger.info("Content scraped")
            except Exception as e:
                results = f"Error: {e}"
            await browser.close()
        return results

    def lazy_load(self) -> Iterator[Document]:
        """
        Lazily load text content from the provided URLs.

        This method yields Documents one at a time as they're scraped,
        instead of waiting to scrape all URLs before returning.

        Yields:
            Document: The scraped content encapsulated within a Document object.
        """
        for url in self.urls:
            html_content = asyncio.run(self.ascrape_playwright(url))
            metadata = {"source": url}
            yield Document(page_content=html_content, metadata=metadata)

    def load(self) -> List[Document]:
        """
        Load and return all Documents from the provided URLs.

        Returns:
            List[Document]: A list of Document objects
            containing the scraped content from each URL.
        """
        return list(self.lazy_load())
[]
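Usage note for AsyncChromiumLoader above — a sketch assuming playwright is installed and its Chromium build has been downloaded (playwright install chromium); the URL is illustrative.

# Sketch: scrape one page's HTML into a Document.
# Assumes `pip install playwright` and `playwright install chromium` have been run.
from langchain_community.document_loaders.chromium import AsyncChromiumLoader

loader = AsyncChromiumLoader(["https://example.com"])
docs = loader.load()
print(docs[0].metadata["source"], len(docs[0].page_content))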
2024-01-10
mth93/langchain
libs~partners~anthropic~langchain_anthropic~chat_models.py
import os
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Tuple

import anthropic
from libs.core.langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.chat_models import BaseChatModel
from libs.core.langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
)
from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from libs.core.langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str

_message_type_lookups = {"human": "user", "assistant": "ai"}


def _format_messages(messages: List[BaseMessage]) -> Tuple[Optional[str], List[Dict]]:
    """Format messages for anthropic."""

    """
    [
        {
            "role": _message_type_lookups[m.type],
            "content": [_AnthropicMessageContent(text=m.content).dict()],
        }
        for m in messages
    ]
    """
    system = None
    formatted_messages = []
    for i, message in enumerate(messages):
        if not isinstance(message.content, str):
            raise ValueError("Anthropic Messages API only supports text generation.")
        if message.type == "system":
            if i != 0:
                raise ValueError("System message must be at beginning of message list.")
            system = message.content
        else:
            formatted_messages.append(
                {
                    "role": _message_type_lookups[message.type],
                    "content": message.content,
                }
            )
    return system, formatted_messages


class ChatAnthropicMessages(BaseChatModel):
    """Beta ChatAnthropicMessages chat model.

    Example:
        .. code-block:: python

            from langchain_anthropic import ChatAnthropicMessages

            model = ChatAnthropicMessages()
    """

    _client: anthropic.Client = Field(default_factory=anthropic.Client)
    _async_client: anthropic.AsyncClient = Field(default_factory=anthropic.AsyncClient)

    model: str = Field(alias="model_name")
    """Model name to use."""

    max_tokens: int = Field(default=256)
    """Denotes the number of tokens to predict per generation."""

    temperature: Optional[float] = None
    """A non-negative float that tunes the degree of randomness in generation."""

    top_k: Optional[int] = None
    """Number of most likely tokens to consider at each step."""

    top_p: Optional[float] = None
    """Total probability mass of tokens to consider at each step."""

    default_request_timeout: Optional[float] = None
    """Timeout for requests to Anthropic Completion API. Default is 600 seconds."""

    anthropic_api_url: str = "https://api.anthropic.com"

    anthropic_api_key: Optional[SecretStr] = None

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "chat-anthropic-messages"

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        anthropic_api_key = convert_to_secret_str(
            values.get("anthropic_api_key") or os.environ.get("ANTHROPIC_API_KEY") or ""
        )
        values["anthropic_api_key"] = anthropic_api_key
        values["_client"] = anthropic.Client(
            api_key=anthropic_api_key.get_secret_value()
        )
        values["_async_client"] = anthropic.AsyncClient(
            api_key=anthropic_api_key.get_secret_value()
        )
        return values

    def _format_params(
        self,
        *,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        **kwargs: Dict,
    ) -> Dict:
        # get system prompt if any
        system, formatted_messages = _format_messages(messages)
        rtn = {
            "model": self.model,
            "max_tokens": self.max_tokens,
            "messages": formatted_messages,
            "temperature": self.temperature,
            "top_k": self.top_k,
            "top_p": self.top_p,
            "stop_sequences": stop,
            "system": system,
        }
        rtn = {k: v for k, v in rtn.items() if v is not None}

        return rtn

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        params = self._format_params(messages=messages, stop=stop, **kwargs)
        with self._client.beta.messages.stream(**params) as stream:
            for text in stream.text_stream:
                yield ChatGenerationChunk(message=AIMessageChunk(content=text))

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        params = self._format_params(messages=messages, stop=stop, **kwargs)
        async with self._async_client.beta.messages.stream(**params) as stream:
            async for text in stream.text_stream:
                yield ChatGenerationChunk(message=AIMessageChunk(content=text))

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        params = self._format_params(messages=messages, stop=stop, **kwargs)
        data = self._client.beta.messages.create(**params)
        return ChatResult(
            generations=[
                ChatGeneration(message=AIMessage(content=data.content[0].text))
            ],
            llm_output=data,
        )

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        params = self._format_params(messages=messages, stop=stop, **kwargs)
        data = await self._async_client.beta.messages.create(**params)
        return ChatResult(
            generations=[
                ChatGeneration(message=AIMessage(content=data.content[0].text))
            ],
            llm_output=data,
        )
[]
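Usage note for ChatAnthropicMessages above — a sketch assuming ANTHROPIC_API_KEY is set; the model id is an illustrative guess, not prescribed by the class.

# Sketch: one synchronous call and one streaming call against the Messages API.
# Assumes ANTHROPIC_API_KEY is set; the model id below is illustrative.
from libs.core.langchain_core.messages import HumanMessage
from langchain_anthropic.chat_models import ChatAnthropicMessages

chat = ChatAnthropicMessages(model_name="claude-2.1", max_tokens=256)
print(chat.invoke([HumanMessage(content="Say hello in one word.")]).content)

for chunk in chat.stream([HumanMessage(content="Count to three.")]):
    print(chunk.content, end="")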
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~watsonxllm.py
import logging import os from typing import Any, Dict, Iterator, List, Mapping, Optional, Union from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.llms import BaseLLM from libs.core.langchain_core.outputs import Generation, GenerationChunk, LLMResult from libs.core.langchain_core.pydantic_v1 import Extra, SecretStr, root_validator from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env logger = logging.getLogger(__name__) class WatsonxLLM(BaseLLM): """ IBM watsonx.ai large language models. To use, you should have ``ibm_watson_machine_learning`` python package installed, and the environment variable ``WATSONX_APIKEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames parameters = { GenTextParamsMetaNames.DECODING_METHOD: "sample", GenTextParamsMetaNames.MAX_NEW_TOKENS: 100, GenTextParamsMetaNames.MIN_NEW_TOKENS: 1, GenTextParamsMetaNames.TEMPERATURE: 0.5, GenTextParamsMetaNames.TOP_K: 50, GenTextParamsMetaNames.TOP_P: 1, } from langchain_community.llms import WatsonxLLM llm = WatsonxLLM( model_id="google/flan-ul2", url="https://us-south.ml.cloud.ibm.com", apikey="*****", project_id="*****", params=parameters, ) """ model_id: str = "" """Type of model to use.""" project_id: str = "" """ID of the Watson Studio project.""" space_id: str = "" """ID of the Watson Studio space.""" url: Optional[SecretStr] = None """Url to Watson Machine Learning instance""" apikey: Optional[SecretStr] = None """Apikey to Watson Machine Learning instance""" token: Optional[SecretStr] = None """Token to Watson Machine Learning instance""" password: Optional[SecretStr] = None """Password to Watson Machine Learning instance""" username: Optional[SecretStr] = None """Username to Watson Machine Learning instance""" instance_id: Optional[SecretStr] = None """Instance_id of Watson Machine Learning instance""" version: Optional[SecretStr] = None """Version of Watson Machine Learning instance""" params: Optional[dict] = None """Model parameters to use during generate requests.""" verify: Union[str, bool] = "" """User can pass as verify one of following: the path to a CA_BUNDLE file the path of directory with certificates of trusted CAs True - default path to truststore will be taken False - no verification will be made""" streaming: bool = False """ Whether to stream the results or not. 
""" watsonx_model: Any class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @classmethod def is_lc_serializable(cls) -> bool: return False @property def lc_secrets(self) -> Dict[str, str]: return { "url": "WATSONX_URL", "apikey": "WATSONX_APIKEY", "token": "WATSONX_TOKEN", "password": "WATSONX_PASSWORD", "username": "WATSONX_USERNAME", "instance_id": "WATSONX_INSTANCE_ID", } @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that credentials and python package exists in environment.""" values["url"] = convert_to_secret_str( get_from_dict_or_env(values, "url", "WATSONX_URL") ) if "cloud.ibm.com" in values.get("url", "").get_secret_value(): values["apikey"] = convert_to_secret_str( get_from_dict_or_env(values, "apikey", "WATSONX_APIKEY") ) else: if ( not values["token"] and "WATSONX_TOKEN" not in os.environ and not values["password"] and "WATSONX_PASSWORD" not in os.environ and not values["apikey"] and "WATSONX_APIKEY" not in os.environ ): raise ValueError( "Did not find 'token', 'password' or 'apikey'," " please add an environment variable" " `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or 'WATSONX_APIKEY' " "which contains it," " or pass 'token', 'password' or 'apikey'" " as a named parameter." ) elif values["token"] or "WATSONX_TOKEN" in os.environ: values["token"] = convert_to_secret_str( get_from_dict_or_env(values, "token", "WATSONX_TOKEN") ) elif values["password"] or "WATSONX_PASSWORD" in os.environ: values["password"] = convert_to_secret_str( get_from_dict_or_env(values, "password", "WATSONX_PASSWORD") ) values["username"] = convert_to_secret_str( get_from_dict_or_env(values, "username", "WATSONX_USERNAME") ) elif values["apikey"] or "WATSONX_APIKEY" in os.environ: values["apikey"] = convert_to_secret_str( get_from_dict_or_env(values, "apikey", "WATSONX_APIKEY") ) values["username"] = convert_to_secret_str( get_from_dict_or_env(values, "username", "WATSONX_USERNAME") ) if not values["instance_id"] or "WATSONX_INSTANCE_ID" not in os.environ: values["instance_id"] = convert_to_secret_str( get_from_dict_or_env(values, "instance_id", "WATSONX_INSTANCE_ID") ) try: from ibm_watson_machine_learning.foundation_models import Model credentials = { "url": values["url"].get_secret_value() if values["url"] else None, "apikey": values["apikey"].get_secret_value() if values["apikey"] else None, "token": values["token"].get_secret_value() if values["token"] else None, "password": values["password"].get_secret_value() if values["password"] else None, "username": values["username"].get_secret_value() if values["username"] else None, "instance_id": values["instance_id"].get_secret_value() if values["instance_id"] else None, "version": values["version"].get_secret_value() if values["version"] else None, } credentials_without_none_value = { key: value for key, value in credentials.items() if value is not None } watsonx_model = Model( model_id=values["model_id"], credentials=credentials_without_none_value, params=values["params"], project_id=values["project_id"], space_id=values["space_id"], verify=values["verify"], ) values["watsonx_model"] = watsonx_model except ImportError: raise ImportError( "Could not import ibm_watson_machine_learning python package. " "Please install it with `pip install ibm_watson_machine_learning`." 
) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_id": self.model_id, "params": self.params, "project_id": self.project_id, "space_id": self.space_id, } @property def _llm_type(self) -> str: """Return type of llm.""" return "IBM watsonx.ai" @staticmethod def _extract_token_usage( response: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: if response is None: return {"generated_token_count": 0, "input_token_count": 0} input_token_count = 0 generated_token_count = 0 def get_count_value(key: str, result: Dict[str, Any]) -> int: return result.get(key, 0) or 0 for res in response: results = res.get("results") if results: input_token_count += get_count_value("input_token_count", results[0]) generated_token_count += get_count_value( "generated_token_count", results[0] ) return { "generated_token_count": generated_token_count, "input_token_count": input_token_count, } def _create_llm_result(self, response: List[dict]) -> LLMResult: """Create the LLMResult from the choices and prompts.""" generations = [] for res in response: results = res.get("results") if results: finish_reason = results[0].get("stop_reason") gen = Generation( text=results[0].get("generated_text"), generation_info={"finish_reason": finish_reason}, ) generations.append([gen]) final_token_usage = self._extract_token_usage(response) llm_output = {"token_usage": final_token_usage, "model_id": self.model_id} return LLMResult(generations=generations, llm_output=llm_output) def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the IBM watsonx.ai inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. run_manager: Optional callback manager. Returns: The string generated by the model. Example: .. code-block:: python response = watsonxllm("What is a molecule") """ result = self._generate( prompts=[prompt], stop=stop, run_manager=run_manager, **kwargs ) return result.generations[0][0].text def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> LLMResult: """Call the IBM watsonx.ai inference endpoint which then generate the response. Args: prompts: List of strings (prompts) to pass into the model. stop: Optional list of stop words to use when generating. run_manager: Optional callback manager. Returns: The full LLMResult output. Example: .. code-block:: python response = watsonxllm.generate(["What is a molecule"]) """ should_stream = stream if stream is not None else self.streaming if should_stream: if len(prompts) > 1: raise ValueError( f"WatsonxLLM currently only supports single prompt, got {prompts}" ) generation = GenerationChunk(text="") stream_iter = self._stream( prompts[0], stop=stop, run_manager=run_manager, **kwargs ) for chunk in stream_iter: if generation is None: generation = chunk else: generation += chunk assert generation is not None return LLMResult(generations=[[generation]]) else: response = self.watsonx_model.generate(prompt=prompts) return self._create_llm_result(response) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Call the IBM watsonx.ai inference endpoint which then streams the response. 
Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. run_manager: Optional callback manager. Returns: The iterator which yields generation chunks. Example: .. code-block:: python response = watsonxllm.stream("What is a molecule") for chunk in response: print(chunk, end='') """ for chunk in self.watsonx_model.generate_text_stream(prompt=prompt): if chunk: yield GenerationChunk(text=chunk) if run_manager: run_manager.on_llm_new_token(chunk)
[]
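Usage note for WatsonxLLM above — a condensed sketch following the class docstring; the credentials, project id, and model id are placeholders.

# Sketch: generate and stream with WatsonxLLM; every id and key below is a placeholder.
from langchain_community.llms.watsonxllm import WatsonxLLM

llm = WatsonxLLM(
    model_id="google/flan-ul2",
    url="https://us-south.ml.cloud.ibm.com",
    apikey="*****",      # or set WATSONX_APIKEY
    project_id="*****",
)
print(llm("What is a molecule"))
for chunk in llm.stream("What is a molecule"):
    print(chunk, end="")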
2024-01-10
mth93/langchain
libs~community~langchain_community~callbacks~tracers~comet.py
from types import ModuleType, SimpleNamespace
from typing import TYPE_CHECKING, Any, Callable, Dict

from libs.core.langchain_core.tracers import BaseTracer

if TYPE_CHECKING:
    from uuid import UUID

    from comet_llm import Span
    from comet_llm.chains.chain import Chain

    from langchain_community.callbacks.tracers.schemas import Run


def _get_run_type(run: "Run") -> str:
    if isinstance(run.run_type, str):
        return run.run_type
    elif hasattr(run.run_type, "value"):
        return run.run_type.value
    else:
        return str(run.run_type)


def import_comet_llm_api() -> SimpleNamespace:
    """Import comet_llm api and raise an error if it is not installed."""
    try:
        from comet_llm import (
            experiment_info,  # noqa: F401
            flush,  # noqa: F401
        )
        from comet_llm.chains import api as chain_api  # noqa: F401
        from comet_llm.chains import (
            chain,  # noqa: F401
            span,  # noqa: F401
        )

    except ImportError:
        raise ImportError(
            "To use the CometTracer you need to have the "
            "`comet_llm>=2.0.0` python package installed. Please install it with"
            " `pip install -U comet_llm`"
        )
    return SimpleNamespace(
        chain=chain,
        span=span,
        chain_api=chain_api,
        experiment_info=experiment_info,
        flush=flush,
    )


class CometTracer(BaseTracer):
    """Comet Tracer."""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the Comet Tracer."""
        super().__init__(**kwargs)
        self._span_map: Dict["UUID", "Span"] = {}
        """Map from run id to span."""
        self._chains_map: Dict["UUID", "Chain"] = {}
        """Map from run id to chain."""
        self._initialize_comet_modules()

    def _initialize_comet_modules(self) -> None:
        comet_llm_api = import_comet_llm_api()
        self._chain: ModuleType = comet_llm_api.chain
        self._span: ModuleType = comet_llm_api.span
        self._chain_api: ModuleType = comet_llm_api.chain_api
        self._experiment_info: ModuleType = comet_llm_api.experiment_info
        self._flush: Callable[[], None] = comet_llm_api.flush

    def _persist_run(self, run: "Run") -> None:
        chain_ = self._chains_map[run.id]
        chain_.set_outputs(outputs=run.outputs)
        self._chain_api.log_chain(chain_)

    def _process_start_trace(self, run: "Run") -> None:
        if not run.parent_run_id:
            # This is the first run, which maps to a chain
            chain_: "Chain" = self._chain.Chain(
                inputs=run.inputs,
                metadata=None,
                experiment_info=self._experiment_info.get(),
            )
            self._chains_map[run.id] = chain_
        else:
            span: "Span" = self._span.Span(
                inputs=run.inputs,
                category=_get_run_type(run),
                metadata=run.extra,
                name=run.name,
            )
            span.__api__start__(self._chains_map[run.parent_run_id])
            self._chains_map[run.id] = self._chains_map[run.parent_run_id]
            self._span_map[run.id] = span

    def _process_end_trace(self, run: "Run") -> None:
        if not run.parent_run_id:
            pass
            # Langchain will call _persist_run for us
        else:
            span = self._span_map[run.id]
            span.set_outputs(outputs=run.outputs)
            span.__api__end__()

    def flush(self) -> None:
        self._flush()

    def _on_llm_start(self, run: "Run") -> None:
        """Process the LLM Run upon start."""
        self._process_start_trace(run)

    def _on_llm_end(self, run: "Run") -> None:
        """Process the LLM Run."""
        self._process_end_trace(run)

    def _on_llm_error(self, run: "Run") -> None:
        """Process the LLM Run upon error."""
        self._process_end_trace(run)

    def _on_chain_start(self, run: "Run") -> None:
        """Process the Chain Run upon start."""
        self._process_start_trace(run)

    def _on_chain_end(self, run: "Run") -> None:
        """Process the Chain Run."""
        self._process_end_trace(run)

    def _on_chain_error(self, run: "Run") -> None:
        """Process the Chain Run upon error."""
        self._process_end_trace(run)

    def _on_tool_start(self, run: "Run") -> None:
        """Process the Tool Run upon start."""
        self._process_start_trace(run)

    def _on_tool_end(self, run: "Run") -> None:
        """Process the Tool Run."""
        self._process_end_trace(run)

    def _on_tool_error(self, run: "Run") -> None:
        """Process the Tool Run upon error."""
        self._process_end_trace(run)
[]
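Usage note for CometTracer above — a sketch assuming comet_llm>=2.0.0 is installed and Comet credentials are configured; attaching it to ChatOpenAI is just one illustrative choice of runnable.

# Sketch: pass the tracer as a callback so runs are mirrored into Comet.
# Assumes `pip install comet_llm` and Comet credentials (e.g. COMET_API_KEY) are configured.
from libs.core.langchain_core.messages import HumanMessage
from langchain_community.callbacks.tracers.comet import CometTracer
from langchain_community.chat_models import ChatOpenAI

tracer = CometTracer()
chat = ChatOpenAI(callbacks=[tracer])  # any component that accepts callbacks works
chat.invoke([HumanMessage(content="Hello")])
tracer.flush()  # push any buffered chains to Comet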
2024-01-10
mth93/langchain
libs~langchain~langchain~output_parsers~yaml.py
import json
import re
from typing import Type, TypeVar

import yaml
from libs.core.langchain_core.exceptions import OutputParserException
from libs.core.langchain_core.output_parsers import BaseOutputParser
from libs.core.langchain_core.pydantic_v1 import BaseModel, ValidationError

from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS

T = TypeVar("T", bound=BaseModel)


class YamlOutputParser(BaseOutputParser[T]):
    """Parse YAML output using a pydantic model."""

    pydantic_object: Type[T]
    """The pydantic model to parse."""
    pattern: re.Pattern = re.compile(
        r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
    )
    """Regex pattern to match yaml code blocks
    within triple backticks with optional yaml or yml prefix."""

    def parse(self, text: str) -> T:
        try:
            # Greedy search for 1st yaml candidate.
            match = re.search(self.pattern, text.strip())
            yaml_str = ""
            if match:
                yaml_str = match.group("yaml")

            json_object = yaml.safe_load(yaml_str)
            return self.pydantic_object.parse_obj(json_object)

        except (yaml.YAMLError, ValidationError) as e:
            name = self.pydantic_object.__name__
            msg = f"Failed to parse {name} from completion {text}. Got: {e}"
            raise OutputParserException(msg, llm_output=text)

    def get_format_instructions(self) -> str:
        schema = self.pydantic_object.schema()

        # Remove extraneous fields.
        reduced_schema = schema
        if "title" in reduced_schema:
            del reduced_schema["title"]
        if "type" in reduced_schema:
            del reduced_schema["type"]
        # Ensure yaml in context is well-formed with double quotes.
        schema_str = json.dumps(reduced_schema)

        return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)

    @property
    def _type(self) -> str:
        return "yaml"
[]
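Usage note for YamlOutputParser above — a self-contained sketch with a toy pydantic model and a hand-written completion.

# Sketch: parse a fenced YAML completion into a pydantic object.
from libs.core.langchain_core.pydantic_v1 import BaseModel
from langchain.output_parsers.yaml import YamlOutputParser

class Joke(BaseModel):
    setup: str
    punchline: str

parser = YamlOutputParser(pydantic_object=Joke)
completion = """```yaml
setup: Why did the chicken cross the road?
punchline: To get to the other side.
```"""
joke = parser.parse(completion)
print(joke.punchline)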
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~matching_engine.py
from __future__ import annotations import json import logging import time import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.utilities.vertexai import get_client_info if TYPE_CHECKING: from google.cloud import storage from google.cloud.aiplatform import MatchingEngineIndex, MatchingEngineIndexEndpoint from google.cloud.aiplatform.matching_engine.matching_engine_index_endpoint import ( Namespace, ) from google.oauth2.service_account import Credentials from langchain_community.embeddings import TensorflowHubEmbeddings logger = logging.getLogger(__name__) class MatchingEngine(VectorStore): """`Google Vertex AI Vector Search` (previously Matching Engine) vector store. While the embeddings are stored in the Matching Engine, the embedded documents will be stored in GCS. An existing Index and corresponding Endpoint are preconditions for using this module. See usage in docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb Note that this implementation is mostly meant for reading if you are planning to do a real time implementation. While reading is a real time operation, updating the index takes close to one hour.""" def __init__( self, project_id: str, index: MatchingEngineIndex, endpoint: MatchingEngineIndexEndpoint, embedding: Embeddings, gcs_client: storage.Client, gcs_bucket_name: str, credentials: Optional[Credentials] = None, *, document_id_key: Optional[str] = None, ): """Google Vertex AI Vector Search (previously Matching Engine) implementation of the vector store. While the embeddings are stored in the Matching Engine, the embedded documents will be stored in GCS. An existing Index and corresponding Endpoint are preconditions for using this module. See usage in docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb. Note that this implementation is mostly meant for reading if you are planning to do a real time implementation. While reading is a real time operation, updating the index takes close to one hour. Attributes: project_id: The GCS project id. index: The created index class. See ~:func:`MatchingEngine.from_components`. endpoint: The created endpoint class. See ~:func:`MatchingEngine.from_components`. embedding: A :class:`Embeddings` that will be used for embedding the text sent. If none is sent, then the multilingual Tensorflow Universal Sentence Encoder will be used. gcs_client: The GCS client. gcs_bucket_name: The GCS bucket name. credentials (Optional): Created GCP credentials. document_id_key (Optional): Key for storing document ID in document metadata. If None, document ID will not be returned in document metadata. 
""" super().__init__() self._validate_google_libraries_installation() self.project_id = project_id self.index = index self.endpoint = endpoint self.embedding = embedding self.gcs_client = gcs_client self.credentials = credentials self.gcs_bucket_name = gcs_bucket_name self.document_id_key = document_id_key @property def embeddings(self) -> Embeddings: return self.embedding def _validate_google_libraries_installation(self) -> None: """Validates that Google libraries that are needed are installed.""" try: from google.cloud import aiplatform, storage # noqa: F401 from google.oauth2 import service_account # noqa: F401 except ImportError: raise ImportError( "You must run `pip install --upgrade " "google-cloud-aiplatform google-cloud-storage`" "to use the MatchingEngine Vectorstore." ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters. Returns: List of ids from adding the texts into the vectorstore. """ texts = list(texts) if metadatas is not None and len(texts) != len(metadatas): raise ValueError( "texts and metadatas do not have the same length. Received " f"{len(texts)} texts and {len(metadatas)} metadatas." ) logger.debug("Embedding documents.") embeddings = self.embedding.embed_documents(texts) jsons = [] ids = [] # Could be improved with async. for idx, (embedding, text) in enumerate(zip(embeddings, texts)): id = str(uuid.uuid4()) ids.append(id) json_: dict = {"id": id, "embedding": embedding} if metadatas is not None: json_["metadata"] = metadatas[idx] jsons.append(json_) self._upload_to_gcs(text, f"documents/{id}") logger.debug(f"Uploaded {len(ids)} documents to GCS.") # Creating json lines from the embedded documents. result_str = "\n".join([json.dumps(x) for x in jsons]) filename_prefix = f"indexes/{uuid.uuid4()}" filename = f"{filename_prefix}/{time.time()}.json" self._upload_to_gcs(result_str, filename) logger.debug( f"Uploaded updated json with embeddings to " f"{self.gcs_bucket_name}/{filename}." ) self.index = self.index.update_embeddings( contents_delta_uri=f"gs://{self.gcs_bucket_name}/{filename_prefix}/" ) logger.debug("Updated index with new configuration.") return ids def _upload_to_gcs(self, data: str, gcs_location: str) -> None: """Uploads data to gcs_location. Args: data: The data that will be stored. gcs_location: The location where the data will be stored. """ bucket = self.gcs_client.get_bucket(self.gcs_bucket_name) blob = bucket.blob(gcs_location) blob.upload_from_string(data) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[List[Namespace]] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query and their cosine distance from the query. Args: query: String query look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional. A list of Namespaces for filtering the matching results. For example: [Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])] will match datapoints that satisfy "red color" but not include datapoints with "squared shape". Please refer to https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json for more detail. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text and cosine distance in float for each. 
Lower score represents more similarity. """ logger.debug(f"Embedding query {query}.") embedding_query = self.embedding.embed_query(query) return self.similarity_search_by_vector_with_score( embedding_query, k=k, filter=filter ) def similarity_search_by_vector_with_score( self, embedding: List[float], k: int = 4, filter: Optional[List[Namespace]] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to the embedding and their cosine distance. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional. A list of Namespaces for filtering the matching results. For example: [Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])] will match datapoints that satisfy "red color" but not include datapoints with "squared shape". Please refer to https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json for more detail. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ filter = filter or [] # If the endpoint is public we use the find_neighbors function. if hasattr(self.endpoint, "_public_match_client") and ( self.endpoint._public_match_client ): response = self.endpoint.find_neighbors( deployed_index_id=self._get_index_id(), queries=[embedding], num_neighbors=k, filter=filter, ) else: response = self.endpoint.match( deployed_index_id=self._get_index_id(), queries=[embedding], num_neighbors=k, filter=filter, ) logger.debug(f"Found {len(response)} matches.") if len(response) == 0: return [] docs: List[Tuple[Document, float]] = [] # I'm only getting the first one because queries receives an array # and the similarity_search method only receives one query. This # means that the match method will always return an array with only # one element. for result in response[0]: page_content = self._download_from_gcs(f"documents/{result.id}") # TODO: return all metadata. metadata = {} if self.document_id_key is not None: metadata[self.document_id_key] = result.id document = Document( page_content=page_content, metadata=metadata, ) docs.append((document, result.distance)) logger.debug("Downloaded documents for query.") return docs def similarity_search( self, query: str, k: int = 4, filter: Optional[List[Namespace]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: The string that will be used to search for similar documents. k: The amount of neighbors that will be retrieved. filter: Optional. A list of Namespaces for filtering the matching results. For example: [Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])] will match datapoints that satisfy "red color" but not include datapoints with "squared shape". Please refer to https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json for more detail. Returns: A list of k matching documents. """ docs_and_scores = self.similarity_search_with_score( query, k=k, filter=filter, **kwargs ) return [doc for doc, _ in docs_and_scores] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[List[Namespace]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to the embedding. Args: embedding: Embedding to look up documents similar to. k: The amount of neighbors that will be retrieved. filter: Optional. A list of Namespaces for filtering the matching results. 
For example: [Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])] will match datapoints that satisfy "red color" but not include datapoints with "squared shape". Please refer to https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json for more detail. Returns: A list of k matching documents. """ docs_and_scores = self.similarity_search_by_vector_with_score( embedding, k=k, filter=filter, **kwargs ) return [doc for doc, _ in docs_and_scores] def _get_index_id(self) -> str: """Gets the correct index id for the endpoint. Returns: The index id if found (which should be found) or throws ValueError otherwise. """ for index in self.endpoint.deployed_indexes: if index.index == self.index.resource_name: return index.id raise ValueError( f"No index with id {self.index.resource_name} " f"deployed on endpoint " f"{self.endpoint.display_name}." ) def _download_from_gcs(self, gcs_location: str) -> str: """Downloads from GCS in text format. Args: gcs_location: The location where the file is located. Returns: The string contents of the file. """ bucket = self.gcs_client.get_bucket(self.gcs_bucket_name) blob = bucket.blob(gcs_location) return blob.download_as_string() @classmethod def from_texts( cls: Type["MatchingEngine"], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> "MatchingEngine": """Use from components instead.""" raise NotImplementedError( "This method is not implemented. Instead, you should initialize the class" " with `MatchingEngine.from_components(...)` and then call " "`add_texts`" ) @classmethod def from_components( cls: Type["MatchingEngine"], project_id: str, region: str, gcs_bucket_name: str, index_id: str, endpoint_id: str, credentials_path: Optional[str] = None, embedding: Optional[Embeddings] = None, **kwargs: Any, ) -> "MatchingEngine": """Takes the object creation out of the constructor. Args: project_id: The GCP project id. region: The default location making the API calls. It must have the same location as the GCS bucket and must be regional. gcs_bucket_name: The location where the vectors will be stored in order for the index to be created. index_id: The id of the created index. endpoint_id: The id of the created endpoint. credentials_path: (Optional) The path of the Google credentials on the local file system. embedding: The :class:`Embeddings` that will be used for embedding the texts. kwargs: Additional keyword arguments to pass to MatchingEngine.__init__(). Returns: A configured MatchingEngine with the texts added to the index. """ gcs_bucket_name = cls._validate_gcs_bucket(gcs_bucket_name) credentials = cls._create_credentials_from_file(credentials_path) index = cls._create_index_by_id(index_id, project_id, region, credentials) endpoint = cls._create_endpoint_by_id( endpoint_id, project_id, region, credentials ) gcs_client = cls._get_gcs_client(credentials, project_id) cls._init_aiplatform(project_id, region, gcs_bucket_name, credentials) return cls( project_id=project_id, index=index, endpoint=endpoint, embedding=embedding or cls._get_default_embeddings(), gcs_client=gcs_client, credentials=credentials, gcs_bucket_name=gcs_bucket_name, **kwargs, ) @classmethod def _validate_gcs_bucket(cls, gcs_bucket_name: str) -> str: """Validates the gcs_bucket_name as a bucket name. Args: gcs_bucket_name: The received bucket uri. Returns: A valid gcs_bucket_name or throws ValueError if full path is provided. 
""" gcs_bucket_name = gcs_bucket_name.replace("gs://", "") if "/" in gcs_bucket_name: raise ValueError( f"The argument gcs_bucket_name should only be " f"the bucket name. Received {gcs_bucket_name}" ) return gcs_bucket_name @classmethod def _create_credentials_from_file( cls, json_credentials_path: Optional[str] ) -> Optional[Credentials]: """Creates credentials for GCP. Args: json_credentials_path: The path on the file system where the credentials are stored. Returns: An optional of Credentials or None, in which case the default will be used. """ from google.oauth2 import service_account credentials = None if json_credentials_path is not None: credentials = service_account.Credentials.from_service_account_file( json_credentials_path ) return credentials @classmethod def _create_index_by_id( cls, index_id: str, project_id: str, region: str, credentials: "Credentials" ) -> MatchingEngineIndex: """Creates a MatchingEngineIndex object by id. Args: index_id: The created index id. project_id: The project to retrieve index from. region: Location to retrieve index from. credentials: GCS credentials. Returns: A configured MatchingEngineIndex. """ from google.cloud import aiplatform logger.debug(f"Creating matching engine index with id {index_id}.") return aiplatform.MatchingEngineIndex( index_name=index_id, project=project_id, location=region, credentials=credentials, ) @classmethod def _create_endpoint_by_id( cls, endpoint_id: str, project_id: str, region: str, credentials: "Credentials" ) -> MatchingEngineIndexEndpoint: """Creates a MatchingEngineIndexEndpoint object by id. Args: endpoint_id: The created endpoint id. project_id: The project to retrieve index from. region: Location to retrieve index from. credentials: GCS credentials. Returns: A configured MatchingEngineIndexEndpoint. """ from google.cloud import aiplatform logger.debug(f"Creating endpoint with id {endpoint_id}.") return aiplatform.MatchingEngineIndexEndpoint( index_endpoint_name=endpoint_id, project=project_id, location=region, credentials=credentials, ) @classmethod def _get_gcs_client( cls, credentials: "Credentials", project_id: str ) -> "storage.Client": """Lazily creates a GCS client. Returns: A configured GCS client. """ from google.cloud import storage return storage.Client( credentials=credentials, project=project_id, client_info=get_client_info(module="vertex-ai-matching-engine"), ) @classmethod def _init_aiplatform( cls, project_id: str, region: str, gcs_bucket_name: str, credentials: "Credentials", ) -> None: """Configures the aiplatform library. Args: project_id: The GCP project id. region: The default location making the API calls. It must have the same location as the GCS bucket and must be regional. gcs_bucket_name: GCS staging location. credentials: The GCS Credentials object. """ from google.cloud import aiplatform logger.debug( f"Initializing AI Platform for project {project_id} on " f"{region} and for {gcs_bucket_name}." ) aiplatform.init( project=project_id, location=region, staging_bucket=gcs_bucket_name, credentials=credentials, ) @classmethod def _get_default_embeddings(cls) -> "TensorflowHubEmbeddings": """This function returns the default embedding. Returns: Default TensorflowHubEmbeddings to use. """ from langchain_community.embeddings import TensorflowHubEmbeddings return TensorflowHubEmbeddings()
[]
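A minimal usage sketch for the MatchingEngine store shown above. It assumes the class is importable as langchain_community.vectorstores.MatchingEngine; the project, region, bucket, index and endpoint values are placeholders for an already-deployed Vertex AI Matching Engine setup, not values from the original file.

from langchain_community.vectorstores import MatchingEngine

# Placeholder identifiers for an existing, already deployed index and endpoint.
store = MatchingEngine.from_components(
    project_id="my-gcp-project",
    region="us-central1",                 # must match the GCS bucket's region
    gcs_bucket_name="my-staging-bucket",  # bucket name only, no "gs://" prefix
    index_id="1234567890123456789",
    endpoint_id="9876543210987654321",
)

store.add_texts(["Vertex AI Matching Engine stores document embeddings."])
docs = store.similarity_search("where are the embeddings stored?", k=4)
print(docs)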
2024-01-10
mth93/langchain
libs~langchain~langchain~output_parsers~pydantic.py
import json import re from typing import Type, TypeVar from libs.core.langchain_core.exceptions import OutputParserException from libs.core.langchain_core.output_parsers import BaseOutputParser from libs.core.langchain_core.pydantic_v1 import BaseModel, ValidationError from langchain.output_parsers.format_instructions import PYDANTIC_FORMAT_INSTRUCTIONS T = TypeVar("T", bound=BaseModel) class PydanticOutputParser(BaseOutputParser[T]): """Parse an output using a pydantic model.""" pydantic_object: Type[T] """The pydantic model to parse.""" def parse(self, text: str) -> T: try: # Greedy search for 1st json candidate. match = re.search( r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL ) json_str = "" if match: json_str = match.group() json_object = json.loads(json_str, strict=False) return self.pydantic_object.parse_obj(json_object) except (json.JSONDecodeError, ValidationError) as e: name = self.pydantic_object.__name__ msg = f"Failed to parse {name} from completion {text}. Got: {e}" raise OutputParserException(msg, llm_output=text) def get_format_instructions(self) -> str: schema = self.pydantic_object.schema() # Remove extraneous fields. reduced_schema = schema if "title" in reduced_schema: del reduced_schema["title"] if "type" in reduced_schema: del reduced_schema["type"] # Ensure json in context is well-formed with double quotes. schema_str = json.dumps(reduced_schema) return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str) @property def _type(self) -> str: return "pydantic"
[]
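A hedged usage sketch for the PydanticOutputParser defined above; the Joke model and the sample completion string are invented for illustration, and the import path follows the file's location under langchain.output_parsers.

from libs.core.langchain_core.pydantic_v1 import BaseModel, Field

from langchain.output_parsers import PydanticOutputParser


class Joke(BaseModel):
    setup: str = Field(description="question that sets up the joke")
    punchline: str = Field(description="answer that resolves the joke")


parser = PydanticOutputParser(pydantic_object=Joke)

# Schema-based instructions to embed in a prompt so the model emits matching JSON.
print(parser.get_format_instructions())

# Parse a (hypothetical) model completion; the greedy regex above extracts the JSON.
joke = parser.parse(
    'Sure! {"setup": "Why did the vector cross the hyperplane?", '
    '"punchline": "To get to the other side of the margin."}'
)
print(joke.punchline)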
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_message_histories~elasticsearch.py
import json import logging from time import time from typing import TYPE_CHECKING, Any, Dict, List, Optional from libs.core.langchain_core.chat_history import BaseChatMessageHistory from libs.core.langchain_core.messages import ( BaseMessage, message_to_dict, messages_from_dict, ) if TYPE_CHECKING: from elasticsearch import Elasticsearch logger = logging.getLogger(__name__) class ElasticsearchChatMessageHistory(BaseChatMessageHistory): """Chat message history that stores history in Elasticsearch. Args: es_url: URL of the Elasticsearch instance to connect to. es_cloud_id: Cloud ID of the Elasticsearch instance to connect to. es_user: Username to use when connecting to Elasticsearch. es_password: Password to use when connecting to Elasticsearch. es_api_key: API key to use when connecting to Elasticsearch. es_connection: Optional pre-existing Elasticsearch connection. esnsure_ascii: Used to escape ASCII symbols in json.dumps. Defaults to True. index: Name of the index to use. session_id: Arbitrary key that is used to store the messages of a single chat session. """ def __init__( self, index: str, session_id: str, *, es_connection: Optional["Elasticsearch"] = None, es_url: Optional[str] = None, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_api_key: Optional[str] = None, es_password: Optional[str] = None, esnsure_ascii: Optional[bool] = True, ): self.index: str = index self.session_id: str = session_id self.ensure_ascii: bool = esnsure_ascii # Initialize Elasticsearch client from passed client arg or connection info if es_connection is not None: self.client = es_connection.options( headers={"user-agent": self.get_user_agent()} ) elif es_url is not None or es_cloud_id is not None: self.client = ElasticsearchChatMessageHistory.connect_to_elasticsearch( es_url=es_url, username=es_user, password=es_password, cloud_id=es_cloud_id, api_key=es_api_key, ) else: raise ValueError( """Either provide a pre-existing Elasticsearch connection, \ or valid credentials for creating a new connection.""" ) if self.client.indices.exists(index=index): logger.debug( f"Chat history index {index} already exists, skipping creation." ) else: logger.debug(f"Creating index {index} for storing chat history.") self.client.indices.create( index=index, mappings={ "properties": { "session_id": {"type": "keyword"}, "created_at": {"type": "date"}, "history": {"type": "text"}, } }, ) @staticmethod def get_user_agent() -> str: from langchain_community import __version__ return f"langchain-py-ms/{__version__}" @staticmethod def connect_to_elasticsearch( *, es_url: Optional[str] = None, cloud_id: Optional[str] = None, api_key: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, ) -> "Elasticsearch": try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) if es_url and cloud_id: raise ValueError( "Both es_url and cloud_id are defined. Please provide only one." 
) connection_params: Dict[str, Any] = {} if es_url: connection_params["hosts"] = [es_url] elif cloud_id: connection_params["cloud_id"] = cloud_id else: raise ValueError("Please provide either elasticsearch_url or cloud_id.") if api_key: connection_params["api_key"] = api_key elif username and password: connection_params["basic_auth"] = (username, password) es_client = elasticsearch.Elasticsearch( **connection_params, headers={"user-agent": ElasticsearchChatMessageHistory.get_user_agent()}, ) try: es_client.info() except Exception as err: logger.error(f"Error connecting to Elasticsearch: {err}") raise err return es_client @property def messages(self) -> List[BaseMessage]: # type: ignore[override] """Retrieve the messages from Elasticsearch""" try: from elasticsearch import ApiError result = self.client.search( index=self.index, query={"term": {"session_id": self.session_id}}, sort="created_at:asc", ) except ApiError as err: logger.error(f"Could not retrieve messages from Elasticsearch: {err}") raise err if result and len(result["hits"]["hits"]) > 0: items = [ json.loads(document["_source"]["history"]) for document in result["hits"]["hits"] ] else: items = [] return messages_from_dict(items) def add_message(self, message: BaseMessage) -> None: """Add a message to the chat session in Elasticsearch""" try: from elasticsearch import ApiError self.client.index( index=self.index, document={ "session_id": self.session_id, "created_at": round(time() * 1000), "history": json.dumps( message_to_dict(message), ensure_ascii=self.ensure_ascii, ), }, refresh=True, ) except ApiError as err: logger.error(f"Could not add message to Elasticsearch: {err}") raise err def clear(self) -> None: """Clear session memory in Elasticsearch""" try: from elasticsearch import ApiError self.client.delete_by_query( index=self.index, query={"term": {"session_id": self.session_id}}, refresh=True, ) except ApiError as err: logger.error(f"Could not clear session memory in Elasticsearch: {err}") raise err
[]
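A short usage sketch for the ElasticsearchChatMessageHistory class above, assuming it is exported from langchain_community.chat_message_histories; the URL, index name and session id are placeholders for a locally running Elasticsearch instance.

from libs.core.langchain_core.messages import AIMessage, HumanMessage

from langchain_community.chat_message_histories import ElasticsearchChatMessageHistory

# Placeholder connection details; the index is created on first use if missing.
history = ElasticsearchChatMessageHistory(
    index="chat-history",
    session_id="session-123",
    es_url="http://localhost:9200",
)

history.add_message(HumanMessage(content="Hi, can you summarise our last call?"))
history.add_message(AIMessage(content="Sure - you discussed the Q3 roadmap."))
print(history.messages)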
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~graph_qa~nebulagraph.py
"""Question answering over a graph.""" from __future__ import annotations from typing import Any, Dict, List, Optional from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.prompts import BasePromptTemplate from libs.core.langchain_core.pydantic_v1 import Field from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, NGQL_GENERATION_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.nebula_graph import NebulaGraph class NebulaGraphQAChain(Chain): """Chain for question-answering against a graph by generating nGQL statements. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ graph: NebulaGraph = Field(exclude=True) ngql_generation_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, ngql_prompt: BasePromptTemplate = NGQL_GENERATION_PROMPT, **kwargs: Any, ) -> NebulaGraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) ngql_generation_chain = LLMChain(llm=llm, prompt=ngql_prompt) return cls( qa_chain=qa_chain, ngql_generation_chain=ngql_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Generate nGQL statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] generated_ngql = self.ngql_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) _run_manager.on_text("Generated nGQL:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_ngql, color="green", end="\n", verbose=self.verbose ) context = self.graph.query(generated_ngql) _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) return {self.output_key: result[self.qa_chain.output_key]}
[]
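A hedged sketch of wiring up the NebulaGraphQAChain above. The NebulaGraph constructor arguments are assumed from the library's usual defaults rather than taken from this file, and the space name, credentials and LLM choice are placeholders.

from langchain.chains import NebulaGraphQAChain
from langchain.graphs.nebula_graph import NebulaGraph
from langchain_community.chat_models import ChatOpenAI

# Assumed connection parameters for a locally running NebulaGraph cluster.
graph = NebulaGraph(
    space="basketballplayer",
    username="root",
    password="nebula",
    address="127.0.0.1",
    port=9669,
)

chain = NebulaGraphQAChain.from_llm(
    ChatOpenAI(temperature=0), graph=graph, verbose=True
)
print(chain.run("How many players are stored in the graph?"))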
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~epsilla.py
"""Wrapper around Epsilla vector database.""" from __future__ import annotations import logging import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Type from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore if TYPE_CHECKING: from pyepsilla import vectordb logger = logging.getLogger() class Epsilla(VectorStore): """ Wrapper around Epsilla vector database. As a prerequisite, you need to install ``pyepsilla`` package and have a running Epsilla vector database (for example, through our docker image) See the following documentation for how to run an Epsilla vector database: https://epsilla-inc.gitbook.io/epsilladb/quick-start Args: client (Any): Epsilla client to connect to. embeddings (Embeddings): Function used to embed the texts. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". Example: .. code-block:: python from langchain_community.vectorstores import Epsilla from pyepsilla import vectordb client = vectordb.Client() embeddings = OpenAIEmbeddings() db_path = "/tmp/vectorstore" db_name = "langchain_store" epsilla = Epsilla(client, embeddings, db_path, db_name) """ _LANGCHAIN_DEFAULT_DB_NAME = "langchain_store" _LANGCHAIN_DEFAULT_DB_PATH = "/tmp/langchain-epsilla" _LANGCHAIN_DEFAULT_TABLE_NAME = "langchain_collection" def __init__( self, client: Any, embeddings: Embeddings, db_path: Optional[str] = _LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str] = _LANGCHAIN_DEFAULT_DB_NAME, ): """Initialize with necessary components.""" try: import pyepsilla except ImportError as e: raise ImportError( "Could not import pyepsilla python package. " "Please install pyepsilla package with `pip install pyepsilla`." ) from e if not isinstance(client, pyepsilla.vectordb.Client): raise TypeError( f"client should be an instance of pyepsilla.vectordb.Client, " f"got {type(client)}" ) self._client: vectordb.Client = client self._db_name = db_name self._embeddings = embeddings self._collection_name = Epsilla._LANGCHAIN_DEFAULT_TABLE_NAME self._client.load_db(db_name=db_name, db_path=db_path) self._client.use_db(db_name=db_name) @property def embeddings(self) -> Optional[Embeddings]: return self._embeddings def use_collection(self, collection_name: str) -> None: """ Set default collection to use. Args: collection_name (str): The name of the collection. """ self._collection_name = collection_name def clear_data(self, collection_name: str = "") -> None: """ Clear data in a collection. Args: collection_name (Optional[str]): The name of the collection. If not provided, the default collection will be used. """ if not collection_name: collection_name = self._collection_name self._client.drop_table(collection_name) def get( self, collection_name: str = "", response_fields: Optional[List[str]] = None ) -> List[dict]: """Get the collection. Args: collection_name (Optional[str]): The name of the collection to retrieve data from. If not provided, the default collection will be used. response_fields (Optional[List[str]]): List of field names in the result. If not specified, all available fields will be responded. Returns: A list of the retrieved data. 
""" if not collection_name: collection_name = self._collection_name status_code, response = self._client.get( table_name=collection_name, response_fields=response_fields ) if status_code != 200: logger.error(f"Failed to get records: {response['message']}") raise Exception("Error: {}.".format(response["message"])) return response["result"] def _create_collection( self, table_name: str, embeddings: list, metadatas: Optional[list[dict]] = None ) -> None: if not embeddings: raise ValueError("Embeddings list is empty.") dim = len(embeddings[0]) fields: List[dict] = [ {"name": "id", "dataType": "INT"}, {"name": "text", "dataType": "STRING"}, {"name": "embeddings", "dataType": "VECTOR_FLOAT", "dimensions": dim}, ] if metadatas is not None: field_names = [field["name"] for field in fields] for metadata in metadatas: for key, value in metadata.items(): if key in field_names: continue d_type: str if isinstance(value, str): d_type = "STRING" elif isinstance(value, int): d_type = "INT" elif isinstance(value, float): d_type = "FLOAT" elif isinstance(value, bool): d_type = "BOOL" else: raise ValueError(f"Unsupported data type for {key}.") fields.append({"name": key, "dataType": d_type}) field_names.append(key) status_code, response = self._client.create_table( table_name, table_fields=fields ) if status_code != 200: if status_code == 409: logger.info(f"Continuing with the existing table {table_name}.") else: logger.error( f"Failed to create collection {table_name}: {response['message']}" ) raise Exception("Error: {}.".format(response["message"])) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, collection_name: Optional[str] = "", drop_old: Optional[bool] = False, **kwargs: Any, ) -> List[str]: """ Embed texts and add them to the database. Args: texts (Iterable[str]): The texts to embed. metadatas (Optional[List[dict]]): Metadata dicts attached to each of the texts. Defaults to None. collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: List of ids of the added texts. """ if not collection_name: collection_name = self._collection_name else: self._collection_name = collection_name if drop_old: self._client.drop_db(db_name=collection_name) texts = list(texts) try: embeddings = self._embeddings.embed_documents(texts) except NotImplementedError: embeddings = [self._embeddings.embed_query(x) for x in texts] if len(embeddings) == 0: logger.debug("Nothing to insert, skipping.") return [] self._create_collection( table_name=collection_name, embeddings=embeddings, metadatas=metadatas ) ids = [hash(uuid.uuid4()) for _ in texts] records = [] for index, id in enumerate(ids): record = { "id": id, "text": texts[index], "embeddings": embeddings[index], } if metadatas is not None: metadata = metadatas[index].items() for key, value in metadata: record[key] = value records.append(record) status_code, response = self._client.insert( table_name=collection_name, records=records ) if status_code != 200: logger.error( f"Failed to add records to {collection_name}: {response['message']}" ) raise Exception("Error: {}.".format(response["message"])) return [str(id) for id in ids] def similarity_search( self, query: str, k: int = 4, collection_name: str = "", **kwargs: Any ) -> List[Document]: """ Return the documents that are semantically most relevant to the query. 
Args: query (str): String to query the vectorstore with. k (Optional[int]): Number of documents to return. Defaults to 4. collection_name (Optional[str]): Collection to use. Defaults to "langchain_store" or the one provided before. Returns: List of documents that are semantically most relevant to the query """ if not collection_name: collection_name = self._collection_name query_vector = self._embeddings.embed_query(query) status_code, response = self._client.query( table_name=collection_name, query_field="embeddings", query_vector=query_vector, limit=k, ) if status_code != 200: logger.error(f"Search failed: {response['message']}.") raise Exception("Error: {}.".format(response["message"])) exclude_keys = ["id", "text", "embeddings"] return list( map( lambda item: Document( page_content=item["text"], metadata={ key: item[key] for key in item if key not in exclude_keys }, ), response["result"], ) ) @classmethod def from_texts( cls: Type[Epsilla], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Any = None, db_path: Optional[str] = _LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str] = _LANGCHAIN_DEFAULT_DB_NAME, collection_name: Optional[str] = _LANGCHAIN_DEFAULT_TABLE_NAME, drop_old: Optional[bool] = False, **kwargs: Any, ) -> Epsilla: """Create an Epsilla vectorstore from raw documents. Args: texts (List[str]): List of text data to be inserted. embeddings (Embeddings): Embedding function. client (pyepsilla.vectordb.Client): Epsilla client to connect to. metadatas (Optional[List[dict]]): Metadata for each text. Defaults to None. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: Epsilla: Epsilla vector store. """ instance = Epsilla(client, embedding, db_path=db_path, db_name=db_name) instance.add_texts( texts, metadatas=metadatas, collection_name=collection_name, drop_old=drop_old, **kwargs, ) return instance @classmethod def from_documents( cls: Type[Epsilla], documents: List[Document], embedding: Embeddings, client: Any = None, db_path: Optional[str] = _LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str] = _LANGCHAIN_DEFAULT_DB_NAME, collection_name: Optional[str] = _LANGCHAIN_DEFAULT_TABLE_NAME, drop_old: Optional[bool] = False, **kwargs: Any, ) -> Epsilla: """Create an Epsilla vectorstore from a list of documents. Args: texts (List[str]): List of text data to be inserted. embeddings (Embeddings): Embedding function. client (pyepsilla.vectordb.Client): Epsilla client to connect to. metadatas (Optional[List[dict]]): Metadata for each text. Defaults to None. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: Epsilla: Epsilla vector store. 
""" texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts( texts, embedding, metadatas=metadatas, client=client, db_path=db_path, db_name=db_name, collection_name=collection_name, drop_old=drop_old, **kwargs, )
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~parsers~doc_intelligence.py
from typing import Any, Iterator, Optional

from libs.core.langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob


class AzureAIDocumentIntelligenceParser(BaseBlobParser):
    """Loads a PDF with Azure Document Intelligence
    (formerly Forms Recognizer)."""

    def __init__(
        self,
        api_endpoint: str,
        api_key: str,
        api_version: Optional[str] = None,
        api_model: str = "prebuilt-layout",
        mode: str = "markdown",
    ):
        from azure.ai.documentintelligence import DocumentIntelligenceClient
        from azure.core.credentials import AzureKeyCredential

        kwargs = {}
        if api_version is not None:
            kwargs["api_version"] = api_version
        self.client = DocumentIntelligenceClient(
            endpoint=api_endpoint,
            credential=AzureKeyCredential(api_key),
            headers={"x-ms-useragent": "langchain-parser/1.0.0"},
            **kwargs,
        )
        self.api_model = api_model
        self.mode = mode
        assert self.mode in ["single", "page", "object", "markdown"]

    def _generate_docs_page(self, result: Any) -> Iterator[Document]:
        for p in result.pages:
            content = " ".join([line.content for line in p.lines])

            d = Document(
                page_content=content,
                metadata={
                    "page": p.page_number,
                },
            )
            yield d

    def _generate_docs_single(self, result: Any) -> Iterator[Document]:
        yield Document(page_content=result.content, metadata={})

    def _generate_docs_object(self, result: Any) -> Iterator[Document]:
        # record relationship between page id and span offset
        page_offset = []
        for page in result.pages:
            # assume that spans only contain 1 element, to double check
            page_offset.append(page.spans[0]["offset"])

        # paragraph
        # warning: paragraph content is overlapping with table content
        for para in result.paragraphs:
            yield Document(
                page_content=para.content,
                metadata={
                    "role": para.role,
                    "page": para.bounding_regions[0].page_number,
                    "bounding_box": para.bounding_regions[0].polygon,
                    "type": "paragraph",
                },
            )

        # table
        for table in result.tables:
            yield Document(
                page_content=table.cells,  # json object
                metadata={
                    "footnote": table.footnotes,
                    "caption": table.caption,
                    "page": table.bounding_regions[0].page_number,
                    "bounding_box": table.bounding_regions[0].polygon,
                    "row_count": table.row_count,
                    "column_count": table.column_count,
                    "type": "table",
                },
            )

    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        """Lazily parse the blob."""

        with blob.as_bytes_io() as file_obj:
            poller = self.client.begin_analyze_document(
                self.api_model,
                file_obj,
                content_type="application/octet-stream",
                output_content_format="markdown" if self.mode == "markdown" else "text",
            )
            result = poller.result()

            if self.mode in ["single", "markdown"]:
                yield from self._generate_docs_single(result)
            elif self.mode == "page":
                yield from self._generate_docs_page(result)
            else:
                yield from self._generate_docs_object(result)

    def parse_url(self, url: str) -> Iterator[Document]:
        from azure.ai.documentintelligence.models import AnalyzeDocumentRequest

        poller = self.client.begin_analyze_document(
            self.api_model,
            AnalyzeDocumentRequest(url_source=url),
            # content_type="application/octet-stream",
            output_content_format="markdown" if self.mode == "markdown" else "text",
        )
        result = poller.result()
        if self.mode in ["single", "markdown"]:
            yield from self._generate_docs_single(result)
        elif self.mode == "page":
            yield from self._generate_docs_page(result)
        else:
            yield from self._generate_docs_object(result)
[]
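A hedged usage sketch for the AzureAIDocumentIntelligenceParser above; the endpoint, key and PDF path are placeholders, and a real Azure AI Document Intelligence resource is required for the call to succeed.

from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers.doc_intelligence import (
    AzureAIDocumentIntelligenceParser,
)

# Placeholder resource endpoint and key.
parser = AzureAIDocumentIntelligenceParser(
    api_endpoint="https://<your-resource>.cognitiveservices.azure.com/",
    api_key="<your-api-key>",
    api_model="prebuilt-layout",
    mode="markdown",
)

blob = Blob.from_path("example-report.pdf")  # any local PDF; path is a placeholder
for doc in parser.lazy_parse(blob):
    print(doc.page_content[:200])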
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~llms~test_opaqueprompts.py
from libs.core.langchain_core.output_parsers import StrOutputParser from libs.core.langchain_core.prompts import PromptTemplate from libs.core.langchain_core.runnables import RunnableParallel import langchain_community.utilities.opaqueprompts as op from langchain_community.llms import OpenAI from langchain_community.llms.opaqueprompts import OpaquePrompts prompt_template = """ As an AI assistant, you will answer questions according to given context. Sensitive personal information in the question is masked for privacy. For instance, if the original text says "Giana is good," it will be changed to "PERSON_998 is good." Here's how to handle these changes: * Consider these masked phrases just as placeholders, but still refer to them in a relevant way when answering. * It's possible that different masked terms might mean the same thing. Stick with the given term and don't modify it. * All masked terms follow the "TYPE_ID" pattern. * Please don't invent new masked terms. For instance, if you see "PERSON_998," don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question. Conversation History: ```{history}``` Context : ```During our recent meeting on February 23, 2023, at 10:30 AM, John Doe provided me with his personal details. His email is [email protected] and his contact number is 650-456-7890. He lives in New York City, USA, and belongs to the American nationality with Christian beliefs and a leaning towards the Democratic party. He mentioned that he recently made a transaction using his credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he noted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided his website as https://johndoeportfolio.com. John also discussed some of his US-specific details. He said his bank account number is 1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321, and he recently renewed his passport, the number for which is 123456789. He emphasized not to share his SSN, which is 669-45-6789. Furthermore, he mentioned that he accesses his work files remotely through the IP 192.168.1.1 and has a medical license number MED-123456. ``` Question: ```{question}``` """ def test_opaqueprompts() -> None: chain = PromptTemplate.from_template(prompt_template) | OpaquePrompts(llm=OpenAI()) output = chain.invoke( { "question": "Write a text message to remind John to do password reset \ for his website through his email to stay secure." } ) assert isinstance(output, str) def test_opaqueprompts_functions() -> None: prompt = (PromptTemplate.from_template(prompt_template),) llm = OpenAI() pg_chain = ( op.sanitize | RunnableParallel( secure_context=lambda x: x["secure_context"], # type: ignore response=(lambda x: x["sanitized_input"]) # type: ignore | prompt | llm | StrOutputParser(), ) | (lambda x: op.desanitize(x["response"], x["secure_context"])) ) pg_chain.invoke( { "question": "Write a text message to remind John to do password reset\ for his website through his email to stay secure.", "history": "", } )
[ "PERSON_998 is good.", "PERSON_998,", "\nAs an AI assistant, you will answer questions according to given context.\n\nSensitive personal information in the question is masked for privacy.\nFor instance, if the original text says \"Giana is good,\" it will be changed\nto \"PERSON_998 is good.\"\n\nHere's how to handle these changes:\n* Consider these masked phrases just as placeholders, but still refer to\nthem in a relevant way when answering.\n* It's possible that different masked terms might mean the same thing.\nStick with the given term and don't modify it.\n* All masked terms follow the \"TYPE_ID\" pattern.\n* Please don't invent new masked terms. For instance, if you see \"PERSON_998,\"\ndon't come up with \"PERSON_997\" or \"PERSON_999\" unless they're already in the question.\n\nConversation History: ```{history}```\nContext : ```During our recent meeting on February 23, 2023, at 10:30 AM,\nJohn Doe provided me with his personal details. His email is [email protected]\nand his contact number is 650-456-7890. He lives in New York City, USA, and\nbelongs to the American nationality with Christian beliefs and a leaning towards\nthe Democratic party. He mentioned that he recently made a transaction using his\ncredit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address\n1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he\nnoted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided\nhis website as https://johndoeportfolio.com. John also discussed\nsome of his US-specific details. He said his bank account number is\n1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321,\nand he recently renewed his passport,\nthe number for which is 123456789. He emphasized not to share his SSN, which is\n669-45-6789. Furthermore, he mentioned that he accesses his work files remotely\nthrough the IP 192.168.1.1 and has a medical license number MED-123456. ```\nQuestion: ```{question}```\n", "Giana is good,", "t come up with \"PERSON_997\" or \"PERSON_999\" unless they" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~base_o365.py
"""Base class for all loaders that uses O365 Package""" from __future__ import annotations import logging import os import tempfile from abc import abstractmethod from enum import Enum from pathlib import Path from typing import TYPE_CHECKING, Dict, Iterable, List, Sequence, Union from libs.core.langchain_core.pydantic_v1 import ( BaseModel, BaseSettings, Field, FilePath, SecretStr, ) from langchain_community.document_loaders.base import BaseLoader from langchain_community.document_loaders.blob_loaders.file_system import ( FileSystemBlobLoader, ) from langchain_community.document_loaders.blob_loaders.schema import Blob if TYPE_CHECKING: from O365 import Account from O365.drive import Drive, Folder logger = logging.getLogger(__name__) CHUNK_SIZE = 1024 * 1024 * 5 class _O365Settings(BaseSettings): client_id: str = Field(..., env="O365_CLIENT_ID") client_secret: SecretStr = Field(..., env="O365_CLIENT_SECRET") class Config: env_prefix = "" case_sentive = False env_file = ".env" class _O365TokenStorage(BaseSettings): token_path: FilePath = Path.home() / ".credentials" / "o365_token.txt" class _FileType(str, Enum): DOC = "doc" DOCX = "docx" PDF = "pdf" def fetch_mime_types(file_types: Sequence[_FileType]) -> Dict[str, str]: """Fetch the mime types for the specified file types.""" mime_types_mapping = {} for file_type in file_types: if file_type.value == "doc": mime_types_mapping[file_type.value] = "application/msword" elif file_type.value == "docx": mime_types_mapping[ file_type.value ] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document" # noqa: E501 elif file_type.value == "pdf": mime_types_mapping[file_type.value] = "application/pdf" return mime_types_mapping class O365BaseLoader(BaseLoader, BaseModel): """Base class for all loaders that uses O365 Package""" settings: _O365Settings = Field(default_factory=_O365Settings) """Settings for the Office365 API client.""" auth_with_token: bool = False """Whether to authenticate with a token or not. Defaults to False.""" chunk_size: Union[int, str] = CHUNK_SIZE """Number of bytes to retrieve from each api call to the server. int or 'auto'.""" @property @abstractmethod def _file_types(self) -> Sequence[_FileType]: """Return supported file types.""" @property def _fetch_mime_types(self) -> Dict[str, str]: """Return a dict of supported file types to corresponding mime types.""" return fetch_mime_types(self._file_types) @property @abstractmethod def _scopes(self) -> List[str]: """Return required scopes.""" def _load_from_folder(self, folder: Folder) -> Iterable[Blob]: """Lazily load all files from a specified folder of the configured MIME type. Args: folder: The Folder instance from which the files are to be loaded. This Folder instance should represent a directory in a file system where the files are stored. Yields: An iterator that yields Blob instances, which are binary representations of the files loaded from the folder. """ file_mime_types = self._fetch_mime_types items = folder.get_items() with tempfile.TemporaryDirectory() as temp_dir: os.makedirs(os.path.dirname(temp_dir), exist_ok=True) for file in items: if file.is_file: if file.mime_type in list(file_mime_types.values()): file.download(to_path=temp_dir, chunk_size=self.chunk_size) loader = FileSystemBlobLoader(path=temp_dir) yield from loader.yield_blobs() def _load_from_object_ids( self, drive: Drive, object_ids: List[str] ) -> Iterable[Blob]: """Lazily load files specified by their object_ids from a drive. 
Load files into the system as binary large objects (Blobs) and return Iterable. Args: drive: The Drive instance from which the files are to be loaded. This Drive instance should represent a cloud storage service or similar storage system where the files are stored. object_ids: A list of object_id strings. Each object_id represents a unique identifier for a file in the drive. Yields: An iterator that yields Blob instances, which are binary representations of the files loaded from the drive using the specified object_ids. """ file_mime_types = self._fetch_mime_types with tempfile.TemporaryDirectory() as temp_dir: for object_id in object_ids: file = drive.get_item(object_id) if not file: logging.warning( "There isn't a file with" f"object_id {object_id} in drive {drive}." ) continue if file.is_file: if file.mime_type in list(file_mime_types.values()): file.download(to_path=temp_dir, chunk_size=self.chunk_size) loader = FileSystemBlobLoader(path=temp_dir) yield from loader.yield_blobs() def _auth(self) -> Account: """Authenticates the OneDrive API client Returns: The authenticated Account object. """ try: from O365 import Account, FileSystemTokenBackend except ImportError: raise ImportError( "O365 package not found, please install it with `pip install o365`" ) if self.auth_with_token: token_storage = _O365TokenStorage() token_path = token_storage.token_path token_backend = FileSystemTokenBackend( token_path=token_path.parent, token_filename=token_path.name ) account = Account( credentials=( self.settings.client_id, self.settings.client_secret.get_secret_value(), ), scopes=self._scopes, token_backend=token_backend, **{"raise_http_errors": False}, ) else: token_backend = FileSystemTokenBackend( token_path=Path.home() / ".credentials" ) account = Account( credentials=( self.settings.client_id, self.settings.client_secret.get_secret_value(), ), scopes=self._scopes, token_backend=token_backend, **{"raise_http_errors": False}, ) # make the auth account.authenticate() return account
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~test_bagel.py
from libs.core.langchain_core.documents import Document from langchain_community.vectorstores import Bagel from tests.integration_tests.vectorstores.fake_embeddings import ( FakeEmbeddings, ) def test_similarity_search() -> None: """Test similarity search""" from bagel.config import Settings setting = Settings( bagel_api_impl="rest", bagel_server_host="api.bageldb.ai", ) bagel = Bagel(client_settings=setting) bagel.add_texts(texts=["hello bagel", "hello langchain"]) result = bagel.similarity_search(query="bagel", k=1) assert result == [Document(page_content="hello bagel")] bagel.delete_cluster() def test_bagel() -> None: """Test from_texts""" texts = ["hello bagel", "hello langchain"] txt_search = Bagel.from_texts(cluster_name="testing", texts=texts) output = txt_search.similarity_search("hello bagel", k=1) assert output == [Document(page_content="hello bagel")] txt_search.delete_cluster() def test_with_metadatas() -> None: """Test end to end construction and search.""" texts = ["hello bagel", "hello langchain"] metadatas = [{"metadata": str(i)} for i in range(len(texts))] txt_search = Bagel.from_texts( cluster_name="testing", texts=texts, metadatas=metadatas, ) output = txt_search.similarity_search("hello bagel", k=1) assert output == [Document(page_content="hello bagel", metadata={"metadata": "0"})] txt_search.delete_cluster() def test_with_metadatas_with_scores() -> None: """Test end to end construction and scored search.""" texts = ["hello bagel", "hello langchain"] metadatas = [{"page": str(i)} for i in range(len(texts))] txt_search = Bagel.from_texts( cluster_name="testing", texts=texts, metadatas=metadatas ) output = txt_search.similarity_search_with_score("hello bagel", k=1) assert output == [ (Document(page_content="hello bagel", metadata={"page": "0"}), 0.0) ] txt_search.delete_cluster() def test_with_metadatas_with_scores_using_vector() -> None: """Test end to end construction and scored search, using embedding vector.""" texts = ["hello bagel", "hello langchain"] metadatas = [{"page": str(i)} for i in range(len(texts))] embeddings = [[1.1, 2.3, 3.2], [0.3, 0.3, 0.1]] vector_search = Bagel.from_texts( cluster_name="testing_vector", texts=texts, metadatas=metadatas, text_embeddings=embeddings, ) embedded_query = [1.1, 2.3, 3.2] output = vector_search.similarity_search_by_vector_with_relevance_scores( query_embeddings=embedded_query, k=1 ) assert output == [ (Document(page_content="hello bagel", metadata={"page": "0"}), 0.0) ] vector_search.delete_cluster() def test_with_metadatas_with_scores_using_vector_embe() -> None: """Test end to end construction and scored search, using embedding vector.""" texts = ["hello bagel", "hello langchain"] metadatas = [{"page": str(i)} for i in range(len(texts))] embedding_function = FakeEmbeddings() vector_search = Bagel.from_texts( cluster_name="testing_vector_embedding1", texts=texts, metadatas=metadatas, embedding=embedding_function, ) embedded_query = embedding_function.embed_query("hello bagel") output = vector_search.similarity_search_by_vector_with_relevance_scores( query_embeddings=embedded_query, k=1 ) assert output == [ (Document(page_content="hello bagel", metadata={"page": "0"}), 0.0) ] vector_search.delete_cluster() def test_search_filter() -> None: """Test end to end construction and search with metadata filtering.""" texts = ["hello bagel", "hello langchain"] metadatas = [{"first_letter": text[0]} for text in texts] txt_search = Bagel.from_texts( cluster_name="testing", texts=texts, metadatas=metadatas, ) output = 
txt_search.similarity_search("bagel", k=1, where={"first_letter": "h"}) assert output == [ Document(page_content="hello bagel", metadata={"first_letter": "h"}) ] output = txt_search.similarity_search("langchain", k=1, where={"first_letter": "h"}) assert output == [ Document(page_content="hello langchain", metadata={"first_letter": "h"}) ] txt_search.delete_cluster() def test_search_filter_with_scores() -> None: texts = ["hello bagel", "this is langchain"] metadatas = [{"source": "notion"}, {"source": "google"}] txt_search = Bagel.from_texts( cluster_name="testing", texts=texts, metadatas=metadatas, ) output = txt_search.similarity_search_with_score( "hello bagel", k=1, where={"source": "notion"} ) assert output == [ (Document(page_content="hello bagel", metadata={"source": "notion"}), 0.0) ] txt_search.delete_cluster() def test_with_include_parameter() -> None: """Test end to end construction and include parameter.""" texts = ["hello bagel", "this is langchain"] docsearch = Bagel.from_texts(cluster_name="testing", texts=texts) output = docsearch.get(include=["embeddings"]) assert output["embeddings"] is not None output = docsearch.get() assert output["embeddings"] is None docsearch.delete_cluster() def test_bagel_update_document() -> None: """Test the update_document function in the Bagel class.""" initial_content = "bagel" document_id = "doc1" original_doc = Document(page_content=initial_content, metadata={"page": "0"}) docsearch = Bagel.from_documents( cluster_name="testing_docs", documents=[original_doc], ids=[document_id], ) updated_content = "updated bagel doc" updated_doc = Document(page_content=updated_content, metadata={"page": "0"}) docsearch.update_document(document_id=document_id, document=updated_doc) output = docsearch.similarity_search(updated_content, k=1) assert output == [Document(page_content=updated_content, metadata={"page": "0"})]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~everlyai.py
"""EverlyAI Endpoints chat wrapper. Relies heavily on ChatOpenAI.""" from __future__ import annotations import logging import sys from typing import TYPE_CHECKING, Dict, Optional, Set from libs.core.langchain_core.messages import BaseMessage from libs.core.langchain_core.pydantic_v1 import Field, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env from langchain_community.adapters.openai import convert_message_to_dict from langchain_community.chat_models.openai import ( ChatOpenAI, _import_tiktoken, ) if TYPE_CHECKING: import tiktoken logger = logging.getLogger(__name__) DEFAULT_API_BASE = "https://everlyai.xyz/hosted" DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf" class ChatEverlyAI(ChatOpenAI): """`EverlyAI` Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``EVERLYAI_API_KEY`` set with your API key. Alternatively, you can use the everlyai_api_key keyword argument. Any parameters that are valid to be passed to the `openai.create` call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain_community.chat_models import ChatEverlyAI chat = ChatEverlyAI(model_name="meta-llama/Llama-2-7b-chat-hf") """ @property def _llm_type(self) -> str: """Return type of chat model.""" return "everlyai-chat" @property def lc_secrets(self) -> Dict[str, str]: return {"everlyai_api_key": "EVERLYAI_API_KEY"} @classmethod def is_lc_serializable(cls) -> bool: return False everlyai_api_key: Optional[str] = None """EverlyAI Endpoints API keys.""" model_name: str = Field(default=DEFAULT_MODEL, alias="model") """Model name to use.""" everlyai_api_base: str = DEFAULT_API_BASE """Base URL path for API requests.""" available_models: Optional[Set[str]] = None """Available models from EverlyAI API.""" @staticmethod def get_available_models() -> Set[str]: """Get available models from EverlyAI API.""" # EverlyAI doesn't yet support dynamically query for available models. return set( [ "meta-llama/Llama-2-7b-chat-hf", "meta-llama/Llama-2-13b-chat-hf-quantized", ] ) @root_validator(pre=True) def validate_environment_override(cls, values: dict) -> dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "everlyai_api_key", "EVERLYAI_API_KEY", ) values["openai_api_base"] = DEFAULT_API_BASE try: import openai except ImportError as e: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`.", ) from e try: values["client"] = openai.ChatCompletion except AttributeError as exc: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`.", ) from exc if "model_name" not in values.keys(): values["model_name"] = DEFAULT_MODEL model_name = values["model_name"] available_models = cls.get_available_models() if model_name not in available_models: raise ValueError( f"Model name {model_name} not found in available models: " f"{available_models}.", ) values["available_models"] = available_models return values def _get_encoding_model(self) -> tuple[str, tiktoken.Encoding]: tiktoken_ = _import_tiktoken() if self.tiktoken_model_name is not None: model = self.tiktoken_model_name else: model = self.model_name # Returns the number of tokens used by a list of messages. 
try: encoding = tiktoken_.encoding_for_model("gpt-3.5-turbo-0301") except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken_.get_encoding(model) return model, encoding def get_num_tokens_from_messages(self, messages: list[BaseMessage]) -> int: """Calculate num tokens with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/ main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" if sys.version_info[1] <= 7: return super().get_num_tokens_from_messages(messages) model, encoding = self._get_encoding_model() tokens_per_message = 3 tokens_per_name = 1 num_tokens = 0 messages_dict = [convert_message_to_dict(m) for m in messages] for message in messages_dict: num_tokens += tokens_per_message for key, value in message.items(): # Cast str(value) in case the message value is not a string # This occurs with function messages num_tokens += len(encoding.encode(str(value))) if key == "name": num_tokens += tokens_per_name # every reply is primed with <im_start>assistant num_tokens += 3 return num_tokens
[]
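A minimal usage sketch for the ChatEverlyAI wrapper above, following the example in its docstring; the API key is a placeholder (setting the EVERLYAI_API_KEY environment variable works as well).

from libs.core.langchain_core.messages import HumanMessage

from langchain_community.chat_models import ChatEverlyAI

chat = ChatEverlyAI(
    model_name="meta-llama/Llama-2-7b-chat-hf",  # one of the two supported models
    everlyai_api_key="<your-everlyai-key>",      # placeholder
    max_tokens=64,
)
print(chat([HumanMessage(content="Give me a one-sentence greeting.")]))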
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~chat_models~test_azure_openai.py
"""Test AzureChatOpenAI wrapper.""" import os from typing import Any import pytest from libs.core.langchain_core.callbacks import CallbackManager from libs.core.langchain_core.messages import BaseMessage, HumanMessage from libs.core.langchain_core.outputs import ChatGeneration, ChatResult, LLMResult from langchain_community.chat_models import AzureChatOpenAI from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "") OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "") OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY", "") DEPLOYMENT_NAME = os.environ.get( "AZURE_OPENAI_DEPLOYMENT_NAME", os.environ.get("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", ""), ) def _get_llm(**kwargs: Any) -> AzureChatOpenAI: return AzureChatOpenAI( deployment_name=DEPLOYMENT_NAME, openai_api_version=OPENAI_API_VERSION, azure_endpoint=OPENAI_API_BASE, openai_api_key=OPENAI_API_KEY, **kwargs, ) @pytest.mark.scheduled @pytest.fixture def llm() -> AzureChatOpenAI: return _get_llm( max_tokens=10, ) def test_chat_openai(llm: AzureChatOpenAI) -> None: """Test AzureChatOpenAI wrapper.""" message = HumanMessage(content="Hello") response = llm([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @pytest.mark.scheduled def test_chat_openai_generate() -> None: """Test AzureChatOpenAI wrapper with generate.""" chat = _get_llm(max_tokens=10, n=2) message = HumanMessage(content="Hello") response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 2 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content @pytest.mark.scheduled def test_chat_openai_multiple_completions() -> None: """Test AzureChatOpenAI wrapper with multiple completions.""" chat = _get_llm(max_tokens=10, n=5) message = HumanMessage(content="Hello") response = chat._generate([message]) assert isinstance(response, ChatResult) assert len(response.generations) == 5 for generation in response.generations: assert isinstance(generation.message, BaseMessage) assert isinstance(generation.message.content, str) @pytest.mark.scheduled def test_chat_openai_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = _get_llm( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True, ) message = HumanMessage(content="Hello") response = chat([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage) @pytest.mark.scheduled def test_chat_openai_streaming_generation_info() -> None: """Test that generation info is preserved when streaming.""" class _FakeCallback(FakeCallbackHandler): saved_things: dict = {} def on_llm_end( self, *args: Any, **kwargs: Any, ) -> Any: # Save the generation self.saved_things["generation"] = args[0] callback = _FakeCallback() callback_manager = CallbackManager([callback]) chat = _get_llm( max_tokens=2, temperature=0, callback_manager=callback_manager, ) list(chat.stream("hi")) generation = callback.saved_things["generation"] # `Hello!` is two tokens, assert that that is what is returned assert generation.generations[0][0].text == "Hello!" 
@pytest.mark.scheduled async def test_async_chat_openai() -> None: """Test async generation.""" chat = _get_llm(max_tokens=10, n=2) message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 2 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content @pytest.mark.scheduled async def test_async_chat_openai_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = _get_llm( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True, ) message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert callback_handler.llm_streams > 0 assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content @pytest.mark.scheduled def test_openai_streaming(llm: AzureChatOpenAI) -> None: """Test streaming tokens from OpenAI.""" for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str) @pytest.mark.scheduled async def test_openai_astream(llm: AzureChatOpenAI) -> None: """Test streaming tokens from OpenAI.""" async for token in llm.astream("I'm Pickle Rick"): assert isinstance(token.content, str) @pytest.mark.scheduled async def test_openai_abatch(llm: AzureChatOpenAI) -> None: """Test streaming tokens from AzureChatOpenAI.""" result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str) @pytest.mark.scheduled async def test_openai_abatch_tags(llm: AzureChatOpenAI) -> None: """Test batch tokens from AzureChatOpenAI.""" result = await llm.abatch( ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]} ) for token in result: assert isinstance(token.content, str) @pytest.mark.scheduled def test_openai_batch(llm: AzureChatOpenAI) -> None: """Test batch tokens from AzureChatOpenAI.""" result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str) @pytest.mark.scheduled async def test_openai_ainvoke(llm: AzureChatOpenAI) -> None: """Test invoke tokens from AzureChatOpenAI.""" result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]}) assert isinstance(result.content, str) @pytest.mark.scheduled def test_openai_invoke(llm: AzureChatOpenAI) -> None: """Test invoke tokens from AzureChatOpenAI.""" result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"])) assert isinstance(result.content, str)
[ "Hello" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~gutenberg.py
from typing import List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class GutenbergLoader(BaseLoader): """Load from `Gutenberg.org`.""" def __init__(self, file_path: str): """Initialize with a file path.""" if not file_path.startswith("https://www.gutenberg.org"): raise ValueError("file path must start with 'https://www.gutenberg.org'") if not file_path.endswith(".txt"): raise ValueError("file path must end with '.txt'") self.file_path = file_path def load(self) -> List[Document]: """Load file.""" from urllib.request import urlopen elements = urlopen(self.file_path) text = "\n\n".join([str(el.decode("utf-8-sig")) for el in elements]) metadata = {"source": self.file_path} return [Document(page_content=text, metadata=metadata)]
[]
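A short usage sketch for the GutenbergLoader above; the book URL is a placeholder, and any plain-text ebook URL on www.gutenberg.org ending in ".txt" passes the loader's validation.

from langchain_community.document_loaders import GutenbergLoader

loader = GutenbergLoader("https://www.gutenberg.org/cache/epub/69972/pg69972.txt")
docs = loader.load()  # returns a single Document with the full book text
print(docs[0].metadata["source"], len(docs[0].page_content))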
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~blockchain.py
import os import re import time from enum import Enum from typing import List, Optional import requests from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class BlockchainType(Enum): """Enumerator of the supported blockchains.""" ETH_MAINNET = "eth-mainnet" ETH_GOERLI = "eth-goerli" POLYGON_MAINNET = "polygon-mainnet" POLYGON_MUMBAI = "polygon-mumbai" class BlockchainDocumentLoader(BaseLoader): """Load elements from a blockchain smart contract. The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet, Polygon mainnet, and Polygon Mumbai testnet. If no BlockchainType is specified, the default is Ethereum mainnet. The Loader uses the Alchemy API to interact with the blockchain. ALCHEMY_API_KEY environment variable must be set to use this loader. The API returns 100 NFTs per request and can be paginated using the startToken parameter. If get_all_tokens is set to True, the loader will get all tokens on the contract. Note that for contracts with a large number of tokens, this may take a long time (e.g. 10k tokens is 100 requests). Default value is false for this reason. The max_execution_time (sec) can be set to limit the execution time of the loader. Future versions of this loader can: - Support additional Alchemy APIs (e.g. getTransactions, etc.) - Support additional blockain APIs (e.g. Infura, Opensea, etc.) """ def __init__( self, contract_address: str, blockchainType: BlockchainType = BlockchainType.ETH_MAINNET, api_key: str = "docs-demo", startToken: str = "", get_all_tokens: bool = False, max_execution_time: Optional[int] = None, ): """ Args: contract_address: The address of the smart contract. blockchainType: The blockchain type. api_key: The Alchemy API key. startToken: The start token for pagination. get_all_tokens: Whether to get all tokens on the contract. max_execution_time: The maximum execution time (sec). 
""" self.contract_address = contract_address self.blockchainType = blockchainType.value self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key self.startToken = startToken self.get_all_tokens = get_all_tokens self.max_execution_time = max_execution_time if not self.api_key: raise ValueError("Alchemy API key not provided.") if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address): raise ValueError(f"Invalid contract address {self.contract_address}") def load(self) -> List[Document]: result = [] current_start_token = self.startToken start_time = time.time() while True: url = ( f"https://{self.blockchainType}.g.alchemy.com/nft/v2/" f"{self.api_key}/getNFTsForCollection?withMetadata=" f"True&contractAddress={self.contract_address}" f"&startToken={current_start_token}" ) response = requests.get(url) if response.status_code != 200: raise ValueError( f"Request failed with status code {response.status_code}" ) items = response.json()["nfts"] if not items: break for item in items: content = str(item) tokenId = item["id"]["tokenId"] metadata = { "source": self.contract_address, "blockchain": self.blockchainType, "tokenId": tokenId, } result.append(Document(page_content=content, metadata=metadata)) # exit after the first API call if get_all_tokens is False if not self.get_all_tokens: break # get the start token for the next API call from the last item in array current_start_token = self._get_next_tokenId(result[-1].metadata["tokenId"]) if ( self.max_execution_time is not None and (time.time() - start_time) > self.max_execution_time ): raise RuntimeError("Execution time exceeded the allowed time limit.") if not result: raise ValueError( f"No NFTs found for contract address {self.contract_address}" ) return result # add one to the tokenId, ensuring the correct tokenId format is used def _get_next_tokenId(self, tokenId: str) -> str: value_type = self._detect_value_type(tokenId) if value_type == "hex_0x": value_int = int(tokenId, 16) elif value_type == "hex_0xbf": value_int = int(tokenId[2:], 16) else: value_int = int(tokenId) result = value_int + 1 if value_type == "hex_0x": return "0x" + format(result, "0" + str(len(tokenId) - 2) + "x") elif value_type == "hex_0xbf": return "0xbf" + format(result, "0" + str(len(tokenId) - 4) + "x") else: return str(result) # A smart contract can use different formats for the tokenId @staticmethod def _detect_value_type(tokenId: str) -> str: if isinstance(tokenId, int): return "int" elif tokenId.startswith("0x"): return "hex_0x" elif tokenId.startswith("0xbf"): return "hex_0xbf" else: return "hex_0xbf"
[]
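A hedged usage sketch for the BlockchainDocumentLoader record above: the contract address is a placeholder 40-hex-character value, and the API key falls back to the loader's "docs-demo" default unless ALCHEMY_API_KEY is set, exactly as the constructor describes. With a placeholder address the Alchemy call will return no NFTs, so a real contract address is needed for a successful run.

# Usage sketch with placeholder contract address; requires network access to Alchemy.
from langchain_community.document_loaders.blockchain import (
    BlockchainDocumentLoader,
    BlockchainType,
)

loader = BlockchainDocumentLoader(
    contract_address="0x1234567890abcdef1234567890abcdef12345678",  # placeholder
    blockchainType=BlockchainType.ETH_MAINNET,
    get_all_tokens=False,    # fetch only the first page of up to 100 NFTs
    max_execution_time=60,   # abort pagination after 60 seconds
)
nft_docs = loader.load()
print(len(nft_docs), nft_docs[0].metadata)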
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_loaders~telegram.py
import json import logging import os import tempfile import zipfile from pathlib import Path from typing import Iterator, List, Union from libs.core.langchain_core.chat_sessions import ChatSession from libs.core.langchain_core.messages import AIMessage, BaseMessage, HumanMessage from langchain_community.chat_loaders.base import BaseChatLoader logger = logging.getLogger(__name__) class TelegramChatLoader(BaseChatLoader): """Load `telegram` conversations to LangChain chat messages. To export, use the Telegram Desktop app from https://desktop.telegram.org/, select a conversation, click the three dots in the top right corner, and select "Export chat history". Then select "Machine-readable JSON" (preferred) to export. Note: the 'lite' versions of the desktop app (like "Telegram for MacOS") do not support exporting chat history. """ def __init__( self, path: Union[str, Path], ): """Initialize the TelegramChatLoader. Args: path (Union[str, Path]): Path to the exported Telegram chat zip, directory, json, or HTML file. """ self.path = path if isinstance(path, str) else str(path) def _load_single_chat_session_html(self, file_path: str) -> ChatSession: """Load a single chat session from an HTML file. Args: file_path (str): Path to the HTML file. Returns: ChatSession: The loaded chat session. """ try: from bs4 import BeautifulSoup except ImportError: raise ImportError( "Please install the 'beautifulsoup4' package to load" " Telegram HTML files. You can do this by running" "'pip install beautifulsoup4' in your terminal." ) with open(file_path, "r", encoding="utf-8") as file: soup = BeautifulSoup(file, "html.parser") results: List[Union[HumanMessage, AIMessage]] = [] previous_sender = None for message in soup.select(".message.default"): timestamp = message.select_one(".pull_right.date.details")["title"] from_name_element = message.select_one(".from_name") if from_name_element is None and previous_sender is None: logger.debug("from_name not found in message") continue elif from_name_element is None: from_name = previous_sender else: from_name = from_name_element.text.strip() text = message.select_one(".text").text.strip() results.append( HumanMessage( content=text, additional_kwargs={ "sender": from_name, "events": [{"message_time": timestamp}], }, ) ) previous_sender = from_name return ChatSession(messages=results) def _load_single_chat_session_json(self, file_path: str) -> ChatSession: """Load a single chat session from a JSON file. Args: file_path (str): Path to the JSON file. Returns: ChatSession: The loaded chat session. """ with open(file_path, "r", encoding="utf-8") as file: data = json.load(file) messages = data.get("messages", []) results: List[BaseMessage] = [] for message in messages: text = message.get("text", "") timestamp = message.get("date", "") from_name = message.get("from", "") results.append( HumanMessage( content=text, additional_kwargs={ "sender": from_name, "events": [{"message_time": timestamp}], }, ) ) return ChatSession(messages=results) def _iterate_files(self, path: str) -> Iterator[str]: """Iterate over files in a directory or zip file. Args: path (str): Path to the directory or zip file. Yields: str: Path to each file. 
""" if os.path.isfile(path) and path.endswith((".html", ".json")): yield path elif os.path.isdir(path): for root, _, files in os.walk(path): for file in files: if file.endswith((".html", ".json")): yield os.path.join(root, file) elif zipfile.is_zipfile(path): with zipfile.ZipFile(path) as zip_file: for file in zip_file.namelist(): if file.endswith((".html", ".json")): with tempfile.TemporaryDirectory() as temp_dir: yield zip_file.extract(file, path=temp_dir) def lazy_load(self) -> Iterator[ChatSession]: """Lazy load the messages from the chat file and yield them in as chat sessions. Yields: ChatSession: The loaded chat session. """ for file_path in self._iterate_files(self.path): if file_path.endswith(".html"): yield self._load_single_chat_session_html(file_path) elif file_path.endswith(".json"): yield self._load_single_chat_session_json(file_path)
[]
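A hedged usage sketch for the TelegramChatLoader record above: "telegram_export.zip" is a placeholder path to an export produced by Telegram Desktop's "Export chat history" (machine-readable JSON preferred, as the docstring notes).

# Usage sketch; the export path is a placeholder.
from langchain_community.chat_loaders.telegram import TelegramChatLoader

loader = TelegramChatLoader(path="telegram_export.zip")
for session in loader.lazy_load():
    # ChatSession is a TypedDict whose "messages" key holds the parsed messages
    for message in session["messages"]:
        print(message.additional_kwargs.get("sender"), ":", message.content)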
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~document_loaders~test_geodataframe.py
from __future__ import annotations from typing import TYPE_CHECKING import pytest from libs.core.langchain_core.documents import Document from langchain_community.document_loaders import GeoDataFrameLoader if TYPE_CHECKING: from geopandas import GeoDataFrame else: GeoDataFrame = "geopandas.GeoDataFrame" @pytest.mark.requires("geopandas") def sample_gdf() -> GeoDataFrame: import geopandas # TODO: geopandas.datasets will be deprecated in 1.0 path_to_data = geopandas.datasets.get_path("nybb") gdf = geopandas.read_file(path_to_data) gdf["area"] = gdf.area gdf["crs"] = gdf.crs.to_string() return gdf.head(2) @pytest.mark.requires("geopandas") def test_load_returns_list_of_documents(sample_gdf: GeoDataFrame) -> None: loader = GeoDataFrameLoader(sample_gdf) docs = loader.load() assert isinstance(docs, list) assert all(isinstance(doc, Document) for doc in docs) assert len(docs) == 2 @pytest.mark.requires("geopandas") def test_load_converts_dataframe_columns_to_document_metadata( sample_gdf: GeoDataFrame, ) -> None: loader = GeoDataFrameLoader(sample_gdf) docs = loader.load() for i, doc in enumerate(docs): assert doc.metadata["area"] == sample_gdf.loc[i, "area"] assert doc.metadata["crs"] == sample_gdf.loc[i, "crs"]
[]
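A hedged sketch mirroring what the tests above exercise, outside pytest: it assumes geopandas (< 1.0, since the bundled "nybb" sample dataset used by the sample_gdf fixture is slated for removal) is installed.

# Build a small GeoDataFrame like the fixture does, then load it as Documents.
import geopandas

from langchain_community.document_loaders import GeoDataFrameLoader

gdf = geopandas.read_file(geopandas.datasets.get_path("nybb")).head(2)
gdf["area"] = gdf.area
gdf["crs"] = gdf.crs.to_string()

docs = GeoDataFrameLoader(gdf).load()
for doc in docs:
    print(doc.metadata["area"], doc.metadata["crs"])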
2024-01-10
mth93/langchain
templates~propositional-retrieval~propositional_retrieval~storage.py
import logging from pathlib import Path from langchain.embeddings import OpenAIEmbeddings from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import LocalFileStore from langchain_community.vectorstores import Chroma logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def get_multi_vector_retriever(docstore_id_key: str): """Create the composed retriever object.""" vectorstore = get_vectorstore() store = get_docstore() return MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=docstore_id_key, ) def get_vectorstore(collection_name: str = "proposals"): """Get the vectorstore used for this example.""" return Chroma( collection_name=collection_name, persist_directory=str(Path(__file__).parent.parent / "chroma_db_proposals"), embedding_function=OpenAIEmbeddings(), ) def get_docstore(): """Get the metadata store used for this example.""" return LocalFileStore( str(Path(__file__).parent.parent / "multi_vector_retriever_metadata") )
[]
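A hedged usage sketch for the propositional-retrieval storage helpers above: it assumes OPENAI_API_KEY is set (OpenAIEmbeddings requires it), that the Chroma and LocalFileStore directories next to the package are writable, and that "doc_id" is an illustrative choice of docstore id key rather than a value taken from this file.

# Compose the retriever from the helpers above and run a query.
from propositional_retrieval.storage import get_multi_vector_retriever

DOCSTORE_ID_KEY = "doc_id"  # assumed key name for illustration

retriever = get_multi_vector_retriever(DOCSTORE_ID_KEY)
docs = retriever.get_relevant_documents("What does the corpus say about X?")
print([d.metadata for d in docs])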
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~vertexai.py
from __future__ import annotations from concurrent.futures import Executor, ThreadPoolExecutor from typing import TYPE_CHECKING, Any, ClassVar, Dict, Iterator, List, Optional, Union from libs.core.langchain_core.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.llms import BaseLLM from libs.core.langchain_core.outputs import Generation, GenerationChunk, LLMResult from libs.core.langchain_core.pydantic_v1 import BaseModel, Field, root_validator from langchain_community.utilities.vertexai import ( create_retry_decorator, get_client_info, init_vertexai, raise_vertex_import_error, ) if TYPE_CHECKING: from google.cloud.aiplatform.gapic import ( PredictionServiceAsyncClient, PredictionServiceClient, ) from google.cloud.aiplatform.models import Prediction from google.protobuf.struct_pb2 import Value from vertexai.language_models._language_models import ( TextGenerationResponse, _LanguageModel, ) from vertexai.preview.generative_models import Image # This is for backwards compatibility # We can remove after `langchain` stops importing it _response_to_generation = None completion_with_retry = None stream_completion_with_retry = None def is_codey_model(model_name: str) -> bool: """Returns True if the model name is a Codey model.""" return "code" in model_name def is_gemini_model(model_name: str) -> bool: """Returns True if the model name is a Gemini model.""" return model_name is not None and "gemini" in model_name def completion_with_retry( llm: VertexAI, prompt: List[Union[str, "Image"]], stream: bool = False, is_gemini: bool = False, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = create_retry_decorator(llm, run_manager=run_manager) @retry_decorator def _completion_with_retry( prompt: List[Union[str, "Image"]], is_gemini: bool = False, **kwargs: Any ) -> Any: if is_gemini: return llm.client.generate_content( prompt, stream=stream, generation_config=kwargs ) else: if stream: return llm.client.predict_streaming(prompt[0], **kwargs) return llm.client.predict(prompt[0], **kwargs) return _completion_with_retry(prompt, is_gemini, **kwargs) async def acompletion_with_retry( llm: VertexAI, prompt: str, is_gemini: bool = False, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = create_retry_decorator(llm, run_manager=run_manager) @retry_decorator async def _acompletion_with_retry( prompt: str, is_gemini: bool = False, **kwargs: Any ) -> Any: if is_gemini: return await llm.client.generate_content_async( prompt, generation_config=kwargs ) return await llm.client.predict_async(prompt, **kwargs) return await _acompletion_with_retry(prompt, is_gemini, **kwargs) class _VertexAIBase(BaseModel): project: Optional[str] = None "The default GCP project to use when making Vertex API calls." location: str = "us-central1" "The default location to use when making API calls." request_parallelism: int = 5 "The amount of parallelism allowed for requests issued to VertexAI models. " "Default is 5." max_retries: int = 6 """The maximum number of retries to make when generating.""" task_executor: ClassVar[Optional[Executor]] = Field(default=None, exclude=True) stop: Optional[List[str]] = None "Optional list of stop words to use when generating." model_name: Optional[str] = None "Underlying model name." 
@classmethod def _get_task_executor(cls, request_parallelism: int = 5) -> Executor: if cls.task_executor is None: cls.task_executor = ThreadPoolExecutor(max_workers=request_parallelism) return cls.task_executor class _VertexAICommon(_VertexAIBase): client: "_LanguageModel" = None #: :meta private: client_preview: "_LanguageModel" = None #: :meta private: model_name: str "Underlying model name." temperature: float = 0.0 "Sampling temperature, it controls the degree of randomness in token selection." max_output_tokens: int = 128 "Token limit determines the maximum amount of text output from one prompt." top_p: float = 0.95 "Tokens are selected from most probable to least until the sum of their " "probabilities equals the top-p value. Top-p is ignored for Codey models." top_k: int = 40 "How the model selects tokens for output, the next token is selected from " "among the top-k most probable tokens. Top-k is ignored for Codey models." credentials: Any = Field(default=None, exclude=True) "The default custom credentials (google.auth.credentials.Credentials) to use " "when making API calls. If not provided, credentials will be ascertained from " "the environment." n: int = 1 """How many completions to generate for each prompt.""" streaming: bool = False """Whether to stream the results or not.""" @property def _llm_type(self) -> str: return "vertexai" @property def is_codey_model(self) -> bool: return is_codey_model(self.model_name) @property def _is_gemini_model(self) -> bool: return is_gemini_model(self.model_name) @property def _identifying_params(self) -> Dict[str, Any]: """Gets the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _default_params(self) -> Dict[str, Any]: params = { "temperature": self.temperature, "max_output_tokens": self.max_output_tokens, "candidate_count": self.n, } if not self.is_codey_model: params.update( { "top_k": self.top_k, "top_p": self.top_p, } ) return params @classmethod def _try_init_vertexai(cls, values: Dict) -> None: allowed_params = ["project", "location", "credentials"] params = {k: v for k, v in values.items() if k in allowed_params} init_vertexai(**params) return None def _prepare_params( self, stop: Optional[List[str]] = None, stream: bool = False, **kwargs: Any, ) -> dict: stop_sequences = stop or self.stop params_mapping = {"n": "candidate_count"} params = {params_mapping.get(k, k): v for k, v in kwargs.items()} params = {**self._default_params, "stop_sequences": stop_sequences, **params} if stream or self.streaming: params.pop("candidate_count") return params class VertexAI(_VertexAICommon, BaseLLM): """Google Vertex AI large language models.""" model_name: str = "text-bison" "The name of the Vertex AI large language model." tuned_model_name: Optional[str] = None "The name of a tuned model. If provided, model_name is ignored." 
@classmethod def is_lc_serializable(self) -> bool: return True @classmethod def get_lc_namespace(cls) -> List[str]: """Get the namespace of the langchain object.""" return ["langchain", "llms", "vertexai"] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" tuned_model_name = values.get("tuned_model_name") model_name = values["model_name"] is_gemini = is_gemini_model(values["model_name"]) cls._try_init_vertexai(values) try: from vertexai.language_models import ( CodeGenerationModel, TextGenerationModel, ) from vertexai.preview.language_models import ( CodeGenerationModel as PreviewCodeGenerationModel, ) from vertexai.preview.language_models import ( TextGenerationModel as PreviewTextGenerationModel, ) if is_gemini: from vertexai.preview.generative_models import ( GenerativeModel, ) if is_codey_model(model_name): model_cls = CodeGenerationModel preview_model_cls = PreviewCodeGenerationModel elif is_gemini: model_cls = GenerativeModel preview_model_cls = GenerativeModel else: model_cls = TextGenerationModel preview_model_cls = PreviewTextGenerationModel if tuned_model_name: values["client"] = model_cls.get_tuned_model(tuned_model_name) values["client_preview"] = preview_model_cls.get_tuned_model( tuned_model_name ) else: if is_gemini: values["client"] = model_cls(model_name=model_name) values["client_preview"] = preview_model_cls(model_name=model_name) else: values["client"] = model_cls.from_pretrained(model_name) values["client_preview"] = preview_model_cls.from_pretrained( model_name ) except ImportError: raise_vertex_import_error() if values["streaming"] and values["n"] > 1: raise ValueError("Only one candidate can be generated with streaming!") return values def get_num_tokens(self, text: str) -> int: """Get the number of tokens present in the text. Useful for checking if an input will fit in a model's context window. Args: text: The string input to tokenize. Returns: The integer number of tokens in the text. 
""" try: result = self.client_preview.count_tokens([text]) except AttributeError: raise_vertex_import_error() return result.total_tokens def _response_to_generation( self, response: TextGenerationResponse ) -> GenerationChunk: """Converts a stream response to a generation chunk.""" try: generation_info = { "is_blocked": response.is_blocked, "safety_attributes": response.safety_attributes, } except Exception: generation_info = None return GenerationChunk(text=response.text, generation_info=generation_info) def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> LLMResult: should_stream = stream if stream is not None else self.streaming params = self._prepare_params(stop=stop, stream=should_stream, **kwargs) generations: List[List[Generation]] = [] for prompt in prompts: if should_stream: generation = GenerationChunk(text="") for chunk in self._stream( prompt, stop=stop, run_manager=run_manager, **kwargs ): generation += chunk generations.append([generation]) else: res = completion_with_retry( self, [prompt], stream=should_stream, is_gemini=self._is_gemini_model, run_manager=run_manager, **params, ) generations.append( [self._response_to_generation(r) for r in res.candidates] ) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: params = self._prepare_params(stop=stop, **kwargs) generations = [] for prompt in prompts: res = await acompletion_with_retry( self, prompt, is_gemini=self._is_gemini_model, run_manager=run_manager, **params, ) generations.append( [self._response_to_generation(r) for r in res.candidates] ) return LLMResult(generations=generations) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: params = self._prepare_params(stop=stop, stream=True, **kwargs) for stream_resp in completion_with_retry( self, [prompt], stream=True, is_gemini=self._is_gemini_model, run_manager=run_manager, **params, ): chunk = self._response_to_generation(stream_resp) yield chunk if run_manager: run_manager.on_llm_new_token( chunk.text, chunk=chunk, verbose=self.verbose, ) class VertexAIModelGarden(_VertexAIBase, BaseLLM): """Large language models served from Vertex AI Model Garden.""" client: "PredictionServiceClient" = None #: :meta private: async_client: "PredictionServiceAsyncClient" = None #: :meta private: endpoint_id: str "A name of an endpoint where the model has been deployed." allowed_model_args: Optional[List[str]] = None "Allowed optional args to be passed to the model." prompt_arg: str = "prompt" result_arg: Optional[str] = "generated_text" "Set result_arg to None if output of the model is expected to be a string." "Otherwise, if it's a dict, provided an argument that contains the result." @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: from google.api_core.client_options import ClientOptions from google.cloud.aiplatform.gapic import ( PredictionServiceAsyncClient, PredictionServiceClient, ) except ImportError: raise_vertex_import_error() if not values["project"]: raise ValueError( "A GCP project should be provided to run inference on Model Garden!" 
) client_options = ClientOptions( api_endpoint=f"{values['location']}-aiplatform.googleapis.com" ) client_info = get_client_info(module="vertex-ai-model-garden") values["client"] = PredictionServiceClient( client_options=client_options, client_info=client_info ) values["async_client"] = PredictionServiceAsyncClient( client_options=client_options, client_info=client_info ) return values @property def endpoint_path(self) -> str: return self.client.endpoint_path( project=self.project, location=self.location, endpoint=self.endpoint_id, ) @property def _llm_type(self) -> str: return "vertexai_model_garden" def _prepare_request(self, prompts: List[str], **kwargs: Any) -> List["Value"]: try: from google.protobuf import json_format from google.protobuf.struct_pb2 import Value except ImportError: raise ImportError( "protobuf package not found, please install it with" " `pip install protobuf`" ) instances = [] for prompt in prompts: if self.allowed_model_args: instance = { k: v for k, v in kwargs.items() if k in self.allowed_model_args } else: instance = {} instance[self.prompt_arg] = prompt instances.append(instance) predict_instances = [ json_format.ParseDict(instance_dict, Value()) for instance_dict in instances ] return predict_instances def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" instances = self._prepare_request(prompts, **kwargs) response = self.client.predict(endpoint=self.endpoint_path, instances=instances) return self._parse_response(response) def _parse_response(self, predictions: "Prediction") -> LLMResult: generations: List[List[Generation]] = [] for result in predictions.predictions: generations.append( [ Generation(text=self._parse_prediction(prediction)) for prediction in result ] ) return LLMResult(generations=generations) def _parse_prediction(self, prediction: Any) -> str: if isinstance(prediction, str): return prediction if self.result_arg: try: return prediction[self.result_arg] except KeyError: if isinstance(prediction, str): error_desc = ( "Provided non-None `result_arg` (result_arg=" f"{self.result_arg}). But got prediction of type " f"{type(prediction)} instead of dict. Most probably, you" "need to set `result_arg=None` during VertexAIModelGarden " "initialization." ) raise ValueError(error_desc) else: raise ValueError(f"{self.result_arg} key not found in prediction!") return prediction async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" instances = self._prepare_request(prompts, **kwargs) response = await self.async_client.predict( endpoint=self.endpoint_path, instances=instances ) return self._parse_response(response)
[ "prompt" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~reddit.py
from __future__ import annotations from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader if TYPE_CHECKING: import praw def _dependable_praw_import() -> praw: try: import praw except ImportError: raise ImportError( "praw package not found, please install it with `pip install praw`" ) return praw class RedditPostsLoader(BaseLoader): """Load `Reddit` posts. Read posts on a subreddit. First, you need to go to https://www.reddit.com/prefs/apps/ and create your application """ def __init__( self, client_id: str, client_secret: str, user_agent: str, search_queries: Sequence[str], mode: str, categories: Sequence[str] = ["new"], number_posts: Optional[int] = 10, ): """ Initialize with client_id, client_secret, user_agent, search_queries, mode, categories, number_posts. Example: https://www.reddit.com/r/learnpython/ Args: client_id: Reddit client id. client_secret: Reddit client secret. user_agent: Reddit user agent. search_queries: The search queries. mode: The mode. categories: The categories. Default: ["new"] number_posts: The number of posts. Default: 10 """ self.client_id = client_id self.client_secret = client_secret self.user_agent = user_agent self.search_queries = search_queries self.mode = mode self.categories = categories self.number_posts = number_posts def load(self) -> List[Document]: """Load reddits.""" praw = _dependable_praw_import() reddit = praw.Reddit( client_id=self.client_id, client_secret=self.client_secret, user_agent=self.user_agent, ) results: List[Document] = [] if self.mode == "subreddit": for search_query in self.search_queries: for category in self.categories: docs = self._subreddit_posts_loader( search_query=search_query, category=category, reddit=reddit ) results.extend(docs) elif self.mode == "username": for search_query in self.search_queries: for category in self.categories: docs = self._user_posts_loader( search_query=search_query, category=category, reddit=reddit ) results.extend(docs) else: raise ValueError( "mode not correct, please enter 'username' or 'subreddit' as mode" ) return results def _subreddit_posts_loader( self, search_query: str, category: str, reddit: praw.reddit.Reddit ) -> Iterable[Document]: subreddit = reddit.subreddit(search_query) method = getattr(subreddit, category) cat_posts = method(limit=self.number_posts) """Format reddit posts into a string.""" for post in cat_posts: metadata = { "post_subreddit": post.subreddit_name_prefixed, "post_category": category, "post_title": post.title, "post_score": post.score, "post_id": post.id, "post_url": post.url, "post_author": post.author, } yield Document( page_content=post.selftext, metadata=metadata, ) def _user_posts_loader( self, search_query: str, category: str, reddit: praw.reddit.Reddit ) -> Iterable[Document]: user = reddit.redditor(search_query) method = getattr(user.submissions, category) cat_posts = method(limit=self.number_posts) """Format reddit posts into a string.""" for post in cat_posts: metadata = { "post_subreddit": post.subreddit_name_prefixed, "post_category": category, "post_title": post.title, "post_score": post.score, "post_id": post.id, "post_url": post.url, "post_author": post.author, } yield Document( page_content=post.selftext, metadata=metadata, )
[]
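A hedged usage sketch for the RedditPostsLoader record above: it assumes praw is installed, and the client id/secret are read from environment variables purely for illustration (they are the credentials of a Reddit app created at https://www.reddit.com/prefs/apps/).

# Load a handful of recent and hot posts from a subreddit.
import os

from langchain_community.document_loaders import RedditPostsLoader

loader = RedditPostsLoader(
    client_id=os.environ["REDDIT_CLIENT_ID"],
    client_secret=os.environ["REDDIT_CLIENT_SECRET"],
    user_agent="langchain-example by u/your_username",  # placeholder user agent
    search_queries=["learnpython"],
    mode="subreddit",
    categories=["new", "hot"],
    number_posts=5,
)
posts = loader.load()
print(len(posts), posts[0].metadata["post_title"] if posts else None)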
2024-01-10
mth93/langchain
libs~community~langchain_community~utilities~tensorflow_datasets.py
import logging from typing import Any, Callable, Dict, Iterator, List, Optional from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator logger = logging.getLogger(__name__) class TensorflowDatasets(BaseModel): """Access to the TensorFlow Datasets. The Current implementation can work only with datasets that fit in a memory. `TensorFlow Datasets` is a collection of datasets ready to use, with TensorFlow or other Python ML frameworks, such as Jax. All datasets are exposed as `tf.data.Datasets`. To get started see the Guide: https://www.tensorflow.org/datasets/overview and the list of datasets: https://www.tensorflow.org/datasets/catalog/ overview#all_datasets You have to provide the sample_to_document_function: a function that a sample from the dataset-specific format to the Document. Attributes: dataset_name: the name of the dataset to load split_name: the name of the split to load. Defaults to "train". load_max_docs: a limit to the number of loaded documents. Defaults to 100. sample_to_document_function: a function that converts a dataset sample to a Document Example: .. code-block:: python from langchain_community.utilities import TensorflowDatasets def mlqaen_example_to_document(example: dict) -> Document: return Document( page_content=decode_to_str(example["context"]), metadata={ "id": decode_to_str(example["id"]), "title": decode_to_str(example["title"]), "question": decode_to_str(example["question"]), "answer": decode_to_str(example["answers"]["text"][0]), }, ) tsds_client = TensorflowDatasets( dataset_name="mlqa/en", split_name="train", load_max_docs=MAX_DOCS, sample_to_document_function=mlqaen_example_to_document, ) """ dataset_name: str = "" split_name: str = "train" load_max_docs: int = 100 sample_to_document_function: Optional[Callable[[Dict], Document]] = None dataset: Any #: :meta private: @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: import tensorflow # noqa: F401 except ImportError: raise ImportError( "Could not import tensorflow python package. " "Please install it with `pip install tensorflow`." ) try: import tensorflow_datasets except ImportError: raise ImportError( "Could not import tensorflow_datasets python package. " "Please install it with `pip install tensorflow-datasets`." ) if values["sample_to_document_function"] is None: raise ValueError( "sample_to_document_function is None. " "Please provide a function that converts a dataset sample to" " a Document." ) values["dataset"] = tensorflow_datasets.load( values["dataset_name"], split=values["split_name"] ) return values def lazy_load(self) -> Iterator[Document]: """Download a selected dataset lazily. Returns: an iterator of Documents. """ return ( self.sample_to_document_function(s) for s in self.dataset.take(self.load_max_docs) if self.sample_to_document_function is not None ) def load(self) -> List[Document]: """Download a selected dataset. Returns: a list of Documents. """ return list(self.lazy_load())
[]
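The docstring example in the record above assumes a decode_to_str helper without defining it. A minimal sketch of that helper plus the surrounding wiring is shown below; it assumes tensorflow and tensorflow-datasets are installed, that mlqa/en exposes a "test" split, and that the feature names match the docstring.

# Define the helper the docstring example relies on, then load a few documents.
import tensorflow as tf

from libs.core.langchain_core.documents import Document
from langchain_community.utilities import TensorflowDatasets


def decode_to_str(item: tf.Tensor) -> str:
    """Decode a scalar byte-string tensor into a Python str."""
    return item.numpy().decode("utf-8")


def mlqaen_example_to_document(example: dict) -> Document:
    return Document(
        page_content=decode_to_str(example["context"]),
        metadata={
            "id": decode_to_str(example["id"]),
            "title": decode_to_str(example["title"]),
            "question": decode_to_str(example["question"]),
            "answer": decode_to_str(example["answers"]["text"][0]),
        },
    )


tsds_client = TensorflowDatasets(
    dataset_name="mlqa/en",
    split_name="test",  # assumed split name for this dataset
    load_max_docs=10,
    sample_to_document_function=mlqaen_example_to_document,
)
docs = tsds_client.load()
print(docs[0].metadata["question"])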
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~weather.py
"""Simple reader that reads weather data from OpenWeatherMap API""" from __future__ import annotations from datetime import datetime from typing import Iterator, List, Optional, Sequence from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper class WeatherDataLoader(BaseLoader): """Load weather data with `Open Weather Map` API. Reads the forecast & current weather of any location using OpenWeatherMap's free API. Checkout 'https://openweathermap.org/appid' for more on how to generate a free OpenWeatherMap API. """ def __init__( self, client: OpenWeatherMapAPIWrapper, places: Sequence[str], ) -> None: """Initialize with parameters.""" super().__init__() self.client = client self.places = places @classmethod def from_params( cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None ) -> WeatherDataLoader: client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key) return cls(client, places) def lazy_load( self, ) -> Iterator[Document]: """Lazily load weather data for the given locations.""" for place in self.places: metadata = {"queried_at": datetime.now()} content = self.client.run(place) yield Document(page_content=content, metadata=metadata) def load( self, ) -> List[Document]: """Load weather data for the given locations.""" return list(self.lazy_load())
[]
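A hedged usage sketch for the WeatherDataLoader record above: it assumes pyowm is installed (the OpenWeatherMap wrapper needs it) and reads the API key from an environment variable for illustration; a free key can be created at https://openweathermap.org/appid.

# Fetch current weather documents for a couple of placeholder locations.
import os

from langchain_community.document_loaders import WeatherDataLoader

loader = WeatherDataLoader.from_params(
    places=["Chennai", "Vellore"],  # placeholder locations
    openweathermap_api_key=os.environ.get("OPENWEATHERMAP_API_KEY"),
)
for doc in loader.lazy_load():
    print(doc.metadata["queried_at"], doc.page_content[:80])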