Dataset columns:
date_collected: string (1 distinct value)
repo_name: string (6-116 chars)
file_name: string (2-220 chars)
file_contents: string (13-357k chars)
prompts: sequence
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~mediawikidump.py
import logging from pathlib import Path from typing import List, Optional, Sequence, Union from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) class MWDumpLoader(BaseLoader): """Load `MediaWiki` dump from an `XML` file. Example: .. code-block:: python from langchain_community.document_loaders import MWDumpLoader loader = MWDumpLoader( file_path="myWiki.xml", encoding="utf8" ) docs = loader.load() from langchain.text_splitter import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter( chunk_size=1000, chunk_overlap=0 ) texts = text_splitter.split_documents(docs) :param file_path: XML local file path :type file_path: str :param encoding: Charset encoding, defaults to "utf8" :type encoding: str, optional :param namespaces: The namespace of pages you want to parse. See https://www.mediawiki.org/wiki/Help:Namespaces#Localisation for a list of all common namespaces :type namespaces: List[int],optional :param skip_redirects: TR=rue to skip pages that redirect to other pages, False to keep them. False by default :type skip_redirects: bool, optional :param stop_on_error: False to skip over pages that cause parsing errors, True to stop. True by default :type stop_on_error: bool, optional """ def __init__( self, file_path: Union[str, Path], encoding: Optional[str] = "utf8", namespaces: Optional[Sequence[int]] = None, skip_redirects: Optional[bool] = False, stop_on_error: Optional[bool] = True, ): self.file_path = file_path if isinstance(file_path, str) else str(file_path) self.encoding = encoding # Namespaces range from -2 to 15, inclusive. self.namespaces = namespaces self.skip_redirects = skip_redirects self.stop_on_error = stop_on_error def load(self) -> List[Document]: """Load from a file path.""" try: import mwparserfromhell import mwxml except ImportError as e: raise ImportError( "Unable to import 'mwparserfromhell' or 'mwxml'. Please install with" " `pip install mwparserfromhell mwxml`." ) from e dump = mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding)) docs = [] for page in dump.pages: if self.skip_redirects and page.redirect: continue if self.namespaces and page.namespace not in self.namespaces: continue try: for revision in page: code = mwparserfromhell.parse(revision.text) text = code.strip_code( normalize=True, collapse=True, keep_template_params=False ) metadata = {"source": page.title} docs.append(Document(page_content=text, metadata=metadata)) except Exception as e: logger.error("Parsing error: {}".format(e)) if self.stop_on_error: raise e else: continue return docs
[]
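A minimal usage sketch for the MWDumpLoader record above, following its own docstring; the XML path and namespace filter are illustrative placeholders, not values from the dataset row:

# Illustrative sketch: keep only main-namespace pages and skip redirects.
# "myWiki.xml" follows the docstring example and is a placeholder path.
from langchain_community.document_loaders import MWDumpLoader

loader = MWDumpLoader(
    file_path="myWiki.xml",
    encoding="utf8",
    namespaces=[0],       # main/article namespace only
    skip_redirects=True,  # drop redirect pages
    stop_on_error=False,  # log parse errors and continue
)
docs = loader.load()
print(len(docs))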
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~stripe.py
import json import urllib.request from typing import List, Optional from libs.core.langchain_core.documents import Document from libs.core.langchain_core.utils import get_from_env, stringify_dict from langchain_community.document_loaders.base import BaseLoader STRIPE_ENDPOINTS = { "balance_transactions": "https://api.stripe.com/v1/balance_transactions", "charges": "https://api.stripe.com/v1/charges", "customers": "https://api.stripe.com/v1/customers", "events": "https://api.stripe.com/v1/events", "refunds": "https://api.stripe.com/v1/refunds", "disputes": "https://api.stripe.com/v1/disputes", } class StripeLoader(BaseLoader): """Load from `Stripe` API.""" def __init__(self, resource: str, access_token: Optional[str] = None) -> None: """Initialize with a resource and an access token. Args: resource: The resource. access_token: The access token. """ self.resource = resource access_token = access_token or get_from_env( "access_token", "STRIPE_ACCESS_TOKEN" ) self.headers = {"Authorization": f"Bearer {access_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = STRIPE_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) def load(self) -> List[Document]: return self._get_resource()
[]
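A hedged usage sketch for the StripeLoader record above; the resource name must be one of the STRIPE_ENDPOINTS keys, and the access token is assumed to come from the STRIPE_ACCESS_TOKEN environment variable:

# Illustrative sketch: pull Stripe charges as Documents.
# STRIPE_ACCESS_TOKEN is read from the environment when no token is passed.
from langchain_community.document_loaders.stripe import StripeLoader

loader = StripeLoader(resource="charges")
docs = loader.load()  # one Document per response, metadata["source"] is the endpoint URL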
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~amazon_api_gateway.py
from typing import Any, Dict, List, Mapping, Optional import requests from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.pydantic_v1 import Extra from langchain_community.llms.utils import enforce_stop_tokens class ContentHandlerAmazonAPIGateway: """Adapter to prepare the inputs from Langchain to a format that LLM model expects. It also provides helper function to extract the generated text from the model response.""" @classmethod def transform_input( cls, prompt: str, model_kwargs: Dict[str, Any] ) -> Dict[str, Any]: return {"inputs": prompt, "parameters": model_kwargs} @classmethod def transform_output(cls, response: Any) -> str: return response.json()[0]["generated_text"] class AmazonAPIGateway(LLM): """Amazon API Gateway to access LLM models hosted on AWS.""" api_url: str """API Gateway URL""" headers: Optional[Dict] = None """API Gateway HTTP Headers to send, e.g. for authentication""" model_kwargs: Optional[Dict] = None """Keyword arguments to pass to the model.""" content_handler: ContentHandlerAmazonAPIGateway = ContentHandlerAmazonAPIGateway() """The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"api_url": self.api_url, "headers": self.headers}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "amazon_api_gateway" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Amazon API Gateway model. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = se("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} payload = self.content_handler.transform_input(prompt, _model_kwargs) try: response = requests.post( self.api_url, headers=self.headers, json=payload, ) text = self.content_handler.transform_output(response) except Exception as error: raise ValueError(f"Error raised by the service: {error}") if stop is not None: text = enforce_stop_tokens(text, stop) return text
[]
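A hypothetical invocation of the AmazonAPIGateway LLM above; the URL, header, and model_kwargs values are placeholders for a real API Gateway deployment, not values from the dataset:

# Illustrative sketch: call an LLM hosted behind Amazon API Gateway.
from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway

llm = AmazonAPIGateway(
    api_url="https://<api-id>.execute-api.<region>.amazonaws.com/prod/llm",
    headers={"x-api-key": "<api-key>"},
    model_kwargs={"max_new_tokens": 128, "temperature": 0.2},
)
print(llm("Tell me a joke."))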
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~dataframe.py
from typing import Any, Iterator, List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class BaseDataFrameLoader(BaseLoader): def __init__(self, data_frame: Any, *, page_content_column: str = "text"): """Initialize with dataframe object. Args: data_frame: DataFrame object. page_content_column: Name of the column containing the page content. Defaults to "text". """ self.data_frame = data_frame self.page_content_column = page_content_column def lazy_load(self) -> Iterator[Document]: """Lazy load records from dataframe.""" for _, row in self.data_frame.iterrows(): text = row[self.page_content_column] metadata = row.to_dict() metadata.pop(self.page_content_column) yield Document(page_content=text, metadata=metadata) def load(self) -> List[Document]: """Load full dataframe.""" return list(self.lazy_load()) class DataFrameLoader(BaseDataFrameLoader): """Load `Pandas` DataFrame.""" def __init__(self, data_frame: Any, page_content_column: str = "text"): """Initialize with dataframe object. Args: data_frame: Pandas DataFrame object. page_content_column: Name of the column containing the page content. Defaults to "text". """ try: import pandas as pd except ImportError as e: raise ImportError( "Unable to import pandas, please install with `pip install pandas`." ) from e if not isinstance(data_frame, pd.DataFrame): raise ValueError( f"Expected data_frame to be a pd.DataFrame, got {type(data_frame)}" ) super().__init__(data_frame, page_content_column=page_content_column)
[]
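A small self-contained sketch of the DataFrameLoader record above; the toy DataFrame is made up for illustration:

# Illustrative sketch: non-content columns become per-Document metadata.
import pandas as pd
from langchain_community.document_loaders.dataframe import DataFrameLoader

df = pd.DataFrame({"text": ["first row", "second row"], "author": ["alice", "bob"]})
docs = DataFrameLoader(df, page_content_column="text").load()
print(docs[0].page_content, docs[0].metadata)  # first row {'author': 'alice'}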
2024-01-10
mth93/langchain
templates~rag-timescale-conversation~rag_timescale_conversation~load_sample_dataset.py
import os import tempfile from datetime import datetime, timedelta import requests from langchain.document_loaders import JSONLoader from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores.timescalevector import TimescaleVector from timescale_vector import client def parse_date(date_string: str) -> datetime: if date_string is None: return None time_format = "%a %b %d %H:%M:%S %Y %z" return datetime.strptime(date_string, time_format) def extract_metadata(record: dict, metadata: dict) -> dict: dt = parse_date(record["date"]) metadata["id"] = str(client.uuid_from_time(dt)) if dt is not None: metadata["date"] = dt.isoformat() else: metadata["date"] = None metadata["author"] = record["author"] metadata["commit_hash"] = record["commit"] return metadata def load_ts_git_dataset( service_url, collection_name="timescale_commits", num_records: int = 500, partition_interval=timedelta(days=7), ): json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json" tmp_file = "ts_git_log.json" temp_dir = tempfile.gettempdir() json_file_path = os.path.join(temp_dir, tmp_file) if not os.path.exists(json_file_path): response = requests.get(json_url) if response.status_code == 200: with open(json_file_path, "w") as json_file: json_file.write(response.text) else: print(f"Failed to download JSON file. Status code: {response.status_code}") loader = JSONLoader( file_path=json_file_path, jq_schema=".commit_history[]", text_content=False, metadata_func=extract_metadata, ) documents = loader.load() # Remove documents with None dates documents = [doc for doc in documents if doc.metadata["date"] is not None] if num_records > 0: documents = documents[:num_records] # Split the documents into chunks for embedding text_splitter = CharacterTextSplitter( chunk_size=1000, chunk_overlap=200, ) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() # Create a Timescale Vector instance from the collection of documents TimescaleVector.from_documents( embedding=embeddings, ids=[doc.metadata["id"] for doc in docs], documents=docs, collection_name=collection_name, service_url=service_url, time_partition_interval=partition_interval, )
[]
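A hypothetical call to the sample-dataset helper above; the module path mirrors the file location in this record, and the TIMESCALE_SERVICE_URL / OPENAI_API_KEY environment variables are assumptions:

# Illustrative sketch: load the sample commit-history dataset into Timescale Vector.
import os
from datetime import timedelta

from rag_timescale_conversation.load_sample_dataset import load_ts_git_dataset

load_ts_git_dataset(
    service_url=os.environ["TIMESCALE_SERVICE_URL"],
    collection_name="timescale_commits",
    num_records=100,
    partition_interval=timedelta(days=7),
)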
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~vald.py
"""Wrapper around Vald vector database.""" from __future__ import annotations from typing import Any, Iterable, List, Optional, Tuple, Type import numpy as np from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance class Vald(VectorStore): """Wrapper around Vald vector database. To use, you should have the ``vald-client-python`` python package installed. Example: .. code-block:: python from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Vald texts = ['foo', 'bar', 'baz'] vald = Vald.from_texts( texts=texts, embedding=HuggingFaceEmbeddings(), host="localhost", port=8080, skip_strict_exist_check=False, ) """ def __init__( self, embedding: Embeddings, host: str = "localhost", port: int = 8080, grpc_options: Tuple = ( ("grpc.keepalive_time_ms", 1000 * 10), ("grpc.keepalive_timeout_ms", 1000 * 10), ), grpc_use_secure: bool = False, grpc_credentials: Optional[Any] = None, ): self._embedding = embedding self.target = host + ":" + str(port) self.grpc_options = grpc_options self.grpc_use_secure = grpc_use_secure self.grpc_credentials = grpc_credentials @property def embeddings(self) -> Optional[Embeddings]: return self._embedding def _get_channel(self) -> Any: try: import grpc except ImportError: raise ValueError( "Could not import grpcio python package. " "Please install it with `pip install grpcio`." ) return ( grpc.secure_channel( self.target, self.grpc_credentials, options=self.grpc_options ) if self.grpc_use_secure else grpc.insecure_channel(self.target, options=self.grpc_options) ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, grpc_metadata: Optional[Any] = None, skip_strict_exist_check: bool = False, **kwargs: Any, ) -> List[str]: """ Args: skip_strict_exist_check: Deprecated. This is not used basically. """ try: from vald.v1.payload import payload_pb2 from vald.v1.vald import upsert_pb2_grpc except ImportError: raise ValueError( "Could not import vald-client-python python package. " "Please install it with `pip install vald-client-python`." ) channel = self._get_channel() # Depending on the network quality, # it is necessary to wait for ChannelConnectivity.READY. # _ = grpc.channel_ready_future(channel).result(timeout=10) stub = upsert_pb2_grpc.UpsertStub(channel) cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=skip_strict_exist_check) ids = [] embs = self._embedding.embed_documents(list(texts)) for text, emb in zip(texts, embs): vec = payload_pb2.Object.Vector(id=text, vector=emb) res = stub.Upsert( payload_pb2.Upsert.Request(vector=vec, config=cfg), metadata=grpc_metadata, ) ids.append(res.uuid) channel.close() return ids def delete( self, ids: Optional[List[str]] = None, skip_strict_exist_check: bool = False, grpc_metadata: Optional[Any] = None, **kwargs: Any, ) -> Optional[bool]: """ Args: skip_strict_exist_check: Deprecated. This is not used basically. """ try: from vald.v1.payload import payload_pb2 from vald.v1.vald import remove_pb2_grpc except ImportError: raise ValueError( "Could not import vald-client-python python package. " "Please install it with `pip install vald-client-python`." ) if ids is None: raise ValueError("No ids provided to delete") channel = self._get_channel() # Depending on the network quality, # it is necessary to wait for ChannelConnectivity.READY. 
# _ = grpc.channel_ready_future(channel).result(timeout=10) stub = remove_pb2_grpc.RemoveStub(channel) cfg = payload_pb2.Remove.Config(skip_strict_exist_check=skip_strict_exist_check) for _id in ids: oid = payload_pb2.Object.ID(id=_id) _ = stub.Remove( payload_pb2.Remove.Request(id=oid, config=cfg), metadata=grpc_metadata ) channel.close() return True def similarity_search( self, query: str, k: int = 4, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, grpc_metadata: Optional[Any] = None, **kwargs: Any, ) -> List[Document]: docs_and_scores = self.similarity_search_with_score( query, k, radius, epsilon, timeout, grpc_metadata ) docs = [] for doc, _ in docs_and_scores: docs.append(doc) return docs def similarity_search_with_score( self, query: str, k: int = 4, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, grpc_metadata: Optional[Any] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: emb = self._embedding.embed_query(query) docs_and_scores = self.similarity_search_with_score_by_vector( emb, k, radius, epsilon, timeout, grpc_metadata ) return docs_and_scores def similarity_search_by_vector( self, embedding: List[float], k: int = 4, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, grpc_metadata: Optional[Any] = None, **kwargs: Any, ) -> List[Document]: docs_and_scores = self.similarity_search_with_score_by_vector( embedding, k, radius, epsilon, timeout, grpc_metadata ) docs = [] for doc, _ in docs_and_scores: docs.append(doc) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, grpc_metadata: Optional[Any] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: try: from vald.v1.payload import payload_pb2 from vald.v1.vald import search_pb2_grpc except ImportError: raise ValueError( "Could not import vald-client-python python package. " "Please install it with `pip install vald-client-python`." ) channel = self._get_channel() # Depending on the network quality, # it is necessary to wait for ChannelConnectivity.READY. # _ = grpc.channel_ready_future(channel).result(timeout=10) stub = search_pb2_grpc.SearchStub(channel) cfg = payload_pb2.Search.Config( num=k, radius=radius, epsilon=epsilon, timeout=timeout ) res = stub.Search( payload_pb2.Search.Request(vector=embedding, config=cfg), metadata=grpc_metadata, ) docs_and_scores = [] for result in res.results: docs_and_scores.append((Document(page_content=result.id), result.distance)) channel.close() return docs_and_scores def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, grpc_metadata: Optional[Any] = None, **kwargs: Any, ) -> List[Document]: emb = self._embedding.embed_query(query) docs = self.max_marginal_relevance_search_by_vector( emb, k=k, fetch_k=fetch_k, radius=radius, epsilon=epsilon, timeout=timeout, lambda_mult=lambda_mult, grpc_metadata=grpc_metadata, ) return docs def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, grpc_metadata: Optional[Any] = None, **kwargs: Any, ) -> List[Document]: try: from vald.v1.payload import payload_pb2 from vald.v1.vald import object_pb2_grpc except ImportError: raise ValueError( "Could not import vald-client-python python package. 
" "Please install it with `pip install vald-client-python`." ) channel = self._get_channel() # Depending on the network quality, # it is necessary to wait for ChannelConnectivity.READY. # _ = grpc.channel_ready_future(channel).result(timeout=10) stub = object_pb2_grpc.ObjectStub(channel) docs_and_scores = self.similarity_search_with_score_by_vector( embedding, fetch_k=fetch_k, radius=radius, epsilon=epsilon, timeout=timeout, grpc_metadata=grpc_metadata, ) docs = [] embs = [] for doc, _ in docs_and_scores: vec = stub.GetObject( payload_pb2.Object.VectorRequest( id=payload_pb2.Object.ID(id=doc.page_content) ), metadata=grpc_metadata, ) embs.append(vec.vector) docs.append(doc) mmr = maximal_marginal_relevance( np.array(embedding), embs, lambda_mult=lambda_mult, k=k, ) channel.close() return [docs[i] for i in mmr] @classmethod def from_texts( cls: Type[Vald], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, host: str = "localhost", port: int = 8080, grpc_options: Tuple = ( ("grpc.keepalive_time_ms", 1000 * 10), ("grpc.keepalive_timeout_ms", 1000 * 10), ), grpc_use_secure: bool = False, grpc_credentials: Optional[Any] = None, grpc_metadata: Optional[Any] = None, skip_strict_exist_check: bool = False, **kwargs: Any, ) -> Vald: """ Args: skip_strict_exist_check: Deprecated. This is not used basically. """ vald = cls( embedding=embedding, host=host, port=port, grpc_options=grpc_options, grpc_use_secure=grpc_use_secure, grpc_credentials=grpc_credentials, **kwargs, ) vald.add_texts( texts=texts, metadatas=metadatas, grpc_metadata=grpc_metadata, skip_strict_exist_check=skip_strict_exist_check, ) return vald """We will support if there are any requests.""" # async def aadd_texts( # self, # texts: Iterable[str], # metadatas: Optional[List[dict]] = None, # **kwargs: Any, # ) -> List[str]: # pass # # def _select_relevance_score_fn(self) -> Callable[[float], float]: # pass # # def _similarity_search_with_relevance_scores( # self, # query: str, # k: int = 4, # **kwargs: Any, # ) -> List[Tuple[Document, float]]: # pass # # def similarity_search_with_relevance_scores( # self, # query: str, # k: int = 4, # **kwargs: Any, # ) -> List[Tuple[Document, float]]: # pass # # async def amax_marginal_relevance_search_by_vector( # self, # embedding: List[float], # k: int = 4, # fetch_k: int = 20, # lambda_mult: float = 0.5, # **kwargs: Any, # ) -> List[Document]: # pass # # @classmethod # async def afrom_texts( # cls: Type[VST], # texts: List[str], # embedding: Embeddings, # metadatas: Optional[List[dict]] = None, # **kwargs: Any, # ) -> VST: # pass
[]
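A sketch mirroring the Vald class docstring above; it assumes a Vald agent reachable on localhost:8080 and HuggingFaceEmbeddings installed:

# Illustrative sketch: build a Vald store from texts and query it.
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Vald

store = Vald.from_texts(
    texts=["foo", "bar", "baz"],
    embedding=HuggingFaceEmbeddings(),
    host="localhost",
    port=8080,
)
print([d.page_content for d in store.similarity_search("foo", k=2)])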
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~parsers~language~language_parser.py
from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob from langchain_community.document_loaders.parsers.language.cobol import CobolSegmenter from langchain_community.document_loaders.parsers.language.javascript import ( JavaScriptSegmenter, ) from langchain_community.document_loaders.parsers.language.python import PythonSegmenter if TYPE_CHECKING: from langchain.text_splitter import Language try: from langchain.text_splitter import Language LANGUAGE_EXTENSIONS: Dict[str, str] = { "py": Language.PYTHON, "js": Language.JS, "cobol": Language.COBOL, } LANGUAGE_SEGMENTERS: Dict[str, Any] = { Language.PYTHON: PythonSegmenter, Language.JS: JavaScriptSegmenter, Language.COBOL: CobolSegmenter, } except ImportError: LANGUAGE_EXTENSIONS = {} LANGUAGE_SEGMENTERS = {} class LanguageParser(BaseBlobParser): """Parse using the respective programming language syntax. Each top-level function and class in the code is loaded into separate documents. Furthermore, an extra document is generated, containing the remaining top-level code that excludes the already segmented functions and classes. This approach can potentially improve the accuracy of QA models over source code. Currently, the supported languages for code parsing are Python and JavaScript. The language used for parsing can be configured, along with the minimum number of lines required to activate the splitting based on syntax. Examples: .. code-block:: python from langchain.text_splitter.Language from langchain_community.document_loaders.generic import GenericLoader from langchain_community.document_loaders.parsers import LanguageParser loader = GenericLoader.from_filesystem( "./code", glob="**/*", suffixes=[".py", ".js"], parser=LanguageParser() ) docs = loader.load() Example instantiations to manually select the language: .. code-block:: python from langchain.text_splitter import Language loader = GenericLoader.from_filesystem( "./code", glob="**/*", suffixes=[".py"], parser=LanguageParser(language=Language.PYTHON) ) Example instantiations to set number of lines threshold: .. code-block:: python loader = GenericLoader.from_filesystem( "./code", glob="**/*", suffixes=[".py"], parser=LanguageParser(parser_threshold=200) ) """ def __init__(self, language: Optional[Language] = None, parser_threshold: int = 0): """ Language parser that split code using the respective language syntax. Args: language: If None (default), it will try to infer language from source. parser_threshold: Minimum lines needed to activate parsing (0 by default). 
""" self.language = language self.parser_threshold = parser_threshold def lazy_parse(self, blob: Blob) -> Iterator[Document]: code = blob.as_string() language = self.language or ( LANGUAGE_EXTENSIONS.get(blob.source.rsplit(".", 1)[-1]) if isinstance(blob.source, str) else None ) if language is None: yield Document( page_content=code, metadata={ "source": blob.source, }, ) return if self.parser_threshold >= len(code.splitlines()): yield Document( page_content=code, metadata={ "source": blob.source, "language": language, }, ) return self.Segmenter = LANGUAGE_SEGMENTERS[language] segmenter = self.Segmenter(blob.as_string()) if not segmenter.is_valid(): yield Document( page_content=code, metadata={ "source": blob.source, }, ) return for functions_classes in segmenter.extract_functions_classes(): yield Document( page_content=functions_classes, metadata={ "source": blob.source, "content_type": "functions_classes", "language": language, }, ) yield Document( page_content=segmenter.simplify_code(), metadata={ "source": blob.source, "content_type": "simplified_code", "language": language, }, )
[ "functions_classes", "simplified_code" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~geodataframe.py
from typing import Any, Iterator, List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class GeoDataFrameLoader(BaseLoader): """Load `geopandas` Dataframe.""" def __init__(self, data_frame: Any, page_content_column: str = "geometry"): """Initialize with geopandas Dataframe. Args: data_frame: geopandas DataFrame object. page_content_column: Name of the column containing the page content. Defaults to "geometry". """ try: import geopandas as gpd except ImportError: raise ImportError( "geopandas package not found, please install it with " "`pip install geopandas`" ) if not isinstance(data_frame, gpd.GeoDataFrame): raise ValueError( f"Expected data_frame to be a gpd.GeoDataFrame, got {type(data_frame)}" ) if page_content_column not in data_frame.columns: raise ValueError( f"Expected data_frame to have a column named {page_content_column}" ) if not isinstance(data_frame[page_content_column], gpd.GeoSeries): raise ValueError( f"Expected data_frame[{page_content_column}] to be a GeoSeries" ) self.data_frame = data_frame self.page_content_column = page_content_column def lazy_load(self) -> Iterator[Document]: """Lazy load records from dataframe.""" # assumes all geometries in GeoSeries are same CRS and Geom Type crs_str = self.data_frame.crs.to_string() if self.data_frame.crs else None geometry_type = self.data_frame.geometry.geom_type.iloc[0] for _, row in self.data_frame.iterrows(): geom = row[self.page_content_column] xmin, ymin, xmax, ymax = geom.bounds metadata = row.to_dict() metadata["crs"] = crs_str metadata["geometry_type"] = geometry_type metadata["xmin"] = xmin metadata["ymin"] = ymin metadata["xmax"] = xmax metadata["ymax"] = ymax metadata.pop(self.page_content_column) # using WKT instead of str() to help GIS system interoperability yield Document(page_content=geom.wkt, metadata=metadata) def load(self) -> List[Document]: """Load full dataframe.""" return list(self.lazy_load())
[]
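A hypothetical usage of the GeoDataFrameLoader above; "parcels.shp" is a placeholder file and geopandas is assumed to be installed:

# Illustrative sketch: geometry becomes WKT page content, bounds and CRS become metadata.
import geopandas as gpd
from langchain_community.document_loaders.geodataframe import GeoDataFrameLoader

gdf = gpd.read_file("parcels.shp")
docs = GeoDataFrameLoader(gdf, page_content_column="geometry").load()
print(docs[0].metadata["crs"], docs[0].metadata["geometry_type"])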
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~trello.py
from __future__ import annotations from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple from libs.core.langchain_core.documents import Document from libs.core.langchain_core.utils import get_from_env from langchain_community.document_loaders.base import BaseLoader if TYPE_CHECKING: from trello import Board, Card, TrelloClient class TrelloLoader(BaseLoader): """Load cards from a `Trello` board.""" def __init__( self, client: TrelloClient, board_name: str, *, include_card_name: bool = True, include_comments: bool = True, include_checklist: bool = True, card_filter: Literal["closed", "open", "all"] = "all", extra_metadata: Tuple[str, ...] = ("due_date", "labels", "list", "closed"), ): """Initialize Trello loader. Args: client: Trello API client. board_name: The name of the Trello board. include_card_name: Whether to include the name of the card in the document. include_comments: Whether to include the comments on the card in the document. include_checklist: Whether to include the checklist on the card in the document. card_filter: Filter on card status. Valid values are "closed", "open", "all". extra_metadata: List of additional metadata fields to include as document metadata.Valid values are "due_date", "labels", "list", "closed". """ self.client = client self.board_name = board_name self.include_card_name = include_card_name self.include_comments = include_comments self.include_checklist = include_checklist self.extra_metadata = extra_metadata self.card_filter = card_filter @classmethod def from_credentials( cls, board_name: str, *, api_key: Optional[str] = None, token: Optional[str] = None, **kwargs: Any, ) -> TrelloLoader: """Convenience constructor that builds TrelloClient init param for you. Args: board_name: The name of the Trello board. api_key: Trello API key. Can also be specified as environment variable TRELLO_API_KEY. token: Trello token. Can also be specified as environment variable TRELLO_TOKEN. include_card_name: Whether to include the name of the card in the document. include_comments: Whether to include the comments on the card in the document. include_checklist: Whether to include the checklist on the card in the document. card_filter: Filter on card status. Valid values are "closed", "open", "all". extra_metadata: List of additional metadata fields to include as document metadata.Valid values are "due_date", "labels", "list", "closed". """ try: from trello import TrelloClient # type: ignore except ImportError as ex: raise ImportError( "Could not import trello python package. " "Please install it with `pip install py-trello`." ) from ex api_key = api_key or get_from_env("api_key", "TRELLO_API_KEY") token = token or get_from_env("token", "TRELLO_TOKEN") client = TrelloClient(api_key=api_key, token=token) return cls(client, board_name, **kwargs) def load(self) -> List[Document]: """Loads all cards from the specified Trello board. You can filter the cards, metadata and text included by using the optional parameters. Returns: A list of documents, one for each card in the board. 
""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError as ex: raise ImportError( "`beautifulsoup4` package not found, please run" " `pip install beautifulsoup4`" ) from ex board = self._get_board() # Create a dictionary with the list IDs as keys and the list names as values list_dict = {list_item.id: list_item.name for list_item in board.list_lists()} # Get Cards on the board cards = board.get_cards(card_filter=self.card_filter) return [self._card_to_doc(card, list_dict) for card in cards] def _get_board(self) -> Board: # Find the first board with a matching name board = next( (b for b in self.client.list_boards() if b.name == self.board_name), None ) if not board: raise ValueError(f"Board `{self.board_name}` not found.") return board def _card_to_doc(self, card: Card, list_dict: dict) -> Document: from bs4 import BeautifulSoup # type: ignore text_content = "" if self.include_card_name: text_content = card.name + "\n" if card.description.strip(): text_content += BeautifulSoup(card.description, "lxml").get_text() if self.include_checklist: # Get all the checklist items on the card for checklist in card.checklists: if checklist.items: items = [ f"{item['name']}:{item['state']}" for item in checklist.items ] text_content += f"\n{checklist.name}\n" + "\n".join(items) if self.include_comments: # Get all the comments on the card comments = [ BeautifulSoup(comment["data"]["text"], "lxml").get_text() for comment in card.comments ] text_content += "Comments:" + "\n".join(comments) # Default metadata fields metadata = { "title": card.name, "id": card.id, "url": card.url, } # Extra metadata fields. Card object is not subscriptable. if "labels" in self.extra_metadata: metadata["labels"] = [label.name for label in card.labels] if "list" in self.extra_metadata: if card.list_id in list_dict: metadata["list"] = list_dict[card.list_id] if "closed" in self.extra_metadata: metadata["closed"] = card.closed if "due_date" in self.extra_metadata: metadata["due_date"] = card.due_date return Document(page_content=text_content, metadata=metadata)
[]
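A hypothetical usage of TrelloLoader.from_credentials above; "Roadmap" is a placeholder board name, and TRELLO_API_KEY / TRELLO_TOKEN are assumed to be set in the environment:

# Illustrative sketch: load open cards from a board with selected metadata.
from langchain_community.document_loaders.trello import TrelloLoader

loader = TrelloLoader.from_credentials(
    "Roadmap",
    card_filter="open",
    extra_metadata=("list", "labels"),
)
docs = loader.load()
print(docs[0].metadata["title"], docs[0].metadata.get("list"))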
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~mlflow_ai_gateway.py
import logging import warnings from typing import Any, Dict, List, Mapping, Optional from libs.core.langchain_core.callbacks import ( CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.chat_models import BaseChatModel from libs.core.langchain_core.messages import ( AIMessage, BaseMessage, ChatMessage, FunctionMessage, HumanMessage, SystemMessage, ) from libs.core.langchain_core.outputs import ( ChatGeneration, ChatResult, ) from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra logger = logging.getLogger(__name__) # Ignoring type because below is valid pydantic code # Unexpected keyword argument "extra" for "__init_subclass__" of "object" [call-arg] class ChatParams(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Parameters for the `MLflow AI Gateway` LLM.""" temperature: float = 0.0 candidate_count: int = 1 """The number of candidates to return.""" stop: Optional[List[str]] = None max_tokens: Optional[int] = None class ChatMLflowAIGateway(BaseChatModel): """`MLflow AI Gateway` chat models API. To use, you should have the ``mlflow[gateway]`` python package installed. For more information, see https://mlflow.org/docs/latest/gateway/index.html. Example: .. code-block:: python from langchain_community.chat_models import ChatMLflowAIGateway chat = ChatMLflowAIGateway( gateway_uri="<your-mlflow-ai-gateway-uri>", route="<your-mlflow-ai-gateway-chat-route>", params={ "temperature": 0.1 } ) """ def __init__(self, **kwargs: Any): warnings.warn( "`ChatMLflowAIGateway` is deprecated. Use `ChatMlflow` or " "`ChatDatabricks` instead.", DeprecationWarning, ) try: import mlflow.gateway except ImportError as e: raise ImportError( "Could not import `mlflow.gateway` module. " "Please install it with `pip install mlflow[gateway]`." ) from e super().__init__(**kwargs) if self.gateway_uri: mlflow.gateway.set_gateway_uri(self.gateway_uri) route: str gateway_uri: Optional[str] = None params: Optional[ChatParams] = None @property def _default_params(self) -> Dict[str, Any]: params: Dict[str, Any] = { "gateway_uri": self.gateway_uri, "route": self.route, **(self.params.dict() if self.params else {}), } return params def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: try: import mlflow.gateway except ImportError as e: raise ImportError( "Could not import `mlflow.gateway` module. " "Please install it with `pip install mlflow[gateway]`." 
) from e message_dicts = [ ChatMLflowAIGateway._convert_message_to_dict(message) for message in messages ] data: Dict[str, Any] = { "messages": message_dicts, **(self.params.dict() if self.params else {}), } resp = mlflow.gateway.query(self.route, data=data) return ChatMLflowAIGateway._create_chat_result(resp) @property def _identifying_params(self) -> Dict[str, Any]: return self._default_params def _get_invocation_params( self, stop: Optional[List[str]] = None, **kwargs: Any ) -> Dict[str, Any]: """Get the parameters used to invoke the model FOR THE CALLBACKS.""" return { **self._default_params, **super()._get_invocation_params(stop=stop, **kwargs), } @property def _llm_type(self) -> str: """Return type of chat model.""" return "mlflow-ai-gateway-chat" @staticmethod def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: role = _dict["role"] content = _dict["content"] if role == "user": return HumanMessage(content=content) elif role == "assistant": return AIMessage(content=content) elif role == "system": return SystemMessage(content=content) else: return ChatMessage(content=content, role=role) @staticmethod def _raise_functions_not_supported() -> None: raise ValueError( "Function messages are not supported by the MLflow AI Gateway. Please" " create a feature request at https://github.com/mlflow/mlflow/issues." ) @staticmethod def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} elif isinstance(message, FunctionMessage): raise ValueError( "Function messages are not supported by the MLflow AI Gateway. Please" " create a feature request at https://github.com/mlflow/mlflow/issues." ) else: raise ValueError(f"Got unknown message type: {message}") if "function_call" in message.additional_kwargs: ChatMLflowAIGateway._raise_functions_not_supported() if message.additional_kwargs: logger.warning( "Additional message arguments are unsupported by MLflow AI Gateway " " and will be ignored: %s", message.additional_kwargs, ) return message_dict @staticmethod def _create_chat_result(response: Mapping[str, Any]) -> ChatResult: generations = [] for candidate in response["candidates"]: message = ChatMLflowAIGateway._convert_dict_to_message(candidate["message"]) message_metadata = candidate.get("metadata", {}) gen = ChatGeneration( message=message, generation_info=dict(message_metadata), ) generations.append(gen) response_metadata = response.get("metadata", {}) return ChatResult(generations=generations, llm_output=response_metadata)
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~chat_models~test_litellm.py
"""Test Anthropic API wrapper.""" from typing import List from libs.core.langchain_core.callbacks import ( CallbackManager, ) from libs.core.langchain_core.messages import AIMessage, BaseMessage, HumanMessage from libs.core.langchain_core.outputs import ChatGeneration, LLMResult from langchain_community.chat_models.litellm import ChatLiteLLM from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_litellm_call() -> None: """Test valid call to litellm.""" chat = ChatLiteLLM( model="test", ) message = HumanMessage(content="Hello") response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_litellm_generate() -> None: """Test generate method of anthropic.""" chat = ChatLiteLLM(model="test") chat_messages: List[List[BaseMessage]] = [ [HumanMessage(content="How many toes do dogs have?")] ] messages_copy = [messages.copy() for messages in chat_messages] result: LLMResult = chat.generate(chat_messages) assert isinstance(result, LLMResult) for response in result.generations[0]: assert isinstance(response, ChatGeneration) assert isinstance(response.text, str) assert response.text == response.message.content assert chat_messages == messages_copy def test_litellm_streaming() -> None: """Test streaming tokens from anthropic.""" chat = ChatLiteLLM(model="test", streaming=True) message = HumanMessage(content="Hello") response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_litellm_streaming_callback() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = ChatLiteLLM( model="test", streaming=True, callback_manager=callback_manager, verbose=True, ) message = HumanMessage(content="Write me a sentence with 10 words.") chat([message]) assert callback_handler.llm_streams > 1
[ "How many toes do dogs have?", "Write me a sentence with 10 words.", "Hello" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~jaguar.py
from __future__ import annotations import json import logging from typing import Any, List, Optional, Tuple from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore logger = logging.getLogger(__name__) class Jaguar(VectorStore): """`Jaguar API` vector store. See http://www.jaguardb.com See http://github.com/fserv/jaguar-sdk Example: .. code-block:: python from langchain_community.vectorstores.jaguar import Jaguar vectorstore = Jaguar( pod = 'vdb', store = 'mystore', vector_index = 'v', vector_type = 'cosine_fraction_float', vector_dimension = 1536, url='http://192.168.8.88:8080/fwww/', embedding=openai_model ) """ def __init__( self, pod: str, store: str, vector_index: str, vector_type: str, vector_dimension: int, url: str, embedding: Embeddings, ): self._pod = pod self._store = store self._vector_index = vector_index self._vector_type = vector_type self._vector_dimension = vector_dimension self._embedding = embedding try: from jaguardb_http_client.JaguarHttpClient import JaguarHttpClient except ImportError: raise ValueError( "Could not import jaguardb-http-client python package. " "Please install it with `pip install -U jaguardb-http-client`" ) self._jag = JaguarHttpClient(url) self._token = "" def login( self, jaguar_api_key: Optional[str] = "", ) -> bool: """ login to jaguardb server with a jaguar_api_key or let self._jag find a key Args: pod (str): name of a Pod store (str): name of a vector store optional jaguar_api_key (str): API key of user to jaguardb server Returns: True if successful; False if not successful """ if jaguar_api_key == "": jaguar_api_key = self._jag.getApiKey() self._jaguar_api_key = jaguar_api_key self._token = self._jag.login(jaguar_api_key) if self._token == "": logger.error("E0001 error init(): invalid jaguar_api_key") return False return True def create( self, metadata_str: str, text_size: int, ) -> None: """ create the vector store on the backend database Args: metadata_str (str): columns and their types Returns: True if successful; False if not successful """ podstore = self._pod + "." + self._store """ source column is required. v:text column is required. """ q = "create store " q += podstore q += f" ({self._vector_index} vector({self._vector_dimension}," q += f" '{self._vector_type}')," q += f" source char(256), v:text char({text_size})," q += metadata_str + ")" self.run(q) def run(self, query: str, withFile: bool = False) -> dict: """ Run any query statement in jaguardb Args: query (str): query statement to jaguardb Returns: None for invalid token, or json result string """ if self._token == "": logger.error(f"E0005 error run({query})") return {} resp = self._jag.post(query, self._token, withFile) txt = resp.text try: js = json.loads(txt) return js except Exception: return {} @property def embeddings(self) -> Optional[Embeddings]: return self._embedding def add_texts( self, texts: List[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """ Add texts through the embeddings and add to the vectorstore. Args: texts: list of text strings to add to the jaguar vector store. metadatas: Optional list of metadatas associated with the texts. 
[{"m1": "v11", "m2": "v12", "m3": "v13", "filecol": "path_file1.jpg" }, {"m1": "v21", "m2": "v22", "m3": "v23", "filecol": "path_file2.jpg" }, {"m1": "v31", "m2": "v32", "m3": "v33", "filecol": "path_file3.jpg" }, {"m1": "v41", "m2": "v42", "m3": "v43", "filecol": "path_file4.jpg" }] kwargs: vector_index=name_of_vector_index file_column=name_of_file_column Returns: List of ids from adding the texts into the vectorstore """ vcol = self._vector_index filecol = kwargs.get("file_column", "") podstorevcol = self._pod + "." + self._store + "." + vcol q = "textcol " + podstorevcol js = self.run(q) if js == "": return [] textcol = js["data"] embeddings = self._embedding.embed_documents(list(texts)) ids = [] if metadatas is None: ### no meta and no files to upload i = 0 for vec in embeddings: str_vec = [str(x) for x in vec] values_comma = ",".join(str_vec) podstore = self._pod + "." + self._store q = "insert into " + podstore + " (" q += vcol + "," + textcol + ") values ('" + values_comma q += "','" + texts[i] + "')" js = self.run(q, False) ids.append(js["zid"]) i += 1 else: i = 0 for vec in embeddings: str_vec = [str(x) for x in vec] nvec, vvec, filepath = self._parseMeta(metadatas[i], filecol) if filecol != "": rc = self._jag.postFile(self._token, filepath, 1) if not rc: return [] names_comma = ",".join(nvec) names_comma += "," + vcol ## col1,col2,col3,vecl values_comma = "'" + "','".join(vvec) + "'" ### 'va1','val2','val3' values_comma += ",'" + ",".join(str_vec) + "'" ### 'v1,v2,v3' podstore = self._pod + "." + self._store q = "insert into " + podstore + " (" q += names_comma + "," + textcol + ") values (" + values_comma q += ",'" + texts[i] + "')" if filecol != "": js = self.run(q, True) else: js = self.run(q, False) ids.append(js["zid"]) i += 1 return ids def similarity_search_with_score( self, query: str, k: int = 3, fetch_k: int = -1, where: Optional[str] = None, score_threshold: Optional[float] = -1.0, metadatas: Optional[List[str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Return Jaguar documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 3. lambda_val: lexical match parameter for hybrid search. where: the where clause in select similarity. For example a where can be "rating > 3.0 and (state = 'NV' or state = 'CA')" score_threshold: minimal score threshold for the result. If defined, results with score less than this value will be filtered out. kwargs: vector_index=vcol, vector_type=cosine_fraction_float Returns: List of Documents most similar to the query and score for each. List of Tuples of (doc, similarity_score): [ (doc, score), (doc, score), ...] """ vcol = self._vector_index vtype = self._vector_type embeddings = self._embedding.embed_query(query) str_embeddings = [str(f) for f in embeddings] qv_comma = ",".join(str_embeddings) podstore = self._pod + "." 
+ self._store q = ( "select similarity(" + vcol + ",'" + qv_comma + "','topk=" + str(k) + ",fetch_k=" + str(fetch_k) + ",type=" + vtype ) q += ",with_score=yes,with_text=yes,score_threshold=" + str(score_threshold) if metadatas is not None: meta = "&".join(metadatas) q += ",metadata=" + meta q += "') from " + podstore if where is not None: q += " where " + where jarr = self.run(q) if jarr is None: return [] docs_with_score = [] for js in jarr: score = js["score"] text = js["text"] zid = js["zid"] ### give metadatas md = {} md["zid"] = zid if metadatas is not None: for m in metadatas: mv = js[m] md[m] = mv doc = Document(page_content=text, metadata=md) tup = (doc, score) docs_with_score.append(tup) return docs_with_score def similarity_search( self, query: str, k: int = 3, where: Optional[str] = None, metadatas: Optional[List[str]] = None, **kwargs: Any, ) -> List[Document]: """ Return Jaguar documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. where: the where clause in select similarity. For example a where can be "rating > 3.0 and (state = 'NV' or state = 'CA')" Returns: List of Documents most similar to the query """ docs_and_scores = self.similarity_search_with_score( query, k=k, where=where, metadatas=metadatas, **kwargs ) return [doc for doc, _ in docs_and_scores] def is_anomalous( self, query: str, **kwargs: Any, ) -> bool: """ Detect if given text is anomalous from the dataset Args: query: Text to detect if it is anomaly Returns: True or False """ vcol = self._vector_index vtype = self._vector_type embeddings = self._embedding.embed_query(query) str_embeddings = [str(f) for f in embeddings] qv_comma = ",".join(str_embeddings) podstore = self._pod + "." + self._store q = "select anomalous(" + vcol + ", '" + qv_comma + "', 'type=" + vtype + "')" q += " from " + podstore js = self.run(q) if isinstance(js, list) and len(js) == 0: return False jd = json.loads(js[0]) if jd["anomalous"] == "YES": return True return False @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, url: str, pod: str, store: str, vector_index: str, vector_type: str, vector_dimension: int, metadatas: Optional[List[dict]] = None, jaguar_api_key: Optional[str] = "", **kwargs: Any, ) -> Jaguar: jagstore = cls( pod, store, vector_index, vector_type, vector_dimension, url, embedding ) jagstore.login(jaguar_api_key) jagstore.clear() jagstore.add_texts(texts, metadatas, **kwargs) return jagstore def clear(self) -> None: """ Delete all records in jaguardb Args: No args Returns: None """ podstore = self._pod + "." + self._store q = "truncate store " + podstore self.run(q) def delete(self, zids: List[str], **kwargs: Any) -> None: """ Delete records in jaguardb by a list of zero-ids Args: pod (str): name of a Pod ids (List[str]): a list of zid as string Returns: Do not return anything """ podstore = self._pod + "." + self._store for zid in zids: q = "delete from " + podstore + " where zid='" + zid + "'" self.run(q) def count(self) -> int: """ Count records of a store in jaguardb Args: no args Returns: (int) number of records in pod store """ podstore = self._pod + "." + self._store q = "select count() from " + podstore js = self.run(q) if isinstance(js, list) and len(js) == 0: return 0 jd = json.loads(js[0]) return int(jd["data"]) def drop(self) -> None: """ Drop or remove a store in jaguardb Args: no args Returns: None """ podstore = self._pod + "." 
+ self._store q = "drop store " + podstore self.run(q) def logout(self) -> None: """ Logout to cleanup resources Args: no args Returns: None """ self._jag.logout(self._token) def prt(self, msg: str) -> None: with open("/tmp/debugjaguar.log", "a") as file: print(f"msg={msg}", file=file, flush=True) def _parseMeta(self, nvmap: dict, filecol: str) -> Tuple[List[str], List[str], str]: filepath = "" if filecol == "": nvec = list(nvmap.keys()) vvec = list(nvmap.values()) else: nvec = [] vvec = [] if filecol in nvmap: nvec.append(filecol) vvec.append(nvmap[filecol]) filepath = nvmap[filecol] for k, v in nvmap.items(): if k != filecol: nvec.append(k) vvec.append(v) return nvec, vvec, filepath
[]
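A sketch of the Jaguar store above, following its docstring; the URL, pod/store names, metadata column, and OpenAIEmbeddings are placeholders, and a running jaguardb server is assumed:

# Illustrative sketch: create a Jaguar store, add texts, and query.
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores.jaguar import Jaguar

store = Jaguar(
    pod="vdb",
    store="mystore",
    vector_index="v",
    vector_type="cosine_fraction_float",
    vector_dimension=1536,
    url="http://192.168.8.88:8080/fwww/",
    embedding=OpenAIEmbeddings(),
)
store.login()  # falls back to a key discovered by the client if none is passed
store.create(metadata_str="author char(64)", text_size=1024)
store.add_texts(["foo", "bar"], metadatas=[{"author": "alice"}, {"author": "bob"}])
print(store.similarity_search("foo", k=1, metadatas=["author"]))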
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~clarifai.py
import logging from typing import Dict, List, Optional from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class ClarifaiEmbeddings(BaseModel, Embeddings): """Clarifai embedding models. To use, you should have the ``clarifai`` python package installed, and the environment variable ``CLARIFAI_PAT`` set with your personal access token or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain_community.embeddings import ClarifaiEmbeddings clarifai = ClarifaiEmbeddings(user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID) (or) clarifai_llm = Clarifai(model_url=EXAMPLE_URL) """ model_url: Optional[str] = None """Model url to use.""" model_id: Optional[str] = None """Model id to use.""" model_version_id: Optional[str] = None """Model version id to use.""" app_id: Optional[str] = None """Clarifai application id to use.""" user_id: Optional[str] = None """Clarifai user id to use.""" pat: Optional[str] = None """Clarifai personal access token to use.""" api_base: str = "https://api.clarifai.com" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that we have all required info to access Clarifai platform and python package exists in environment.""" values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT") user_id = values.get("user_id") app_id = values.get("app_id") model_id = values.get("model_id") model_url = values.get("model_url") if model_url is not None and model_id is not None: raise ValueError("Please provide either model_url or model_id, not both.") if model_url is None and model_id is None: raise ValueError("Please provide one of model_url or model_id.") if model_url is None and model_id is not None: if user_id is None or app_id is None: raise ValueError("Please provide a user_id and app_id.") return values def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to Clarifai's embedding models. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ try: from clarifai.client.input import Inputs from clarifai.client.model import Model except ImportError: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) if self.pat is not None: pat = self.pat if self.model_url is not None: _model_init = Model(url=self.model_url, pat=pat) else: _model_init = Model( model_id=self.model_id, user_id=self.user_id, app_id=self.app_id, pat=pat, ) input_obj = Inputs(pat=pat) batch_size = 32 embeddings = [] try: for i in range(0, len(texts), batch_size): batch = texts[i : i + batch_size] input_batch = [ input_obj.get_text_input(input_id=str(id), raw_text=inp) for id, inp in enumerate(batch) ] predict_response = _model_init.predict(input_batch) embeddings.extend( [ list(output.data.embeddings[0].vector) for output in predict_response.outputs ] ) except Exception as e: logger.error(f"Predict failed, exception: {e}") return embeddings def embed_query(self, text: str) -> List[float]: """Call out to Clarifai's embedding models. Args: text: The text to embed. Returns: Embeddings for the text. """ try: from clarifai.client.model import Model except ImportError: raise ImportError( "Could not import clarifai python package. 
" "Please install it with `pip install clarifai`." ) if self.pat is not None: pat = self.pat if self.model_url is not None: _model_init = Model(url=self.model_url, pat=pat) else: _model_init = Model( model_id=self.model_id, user_id=self.user_id, app_id=self.app_id, pat=pat, ) try: predict_response = _model_init.predict_by_bytes( bytes(text, "utf-8"), input_type="text" ) embeddings = [ list(op.data.embeddings[0].vector) for op in predict_response.outputs ] except Exception as e: logger.error(f"Predict failed, exception: {e}") return embeddings[0]
[]
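A hedged usage sketch for the ClarifaiEmbeddings record above; CLARIFAI_PAT is assumed to be set, and the user/app/model identifiers are placeholders as in the class docstring:

# Illustrative sketch: embed a batch of texts with a Clarifai model.
from langchain_community.embeddings import ClarifaiEmbeddings

embedder = ClarifaiEmbeddings(user_id="USER_ID", app_id="APP_ID", model_id="MODEL_ID")
vectors = embedder.embed_documents(["hello world", "goodbye world"])
print(len(vectors), len(vectors[0]))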
2024-01-10
mth93/langchain
libs~langchain~langchain~retrievers~multi_query.py
import asyncio import logging from typing import List, Sequence from libs.core.langchain_core.documents import Document from libs.core.langchain_core.prompts.prompt import PromptTemplate from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from libs.core.langchain_core.retrievers import BaseRetriever from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain.chains.llm import LLMChain from langchain.llms.base import BaseLLM from langchain.output_parsers.pydantic import PydanticOutputParser logger = logging.getLogger(__name__) class LineList(BaseModel): """List of lines.""" lines: List[str] = Field(description="Lines of text") """List of lines.""" class LineListOutputParser(PydanticOutputParser): """Output parser for a list of lines.""" def __init__(self) -> None: super().__init__(pydantic_object=LineList) def parse(self, text: str) -> LineList: lines = text.strip().split("\n") return LineList(lines=lines) # Default prompt DEFAULT_QUERY_PROMPT = PromptTemplate( input_variables=["question"], template="""You are an AI language model assistant. Your task is to generate 3 different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of distance-based similarity search. Provide these alternative questions separated by newlines. Original question: {question}""", ) def _unique_documents(documents: Sequence[Document]) -> List[Document]: return [doc for i, doc in enumerate(documents) if doc not in documents[:i]] class MultiQueryRetriever(BaseRetriever): """Given a query, use an LLM to write a set of queries. Retrieve docs for each query. Return the unique union of all retrieved docs. """ retriever: BaseRetriever llm_chain: LLMChain verbose: bool = True parser_key: str = "lines" include_original: bool = False """Whether to include the original query in the list of generated queries.""" @classmethod def from_llm( cls, retriever: BaseRetriever, llm: BaseLLM, prompt: PromptTemplate = DEFAULT_QUERY_PROMPT, parser_key: str = "lines", include_original: bool = False, ) -> "MultiQueryRetriever": """Initialize from llm using default template. Args: retriever: retriever to query documents from llm: llm for query generation using DEFAULT_QUERY_PROMPT include_original: Whether to include the original query in the list of generated queries. Returns: MultiQueryRetriever """ output_parser = LineListOutputParser() llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser) return cls( retriever=retriever, llm_chain=llm_chain, parser_key=parser_key, include_original=include_original, ) async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, ) -> List[Document]: """Get relevant documents given a user query. Args: question: user query Returns: Unique union of relevant documents from all generated queries """ queries = await self.agenerate_queries(query, run_manager) if self.include_original: queries.append(query) documents = await self.aretrieve_documents(queries, run_manager) return self.unique_union(documents) async def agenerate_queries( self, question: str, run_manager: AsyncCallbackManagerForRetrieverRun ) -> List[str]: """Generate queries based upon user input. 
Args: question: user query Returns: List of LLM generated queries that are similar to the user input """ response = await self.llm_chain.acall( inputs={"question": question}, callbacks=run_manager.get_child() ) lines = getattr(response["text"], self.parser_key, []) if self.verbose: logger.info(f"Generated queries: {lines}") return lines async def aretrieve_documents( self, queries: List[str], run_manager: AsyncCallbackManagerForRetrieverRun ) -> List[Document]: """Run all LLM generated queries. Args: queries: query list Returns: List of retrieved Documents """ document_lists = await asyncio.gather( *( self.retriever.aget_relevant_documents( query, callbacks=run_manager.get_child() ) for query in queries ) ) return [doc for docs in document_lists for doc in docs] def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, ) -> List[Document]: """Get relevant documents given a user query. Args: question: user query Returns: Unique union of relevant documents from all generated queries """ queries = self.generate_queries(query, run_manager) if self.include_original: queries.append(query) documents = self.retrieve_documents(queries, run_manager) return self.unique_union(documents) def generate_queries( self, question: str, run_manager: CallbackManagerForRetrieverRun ) -> List[str]: """Generate queries based upon user input. Args: question: user query Returns: List of LLM generated queries that are similar to the user input """ response = self.llm_chain( {"question": question}, callbacks=run_manager.get_child() ) lines = getattr(response["text"], self.parser_key, []) if self.verbose: logger.info(f"Generated queries: {lines}") return lines def retrieve_documents( self, queries: List[str], run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """Run all LLM generated queries. Args: queries: query list Returns: List of retrieved Documents """ documents = [] for query in queries: docs = self.retriever.get_relevant_documents( query, callbacks=run_manager.get_child() ) documents.extend(docs) return documents def unique_union(self, documents: List[Document]) -> List[Document]: """Get unique Documents. Args: documents: List of retrieved Documents Returns: List of unique retrieved Documents """ return _unique_documents(documents)
[ "question", "You are an AI language model assistant. Your task is \n to generate 3 different versions of the given user \n question to retrieve relevant documents from a vector database. \n By generating multiple perspectives on the user question, \n your goal is to help the user overcome some of the limitations \n of distance-based similarity search. Provide these alternative \n questions separated by newlines. Original question: {question}" ]
2024-01-10
mth93/langchain
libs~langchain~tests~integration_tests~cache~test_cassandra.py
"""Test Cassandra caches. Requires a running vector-capable Cassandra cluster.""" import os import time from typing import Any, Iterator, Tuple import pytest from libs.core.langchain_core.outputs import Generation, LLMResult from langchain.cache import CassandraCache, CassandraSemanticCache from langchain.globals import get_llm_cache, set_llm_cache from tests.integration_tests.cache.fake_embeddings import FakeEmbeddings from tests.unit_tests.llms.fake_llm import FakeLLM @pytest.fixture(scope="module") def cassandra_connection() -> Iterator[Tuple[Any, str]]: from cassandra.cluster import Cluster keyspace = "langchain_cache_test_keyspace" # get db connection if "CASSANDRA_CONTACT_POINTS" in os.environ: contact_points = os.environ["CONTACT_POINTS"].split(",") cluster = Cluster(contact_points) else: cluster = Cluster() # session = cluster.connect() # ensure keyspace exists session.execute( ( f"CREATE KEYSPACE IF NOT EXISTS {keyspace} " f"WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}" ) ) yield (session, keyspace) def test_cassandra_cache(cassandra_connection: Tuple[Any, str]) -> None: session, keyspace = cassandra_connection cache = CassandraCache(session=session, keyspace=keyspace) set_llm_cache(cache) llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update("foo", llm_string, [Generation(text="fizz")]) output = llm.generate(["foo"]) print(output) expected_output = LLMResult( generations=[[Generation(text="fizz")]], llm_output={}, ) print(expected_output) assert output == expected_output cache.clear() def test_cassandra_cache_ttl(cassandra_connection: Tuple[Any, str]) -> None: session, keyspace = cassandra_connection cache = CassandraCache(session=session, keyspace=keyspace, ttl_seconds=2) set_llm_cache(cache) llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update("foo", llm_string, [Generation(text="fizz")]) expected_output = LLMResult( generations=[[Generation(text="fizz")]], llm_output={}, ) output = llm.generate(["foo"]) assert output == expected_output time.sleep(2.5) # entry has expired away. output = llm.generate(["foo"]) assert output != expected_output cache.clear() def test_cassandra_semantic_cache(cassandra_connection: Tuple[Any, str]) -> None: session, keyspace = cassandra_connection sem_cache = CassandraSemanticCache( session=session, keyspace=keyspace, embedding=FakeEmbeddings(), ) set_llm_cache(sem_cache) llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update("foo", llm_string, [Generation(text="fizz")]) output = llm.generate(["bar"]) # same embedding as 'foo' expected_output = LLMResult( generations=[[Generation(text="fizz")]], llm_output={}, ) assert output == expected_output # clear the cache sem_cache.clear() output = llm.generate(["bar"]) # 'fizz' is erased away now assert output != expected_output sem_cache.clear()
[]
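Outside the test suite, a rough sketch of enabling the cache looks like the following; it assumes a locally reachable Cassandra cluster and reuses the keyspace bootstrap pattern from the fixture above.

from cassandra.cluster import Cluster
from langchain.cache import CassandraCache
from langchain.globals import set_llm_cache

session = Cluster().connect()  # assumes Cassandra on localhost
session.execute(
    "CREATE KEYSPACE IF NOT EXISTS demo_ks "
    "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"
)

# From here on, repeated identical LLM calls are served from Cassandra.
set_llm_cache(CassandraCache(session=session, keyspace="demo_ks"))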
2024-01-10
mth93/langchain
libs~community~langchain_community~document_transformers~doctran_text_extract.py
from typing import Any, List, Optional, Sequence from libs.core.langchain_core.documents import BaseDocumentTransformer, Document from libs.core.langchain_core.utils import get_from_env class DoctranPropertyExtractor(BaseDocumentTransformer): """Extract properties from text documents using doctran. Arguments: properties: A list of the properties to extract. openai_api_key: OpenAI API key. Can also be specified via environment variable ``OPENAI_API_KEY``. Example: .. code-block:: python from langchain_community.document_transformers import DoctranPropertyExtractor properties = [ { "name": "category", "description": "What type of email this is.", "type": "string", "enum": ["update", "action_item", "customer_feedback", "announcement", "other"], "required": True, }, { "name": "mentions", "description": "A list of all people mentioned in this email.", "type": "array", "items": { "name": "full_name", "description": "The full name of the person mentioned.", "type": "string", }, "required": True, }, { "name": "eli5", "description": "Explain this email to me like I'm 5 years old.", "type": "string", "required": True, }, ] # Pass in openai_api_key or set env var OPENAI_API_KEY property_extractor = DoctranPropertyExtractor(properties) transformed_document = await qa_transformer.atransform_documents(documents) """ # noqa: E501 def __init__( self, properties: List[dict], openai_api_key: Optional[str] = None, openai_api_model: Optional[str] = None, ) -> None: self.properties = properties self.openai_api_key = openai_api_key or get_from_env( "openai_api_key", "OPENAI_API_KEY" ) self.openai_api_model = openai_api_model or get_from_env( "openai_api_model", "OPENAI_API_MODEL" ) async def atransform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: raise NotImplementedError def transform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: """Extracts properties from text documents using doctran.""" try: from doctran import Doctran, ExtractProperty doctran = Doctran( openai_api_key=self.openai_api_key, openai_model=self.openai_api_model ) except ImportError: raise ImportError( "Install doctran to use this parser. (pip install doctran)" ) properties = [ExtractProperty(**property) for property in self.properties] for d in documents: doctran_doc = ( doctran.parse(content=d.page_content) .extract(properties=properties) .execute() ) d.metadata["extracted_properties"] = doctran_doc.extracted_properties return documents
[]
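A minimal synchronous sketch of the extractor; the property schema, the sample document, and the "gpt-3.5-turbo" model name are placeholders, and an OpenAI API key is assumed to be available via OPENAI_API_KEY.

from libs.core.langchain_core.documents import Document
from langchain_community.document_transformers import DoctranPropertyExtractor

properties = [
    {
        "name": "category",
        "description": "What type of email this is.",
        "type": "string",
        "enum": ["update", "action_item", "other"],
        "required": True,
    }
]
extractor = DoctranPropertyExtractor(
    properties=properties,
    openai_api_model="gpt-3.5-turbo",  # placeholder model name
)
docs = [Document(page_content="Reminder: the launch moves to Friday.")]
docs = extractor.transform_documents(docs)  # the async variant is not implemented
print(docs[0].metadata["extracted_properties"])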
2024-01-10
mth93/langchain
libs~community~langchain_community~agent_toolkits~playwright~toolkit.py
"""Playwright web browser toolkit.""" from __future__ import annotations from typing import TYPE_CHECKING, List, Optional, Type, cast from libs.core.langchain_core.pydantic_v1 import Extra, root_validator from libs.core.langchain_core.tools import BaseTool from langchain_community.agent_toolkits.base import BaseToolkit from langchain_community.tools.playwright.base import ( BaseBrowserTool, lazy_import_playwright_browsers, ) from langchain_community.tools.playwright.click import ClickTool from langchain_community.tools.playwright.current_page import CurrentWebPageTool from langchain_community.tools.playwright.extract_hyperlinks import ( ExtractHyperlinksTool, ) from langchain_community.tools.playwright.extract_text import ExtractTextTool from langchain_community.tools.playwright.get_elements import GetElementsTool from langchain_community.tools.playwright.navigate import NavigateTool from langchain_community.tools.playwright.navigate_back import NavigateBackTool if TYPE_CHECKING: from playwright.async_api import Browser as AsyncBrowser from playwright.sync_api import Browser as SyncBrowser else: try: # We do this so pydantic can resolve the types when instantiating from playwright.async_api import Browser as AsyncBrowser from playwright.sync_api import Browser as SyncBrowser except ImportError: pass class PlayWrightBrowserToolkit(BaseToolkit): """Toolkit for PlayWright browser tools. **Security Note**: This toolkit provides code to control a web-browser. Careful if exposing this toolkit to end-users. The tools in the toolkit are capable of navigating to arbitrary webpages, clicking on arbitrary elements, and extracting arbitrary text and hyperlinks from webpages. Specifically, by default this toolkit allows navigating to: - Any URL (including any internal network URLs) - And local files If exposing to end-users, consider limiting network access to the server that hosts the agent; in addition, consider it is advised to create a custom NavigationTool wht an args_schema that limits the URLs that can be navigated to (e.g., only allow navigating to URLs that start with a particular prefix). Remember to scope permissions to the minimal permissions necessary for the application. If the default tool selection is not appropriate for the application, consider creating a custom toolkit with the appropriate tools. See https://python.langchain.com/docs/security for more information. 
""" sync_browser: Optional["SyncBrowser"] = None async_browser: Optional["AsyncBrowser"] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator def validate_imports_and_browser_provided(cls, values: dict) -> dict: """Check that the arguments are valid.""" lazy_import_playwright_browsers() if values.get("async_browser") is None and values.get("sync_browser") is None: raise ValueError("Either async_browser or sync_browser must be specified.") return values def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" tool_classes: List[Type[BaseBrowserTool]] = [ ClickTool, NavigateTool, NavigateBackTool, ExtractTextTool, ExtractHyperlinksTool, GetElementsTool, CurrentWebPageTool, ] tools = [ tool_cls.from_browser( sync_browser=self.sync_browser, async_browser=self.async_browser ) for tool_cls in tool_classes ] return cast(List[BaseTool], tools) @classmethod def from_browser( cls, sync_browser: Optional[SyncBrowser] = None, async_browser: Optional[AsyncBrowser] = None, ) -> PlayWrightBrowserToolkit: """Instantiate the toolkit.""" # This is to raise a better error than the forward ref ones Pydantic would have lazy_import_playwright_browsers() return cls(sync_browser=sync_browser, async_browser=async_browser)
[]
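A short sketch of building the toolkit from a synchronous Playwright browser; it assumes the playwright package is installed and its browsers have been set up with `playwright install`.

from playwright.sync_api import sync_playwright

from langchain_community.agent_toolkits.playwright.toolkit import (
    PlayWrightBrowserToolkit,
)

playwright = sync_playwright().start()
browser = playwright.chromium.launch(headless=True)

toolkit = PlayWrightBrowserToolkit.from_browser(sync_browser=browser)
for tool in toolkit.get_tools():
    print(tool.name)  # e.g. click_element, navigate_browser, extract_text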
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~tongyi.py
from __future__ import annotations import asyncio import functools import logging from typing import ( Any, AsyncIterator, Callable, Dict, Iterator, List, Mapping, Optional, ) from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.llms import BaseLLM from libs.core.langchain_core.outputs import Generation, GenerationChunk, LLMResult from libs.core.langchain_core.pydantic_v1 import Field, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env from requests.exceptions import HTTPError from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) logger = logging.getLogger(__name__) def _create_retry_decorator(llm: Tongyi) -> Callable[[Any], Any]: min_seconds = 1 max_seconds = 4 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterward return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(HTTPError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def check_response(resp: Any) -> Any: """Check the response from the completion call.""" if resp.status_code == 200: return resp elif resp.status_code in [400, 401]: raise ValueError( f"status_code: {resp.status_code} \n " f"code: {resp.code} \n message: {resp.message}" ) else: raise HTTPError( f"HTTP error occurred: status_code: {resp.status_code} \n " f"code: {resp.code} \n message: {resp.message}", response=resp, ) def generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _generate_with_retry(**_kwargs: Any) -> Any: resp = llm.client.call(**_kwargs) return check_response(resp) return _generate_with_retry(**kwargs) def stream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _stream_generate_with_retry(**_kwargs: Any) -> Any: responses = llm.client.call(**_kwargs) for resp in responses: yield check_response(resp) return _stream_generate_with_retry(**kwargs) async def astream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any: """Because the dashscope SDK doesn't provide an async API, we wrap `stream_generate_with_retry` with an async generator.""" class _AioTongyiGenerator: def __init__(self, _llm: Tongyi, **_kwargs: Any): self.generator = stream_generate_with_retry(_llm, **_kwargs) def __aiter__(self) -> AsyncIterator[Any]: return self async def __anext__(self) -> Any: value = await asyncio.get_running_loop().run_in_executor( None, self._safe_next ) if value is not None: return value else: raise StopAsyncIteration def _safe_next(self) -> Any: try: return next(self.generator) except StopIteration: return None async for chunk in _AioTongyiGenerator(llm, **kwargs): yield chunk class Tongyi(BaseLLM): """Tongyi Qwen large language models. To use, you should have the ``dashscope`` python package installed, and the environment variable ``DASHSCOPE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. 
code-block:: python from langchain_community.llms import Tongyi tongyi = tongyi() """ @property def lc_secrets(self) -> Dict[str, str]: return {"dashscope_api_key": "DASHSCOPE_API_KEY"} client: Any #: :meta private: model_name: str = "qwen-plus" """Model name to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) top_p: float = 0.8 """Total probability mass of tokens to consider at each step.""" dashscope_api_key: Optional[str] = None """Dashscope api key provide by Alibaba Cloud.""" streaming: bool = False """Whether to stream the results or not.""" max_retries: int = 10 """Maximum number of retries to make when generating.""" @property def _llm_type(self) -> str: """Return type of llm.""" return "tongyi" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["dashscope_api_key"] = get_from_dict_or_env( values, "dashscope_api_key", "DASHSCOPE_API_KEY" ) try: import dashscope except ImportError: raise ImportError( "Could not import dashscope python package. " "Please install it with `pip install dashscope`." ) try: values["client"] = dashscope.Generation except AttributeError: raise ValueError( "`dashscope` has no `Generation` attribute, this is likely " "due to an old version of the dashscope package. Try upgrading it " "with `pip install --upgrade dashscope`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Tongyi Qwen API.""" normal_params = { "model": self.model_name, "top_p": self.top_p, "api_key": self.dashscope_api_key, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: return {"model_name": self.model_name, **super()._identifying_params} def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: generations = [] if self.streaming: if len(prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") generation: Optional[GenerationChunk] = None for chunk in self._stream(prompts[0], stop, run_manager, **kwargs): if generation is None: generation = chunk else: generation += chunk assert generation is not None generations.append([self._chunk_to_generation(generation)]) else: params: Dict[str, Any] = self._invocation_params(stop=stop, **kwargs) for prompt in prompts: completion = generate_with_retry(self, prompt=prompt, **params) generations.append( [Generation(**self._generation_from_qwen_resp(completion))] ) return LLMResult( generations=generations, llm_output={ "model_name": self.model_name, }, ) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: generations = [] if self.streaming: if len(prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") generation: Optional[GenerationChunk] = None async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs): if generation is None: generation = chunk else: generation += chunk assert generation is not None generations.append([self._chunk_to_generation(generation)]) else: params: Dict[str, Any] = self._invocation_params(stop=stop, **kwargs) for prompt in prompts: completion = await asyncio.get_running_loop().run_in_executor( None, functools.partial( generate_with_retry, **{"llm": self, "prompt": prompt, **params} ), ) generations.append( 
[Generation(**self._generation_from_qwen_resp(completion))] ) return LLMResult( generations=generations, llm_output={ "model_name": self.model_name, }, ) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: params: Dict[str, Any] = self._invocation_params( stop=stop, stream=True, **kwargs ) for stream_resp in stream_generate_with_retry(self, prompt=prompt, **params): chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp)) yield chunk if run_manager: run_manager.on_llm_new_token( chunk.text, chunk=chunk, verbose=self.verbose, ) async def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: params: Dict[str, Any] = self._invocation_params( stop=stop, stream=True, **kwargs ) async for stream_resp in astream_generate_with_retry( self, prompt=prompt, **params ): chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp)) yield chunk if run_manager: await run_manager.on_llm_new_token( chunk.text, chunk=chunk, verbose=self.verbose, ) def _invocation_params(self, stop: Any, **kwargs: Any) -> Dict[str, Any]: params = { **self._default_params, **kwargs, } if stop is not None: params["stop"] = stop if params.get("stream"): params["incremental_output"] = True return params @staticmethod def _generation_from_qwen_resp(resp: Any) -> Dict[str, Any]: return dict( text=resp["output"]["text"], generation_info=dict( finish_reason=resp["output"]["finish_reason"], request_id=resp["request_id"], token_usage=dict(resp["usage"]), ), ) @staticmethod def _chunk_to_generation(chunk: GenerationChunk) -> Generation: return Generation( text=chunk.text, generation_info=chunk.generation_info, )
[]
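A brief usage sketch for the wrapper above; it assumes DASHSCOPE_API_KEY is set and that the account has access to the qwen-plus model.

from langchain_community.llms import Tongyi

llm = Tongyi(model_name="qwen-plus", top_p=0.8)
print(llm.invoke("Summarize the Dream of the Red Chamber in one sentence."))

# Streaming delivers incremental GenerationChunks via the _stream path above.
for chunk in llm.stream("Count from 1 to 5."):
    print(chunk, end="", flush=True)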
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_loaders~slack.py
import json import logging import re import zipfile from pathlib import Path from typing import Dict, Iterator, List, Union from libs.core.langchain_core.chat_sessions import ChatSession from libs.core.langchain_core.messages import AIMessage, HumanMessage from langchain_community.chat_loaders.base import BaseChatLoader logger = logging.getLogger(__name__) class SlackChatLoader(BaseChatLoader): """Load `Slack` conversations from a dump zip file.""" def __init__( self, path: Union[str, Path], ): """ Initialize the chat loader with the path to the exported Slack dump zip file. :param path: Path to the exported Slack dump zip file. """ self.zip_path = path if isinstance(path, Path) else Path(path) if not self.zip_path.exists(): raise FileNotFoundError(f"File {self.zip_path} not found") def _load_single_chat_session(self, messages: List[Dict]) -> ChatSession: results: List[Union[AIMessage, HumanMessage]] = [] previous_sender = None for message in messages: if not isinstance(message, dict): continue text = message.get("text", "") timestamp = message.get("ts", "") sender = message.get("user", "") if not sender: continue skip_pattern = re.compile( r"<@U\d+> has joined the channel", flags=re.IGNORECASE ) if skip_pattern.match(text): continue if sender == previous_sender: results[-1].content += "\n\n" + text results[-1].additional_kwargs["events"].append( {"message_time": timestamp} ) else: results.append( HumanMessage( role=sender, content=text, additional_kwargs={ "sender": sender, "events": [{"message_time": timestamp}], }, ) ) previous_sender = sender return ChatSession(messages=results) def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]: """Read JSON data from a zip subfile.""" with zip_file.open(file_path, "r") as f: data = json.load(f) if not isinstance(data, list): raise ValueError(f"Expected list of dictionaries, got {type(data)}") return data def lazy_load(self) -> Iterator[ChatSession]: """ Lazy load the chat sessions from the Slack dump file and yield them in the required format. :return: Iterator of chat sessions containing messages. """ with zipfile.ZipFile(str(self.zip_path), "r") as zip_file: for file_path in zip_file.namelist(): if file_path.endswith(".json"): messages = self._read_json(zip_file, file_path) yield self._load_single_chat_session(messages)
[]
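A minimal sketch of reading a Slack export with the loader above; the zip path is a placeholder for a real workspace export.

from langchain_community.chat_loaders.slack import SlackChatLoader

loader = SlackChatLoader(path="slack_export.zip")  # placeholder path
for session in loader.lazy_load():
    for message in session["messages"]:
        print(type(message).__name__, str(message.content)[:80])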
2024-01-10
mth93/langchain
libs~community~langchain_community~callbacks~arize_callback.py
from datetime import datetime from typing import Any, Dict, List, Optional from libs.core.langchain_core.agents import AgentAction, AgentFinish from libs.core.langchain_core.callbacks import BaseCallbackHandler from libs.core.langchain_core.outputs import LLMResult from langchain_community.callbacks.utils import import_pandas class ArizeCallbackHandler(BaseCallbackHandler): """Callback Handler that logs to Arize.""" def __init__( self, model_id: Optional[str] = None, model_version: Optional[str] = None, SPACE_KEY: Optional[str] = None, API_KEY: Optional[str] = None, ) -> None: """Initialize callback handler.""" super().__init__() self.model_id = model_id self.model_version = model_version self.space_key = SPACE_KEY self.api_key = API_KEY self.prompt_records: List[str] = [] self.response_records: List[str] = [] self.prediction_ids: List[str] = [] self.pred_timestamps: List[int] = [] self.response_embeddings: List[float] = [] self.prompt_embeddings: List[float] = [] self.prompt_tokens = 0 self.completion_tokens = 0 self.total_tokens = 0 self.step = 0 from arize.pandas.embeddings import EmbeddingGenerator, UseCases from arize.pandas.logger import Client self.generator = EmbeddingGenerator.from_use_case( use_case=UseCases.NLP.SEQUENCE_CLASSIFICATION, model_name="distilbert-base-uncased", tokenizer_max_length=512, batch_size=256, ) self.arize_client = Client(space_key=SPACE_KEY, api_key=API_KEY) if SPACE_KEY == "SPACE_KEY" or API_KEY == "API_KEY": raise ValueError("❌ CHANGE SPACE AND API KEYS") else: print("✅ Arize client setup done! Now you can start using Arize!") def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: for prompt in prompts: self.prompt_records.append(prompt.replace("\n", "")) def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Do nothing.""" pass def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: pd = import_pandas() from arize.utils.types import ( EmbeddingColumnNames, Environments, ModelTypes, Schema, ) # Safe check if 'llm_output' and 'token_usage' exist if response.llm_output and "token_usage" in response.llm_output: self.prompt_tokens = response.llm_output["token_usage"].get( "prompt_tokens", 0 ) self.total_tokens = response.llm_output["token_usage"].get( "total_tokens", 0 ) self.completion_tokens = response.llm_output["token_usage"].get( "completion_tokens", 0 ) else: self.prompt_tokens = ( self.total_tokens ) = self.completion_tokens = 0 # assign default value for generations in response.generations: for generation in generations: prompt = self.prompt_records[self.step] self.step = self.step + 1 prompt_embedding = pd.Series( self.generator.generate_embeddings( text_col=pd.Series(prompt.replace("\n", " ")) ).reset_index(drop=True) ) # Assigning text to response_text instead of response response_text = generation.text.replace("\n", " ") response_embedding = pd.Series( self.generator.generate_embeddings( text_col=pd.Series(generation.text.replace("\n", " ")) ).reset_index(drop=True) ) pred_timestamp = datetime.now().timestamp() # Define the columns and data columns = [ "prediction_ts", "response", "prompt", "response_vector", "prompt_vector", "prompt_token", "completion_token", "total_token", ] data = [ [ pred_timestamp, response_text, prompt, response_embedding[0], prompt_embedding[0], self.prompt_tokens, self.total_tokens, self.completion_tokens, ] ] # Create the DataFrame df = pd.DataFrame(data, columns=columns) # Declare prompt and response columns prompt_columns = EmbeddingColumnNames( 
vector_column_name="prompt_vector", data_column_name="prompt" ) response_columns = EmbeddingColumnNames( vector_column_name="response_vector", data_column_name="response" ) schema = Schema( timestamp_column_name="prediction_ts", tag_column_names=[ "prompt_token", "completion_token", "total_token", ], prompt_column_names=prompt_columns, response_column_names=response_columns, ) response_from_arize = self.arize_client.log( dataframe=df, schema=schema, model_id=self.model_id, model_version=self.model_version, model_type=ModelTypes.GENERATIVE_LLM, environment=Environments.PRODUCTION, ) if response_from_arize.status_code == 200: print("✅ Successfully logged data to Arize!") else: print(f'❌ Logging failed "{response_from_arize.text}"') def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing.""" pass def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: pass def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Do nothing.""" pass def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing.""" pass def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any, ) -> None: pass def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Do nothing.""" pass def on_tool_end( self, output: str, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: pass def on_tool_error(self, error: BaseException, **kwargs: Any) -> None: pass def on_text(self, text: str, **kwargs: Any) -> None: pass def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: pass
[ "prompt_vector", "\n", " " ]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~gmail~get_message.py
import base64 import email from typing import Dict, Optional, Type from libs.core.langchain_core.callbacks import CallbackManagerForToolRun from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from langchain_community.tools.gmail.base import GmailBaseTool from langchain_community.tools.gmail.utils import clean_email_body class SearchArgsSchema(BaseModel): """Input for GetMessageTool.""" message_id: str = Field( ..., description="The unique ID of the email message, retrieved from a search.", ) class GmailGetMessage(GmailBaseTool): """Tool that gets a message by ID from Gmail.""" name: str = "get_gmail_message" description: str = ( "Use this tool to fetch an email by message ID." " Returns the thread ID, snippet, body, subject, and sender." ) args_schema: Type[SearchArgsSchema] = SearchArgsSchema def _run( self, message_id: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> Dict: """Run the tool.""" query = ( self.api_resource.users() .messages() .get(userId="me", format="raw", id=message_id) ) message_data = query.execute() raw_message = base64.urlsafe_b64decode(message_data["raw"]) email_msg = email.message_from_bytes(raw_message) subject = email_msg["Subject"] sender = email_msg["From"] message_body = "" if email_msg.is_multipart(): for part in email_msg.walk(): ctype = part.get_content_type() cdispo = str(part.get("Content-Disposition")) if ctype == "text/plain" and "attachment" not in cdispo: message_body = part.get_payload(decode=True).decode("utf-8") break else: message_body = email_msg.get_payload(decode=True).decode("utf-8") body = clean_email_body(message_body) return { "id": message_id, "threadId": message_data["threadId"], "snippet": message_data["snippet"], "body": body, "subject": subject, "sender": sender, }
[]
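A hedged sketch of calling the tool; build_resource_service lives in this package's gmail utils module and assumes Gmail OAuth credentials (credentials.json/token.json) are already configured locally, and the message id is a placeholder obtained from a prior search.

from langchain_community.tools.gmail.get_message import GmailGetMessage
from langchain_community.tools.gmail.utils import build_resource_service

api_resource = build_resource_service()  # uses local OAuth credentials
tool = GmailGetMessage(api_resource=api_resource)

result = tool.run({"message_id": "<message-id-from-a-search>"})  # placeholder id
print(result["subject"], result["sender"])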
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~docusaurus.py
"""Load Documents from Docusarus Documentation""" from typing import Any, List, Optional from langchain_community.document_loaders.sitemap import SitemapLoader class DocusaurusLoader(SitemapLoader): """Load from Docusaurus Documentation. It leverages the SitemapLoader to loop through the generated pages of a Docusaurus Documentation website and extracts the content by looking for specific HTML tags. By default, the parser searches for the main content of the Docusaurus page, which is normally the <article>. You can also define your own custom HTML tags by providing them as a list, for example: ["div", ".main", "a"]. """ def __init__( self, url: str, custom_html_tags: Optional[List[str]] = None, **kwargs: Any, ): """Initialize DocusaurusLoader Args: url: The base URL of the Docusaurus website. custom_html_tags: Optional custom html tags to extract content from pages. kwargs: Additional args to extend the underlying SitemapLoader, for example: filter_urls, blocksize, meta_function, is_local, continue_on_failure """ if not kwargs.get("is_local"): url = f"{url}/sitemap.xml" self.custom_html_tags = custom_html_tags or ["main article"] super().__init__( url, parsing_function=kwargs.get("parsing_function") or self._parsing_function, **kwargs, ) def _parsing_function(self, content: Any) -> str: """Parses specific elements from a Docusaurus page.""" relevant_elements = content.select(",".join(self.custom_html_tags)) for element in relevant_elements: if element not in relevant_elements: element.decompose() return str(content.get_text())
[]
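A short sketch of pointing the loader at a Docusaurus site; the URL and filter pattern are illustrative, and beautifulsoup4/lxml are assumed to be installed for the underlying SitemapLoader.

from langchain_community.document_loaders import DocusaurusLoader

loader = DocusaurusLoader(
    "https://python.langchain.com",
    filter_urls=["https://python.langchain.com/docs/integrations"],
)
docs = loader.load()
print(len(docs), docs[0].metadata.get("source"))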
2024-01-10
mth93/langchain
libs~langchain~langchain~retrievers~re_phraser.py
import logging
from typing import List

from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.prompts.prompt import PromptTemplate
from libs.core.langchain_core.retrievers import BaseRetriever

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM

logger = logging.getLogger(__name__)

# Default template
DEFAULT_TEMPLATE = """You are an assistant tasked with taking a natural language \
query from a user and converting it into a query for a vectorstore. \
In this process, you strip out information that is not relevant for \
the retrieval task. Here is the user query: {question}"""

# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate.from_template(DEFAULT_TEMPLATE)


class RePhraseQueryRetriever(BaseRetriever):
    """Given a query, use an LLM to re-phrase it.
    Then, retrieve docs for the re-phrased query."""

    retriever: BaseRetriever
    llm_chain: LLMChain

    @classmethod
    def from_llm(
        cls,
        retriever: BaseRetriever,
        llm: BaseLLM,
        prompt: PromptTemplate = DEFAULT_QUERY_PROMPT,
    ) -> "RePhraseQueryRetriever":
        """Initialize from llm using default template.

        The prompt used here expects a single input: `question`

        Args:
            retriever: retriever to query documents from
            llm: llm for query generation using DEFAULT_QUERY_PROMPT
            prompt: prompt template for query generation

        Returns:
            RePhraseQueryRetriever
        """
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(
            retriever=retriever,
            llm_chain=llm_chain,
        )

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """Get relevant documents given a user question.

        Args:
            query: user question

        Returns:
            Relevant documents for re-phrased question
        """
        response = self.llm_chain(query, callbacks=run_manager.get_child())
        re_phrased_question = response["text"]
        logger.info(f"Re-phrased question: {re_phrased_question}")
        docs = self.retriever.get_relevant_documents(
            re_phrased_question, callbacks=run_manager.get_child()
        )
        return docs

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
    ) -> List[Document]:
        raise NotImplementedError
[ "You are an assistant tasked with taking a natural language query from a user and converting it into a query for a vectorstore. In this process, you strip out information that is not relevant for the retrieval task. Here is the user query: {question}" ]
2024-01-10
mth93/langchain
libs~langchain~langchain~memory~entity.py
import logging from abc import ABC, abstractmethod from itertools import islice from typing import Any, Dict, Iterable, List, Optional from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.messages import BaseMessage, get_buffer_string from libs.core.langchain_core.prompts import BasePromptTemplate from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, ENTITY_SUMMARIZATION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key from langchain.utilities.redis import get_client logger = logging.getLogger(__name__) class BaseEntityStore(BaseModel, ABC): """Abstract base class for Entity store.""" @abstractmethod def get(self, key: str, default: Optional[str] = None) -> Optional[str]: """Get entity value from store.""" pass @abstractmethod def set(self, key: str, value: Optional[str]) -> None: """Set entity value in store.""" pass @abstractmethod def delete(self, key: str) -> None: """Delete entity value from store.""" pass @abstractmethod def exists(self, key: str) -> bool: """Check if entity exists in store.""" pass @abstractmethod def clear(self) -> None: """Delete all entities from store.""" pass class InMemoryEntityStore(BaseEntityStore): """In-memory Entity store.""" store: Dict[str, Optional[str]] = {} def get(self, key: str, default: Optional[str] = None) -> Optional[str]: return self.store.get(key, default) def set(self, key: str, value: Optional[str]) -> None: self.store[key] = value def delete(self, key: str) -> None: del self.store[key] def exists(self, key: str) -> bool: return key in self.store def clear(self) -> None: return self.store.clear() class UpstashRedisEntityStore(BaseEntityStore): """Upstash Redis backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ def __init__( self, session_id: str = "default", url: str = "", token: str = "", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: from upstash_redis import Redis except ImportError: raise ImportError( "Could not import upstash_redis python package. " "Please install it with `pip install upstash_redis`." 
) super().__init__(*args, **kwargs) try: self.redis_client = Redis(url=url, token=token) except Exception: logger.error("Upstash Redis instance could not be initiated.") self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'") return res def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 def clear(self) -> None: def scan_and_delete(cursor: int) -> int: cursor, keys_to_delete = self.redis_client.scan( cursor, f"{self.full_key_prefix}:*" ) self.redis_client.delete(*keys_to_delete) return cursor cursor = scan_and_delete(0) while cursor != 0: scan_and_delete(cursor) class RedisEntityStore(BaseEntityStore): """Redis-backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ redis_client: Any session_id: str = "default" key_prefix: str = "memory_store" ttl: Optional[int] = 60 * 60 * 24 recall_ttl: Optional[int] = 60 * 60 * 24 * 3 def __init__( self, session_id: str = "default", url: str = "redis://localhost:6379/0", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: import redis except ImportError: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." 
) super().__init__(*args, **kwargs) try: self.redis_client = get_client(redis_url=url, decode_responses=True) except redis.exceptions.ConnectionError as error: logger.error(error) self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'") return res def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 def clear(self) -> None: # iterate a list in batches of size batch_size def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]: iterator = iter(iterable) while batch := list(islice(iterator, batch_size)): yield batch for keybatch in batched( self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500 ): self.redis_client.delete(*keybatch) class SQLiteEntityStore(BaseEntityStore): """SQLite-backed Entity store""" session_id: str = "default" table_name: str = "memory_store" def __init__( self, session_id: str = "default", db_file: str = "entities.db", table_name: str = "memory_store", *args: Any, **kwargs: Any, ): try: import sqlite3 except ImportError: raise ImportError( "Could not import sqlite3 python package. " "Please install it with `pip install sqlite3`." ) super().__init__(*args, **kwargs) self.conn = sqlite3.connect(db_file) self.session_id = session_id self.table_name = table_name self._create_table_if_not_exists() @property def full_table_name(self) -> str: return f"{self.table_name}_{self.session_id}" def _create_table_if_not_exists(self) -> None: create_table_query = f""" CREATE TABLE IF NOT EXISTS {self.full_table_name} ( key TEXT PRIMARY KEY, value TEXT ) """ with self.conn: self.conn.execute(create_table_query) def get(self, key: str, default: Optional[str] = None) -> Optional[str]: query = f""" SELECT value FROM {self.full_table_name} WHERE key = ? """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() if result is not None: value = result[0] return value return default def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) query = f""" INSERT OR REPLACE INTO {self.full_table_name} (key, value) VALUES (?, ?) """ with self.conn: self.conn.execute(query, (key, value)) def delete(self, key: str) -> None: query = f""" DELETE FROM {self.full_table_name} WHERE key = ? """ with self.conn: self.conn.execute(query, (key,)) def exists(self, key: str) -> bool: query = f""" SELECT 1 FROM {self.full_table_name} WHERE key = ? LIMIT 1 """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() return result is not None def clear(self) -> None: query = f""" DELETE FROM {self.full_table_name} """ with self.conn: self.conn.execute(query) class ConversationEntityMemory(BaseChatMemory): """Entity extractor & summarizer memory. Extracts named entities from the recent chat history and generates summaries. 
With a swappable entity store, persisting entities across conversations. Defaults to an in-memory entity store, and can be swapped out for a Redis, SQLite, or other entity store. """ human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT # Cache of recently detected entity names, if any # It is updated when load_memory_variables is called: entity_cache: List[str] = [] # Number of recent message pairs to consider when updating entities: k: int = 3 chat_history_key: str = "history" # Store to manage entity-related data: entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore) @property def buffer(self) -> List[BaseMessage]: """Access chat memory messages.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return ["entities", self.chat_history_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """ Returns chat history and all generated entities with summaries if available, and updates or clears the recent entity cache. New entity name can be found when calling this method, before the entity summaries are generated, so the entity cache values may be empty if no entity descriptions are generated yet. """ # Create an LLMChain for predicting entity names from the recent chat history: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) # Generates a comma-separated list of named entities, # e.g. "Jane, White House, UFO" # or "NONE" if no named entities are extracted: output = chain.predict( history=buffer_string, input=inputs[prompt_input_key], ) # If no named entities are extracted, assigns an empty list. if output.strip() == "NONE": entities = [] else: # Make a list of the extracted entities: entities = [w.strip() for w in output.split(",")] # Make a dictionary of entities with summary if exists: entity_summaries = {} for entity in entities: entity_summaries[entity] = self.entity_store.get(entity, "") # Replaces the entity name cache with the most recently discussed entities, # or if no entities were extracted, clears the cache: self.entity_cache = entities # Should we return as message objects or as a string? if self.return_messages: # Get last `k` pair of chat messages: buffer: Any = self.buffer[-self.k * 2 :] else: # Reuse the string we made earlier: buffer = buffer_string return { self.chat_history_key: buffer, "entities": entity_summaries, } def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """ Save context from this conversation history to the entity store. Generates a summary for each entity in the entity cache by prompting the model, and saves these summaries to the entity store. 
""" super().save_context(inputs, outputs) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) input_data = inputs[prompt_input_key] # Create an LLMChain for predicting entity summarization from the context chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt) # Generate new summaries for entities and save them in the entity store for entity in self.entity_cache: # Get existing summary if it exists existing_summary = self.entity_store.get(entity, "") output = chain.predict( summary=existing_summary, entity=entity, history=buffer_string, input=input_data, ) # Save the updated summary to the entity store self.entity_store.set(entity, output.strip()) def clear(self) -> None: """Clear memory contents.""" self.chat_memory.clear() self.entity_cache.clear() self.entity_store.clear()
[]
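A compact sketch of exercising the memory directly, without a chain; the OpenAI LLM is an illustrative choice and the default in-memory entity store is used.

from langchain.memory import ConversationEntityMemory
from langchain_community.llms import OpenAI

memory = ConversationEntityMemory(llm=OpenAI(temperature=0), k=3)

inputs = {"input": "Deven and Sam are working on a hackathon project."}
print(memory.load_memory_variables(inputs))   # extracts entity names into the cache
memory.save_context(inputs, {"output": "That sounds like a great project!"})
print(memory.entity_store.get("Deven"))       # summary generated for the entity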
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~minimax.py
"""Wrapper around Minimax chat models.""" import logging from typing import Any, Dict, List, Optional, cast from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.chat_models import BaseChatModel from libs.core.langchain_core.messages import ( AIMessage, BaseMessage, HumanMessage, ) from libs.core.langchain_core.outputs import ChatResult from langchain_community.llms.minimax import MinimaxCommon from langchain_community.llms.utils import enforce_stop_tokens logger = logging.getLogger(__name__) def _parse_message(msg_type: str, text: str) -> Dict: return {"sender_type": msg_type, "text": text} def _parse_chat_history(history: List[BaseMessage]) -> List: """Parse a sequence of messages into history.""" chat_history = [] for message in history: content = cast(str, message.content) if isinstance(message, HumanMessage): chat_history.append(_parse_message("USER", content)) if isinstance(message, AIMessage): chat_history.append(_parse_message("BOT", content)) return chat_history class MiniMaxChat(MinimaxCommon, BaseChatModel): """Wrapper around Minimax large language models. To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and ``MINIMAX_API_KEY`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain_community.chat_models import MiniMaxChat llm = MiniMaxChat(model_name="abab5-chat") """ def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. Code chat does not support context. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. """ if not messages: raise ValueError( "You should provide at least one message to start the chat!" ) history = _parse_chat_history(messages) payload = self._default_params payload["messages"] = history text = self._client.post(payload) # This is required since the stop are not enforced by the model parameters return text if stop is None else enforce_stop_tokens(text, stop) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: raise NotImplementedError( """Minimax AI doesn't support async requests at the moment.""" )
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~arcgis_loader.py
"""Document Loader for ArcGIS FeatureLayers.""" from __future__ import annotations import json import re import warnings from datetime import datetime, timezone from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Union from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader if TYPE_CHECKING: import arcgis _NOT_PROVIDED = "(Not Provided)" class ArcGISLoader(BaseLoader): """Load records from an ArcGIS FeatureLayer.""" def __init__( self, layer: Union[str, arcgis.features.FeatureLayer], gis: Optional[arcgis.gis.GIS] = None, where: str = "1=1", out_fields: Optional[Union[List[str], str]] = None, return_geometry: bool = False, result_record_count: Optional[int] = None, lyr_desc: Optional[str] = None, **kwargs: Any, ): try: import arcgis except ImportError as e: raise ImportError( "arcgis is required to use the ArcGIS Loader. " "Install it with pip or conda." ) from e try: from bs4 import BeautifulSoup # type: ignore self.BEAUTIFULSOUP = BeautifulSoup except ImportError: warnings.warn("BeautifulSoup not found. HTML will not be parsed.") self.BEAUTIFULSOUP = None self.gis = gis or arcgis.gis.GIS() if isinstance(layer, str): self.url = layer self.layer = arcgis.features.FeatureLayer(layer, gis=gis) else: self.url = layer.url self.layer = layer self.layer_properties = self._get_layer_properties(lyr_desc) self.where = where if isinstance(out_fields, str): self.out_fields = out_fields elif out_fields is None: self.out_fields = "*" else: self.out_fields = ",".join(out_fields) self.return_geometry = return_geometry self.result_record_count = result_record_count self.return_all_records = not isinstance(result_record_count, int) query_params = dict( where=self.where, out_fields=self.out_fields, return_geometry=self.return_geometry, return_all_records=self.return_all_records, result_record_count=self.result_record_count, ) query_params.update(kwargs) self.query_params = query_params def _get_layer_properties(self, lyr_desc: Optional[str] = None) -> dict: """Get the layer properties from the FeatureLayer.""" import arcgis layer_number_pattern = re.compile(r"/\d+$") props = self.layer.properties if lyr_desc is None: # retrieve description from the FeatureLayer if not provided try: if self.BEAUTIFULSOUP: lyr_desc = self.BEAUTIFULSOUP(props["description"]).text else: lyr_desc = props["description"] lyr_desc = lyr_desc or _NOT_PROVIDED except KeyError: lyr_desc = _NOT_PROVIDED try: item_id = props["serviceItemId"] item = self.gis.content.get(item_id) or arcgis.features.FeatureLayer( re.sub(layer_number_pattern, "", self.url), ) try: raw_desc = item.description except AttributeError: raw_desc = item.properties.description if self.BEAUTIFULSOUP: item_desc = self.BEAUTIFULSOUP(raw_desc).text else: item_desc = raw_desc item_desc = item_desc or _NOT_PROVIDED except KeyError: item_desc = _NOT_PROVIDED return { "layer_description": lyr_desc, "item_description": item_desc, "layer_properties": props, } def lazy_load(self) -> Iterator[Document]: """Lazy load records from FeatureLayer.""" query_response = self.layer.query(**self.query_params) features = (feature.as_dict for feature in query_response) for feature in features: attributes = feature["attributes"] page_content = json.dumps(attributes) metadata = { "accessed": f"{datetime.now(timezone.utc).isoformat()}Z", "name": self.layer_properties["layer_properties"]["name"], "url": self.url, "layer_description": self.layer_properties["layer_description"], "item_description": 
self.layer_properties["item_description"], "layer_properties": self.layer_properties["layer_properties"], } if self.return_geometry: try: metadata["geometry"] = feature["geometry"] except KeyError: warnings.warn( "Geometry could not be retrieved from the feature layer." ) yield Document(page_content=page_content, metadata=metadata) def load(self) -> List[Document]: """Load all records from FeatureLayer.""" return list(self.lazy_load())
[]
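A hedged sketch against a FeatureLayer URL; the service URL only illustrates the expected shape, the arcgis package must be installed, and anonymous GIS access is assumed.

from langchain_community.document_loaders import ArcGISLoader

layer_url = (
    "https://services.arcgis.com/<org-id>/arcgis/rest/services/"
    "<service-name>/FeatureServer/0"
)  # placeholder URL shape
loader = ArcGISLoader(layer=layer_url, out_fields=["NAME"], result_record_count=5)
docs = loader.load()
print(docs[0].metadata["layer_description"])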
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~ifixit.py
from typing import List, Optional import requests from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader from langchain_community.document_loaders.web_base import WebBaseLoader IFIXIT_BASE_URL = "https://www.ifixit.com/api/2.0" class IFixitLoader(BaseLoader): """Load `iFixit` repair guides, device wikis and answers. iFixit is the largest, open repair community on the web. The site contains nearly 100k repair manuals, 200k Questions & Answers on 42k devices, and all the data is licensed under CC-BY. This loader will allow you to download the text of a repair guide, text of Q&A's and wikis from devices on iFixit using their open APIs and web scraping. """ def __init__(self, web_path: str): """Initialize with a web path.""" if not web_path.startswith("https://www.ifixit.com"): raise ValueError("web path must start with 'https://www.ifixit.com'") path = web_path.replace("https://www.ifixit.com", "") allowed_paths = ["/Device", "/Guide", "/Answers", "/Teardown"] """ TODO: Add /Wiki """ if not any(path.startswith(allowed_path) for allowed_path in allowed_paths): raise ValueError( "web path must start with /Device, /Guide, /Teardown or /Answers" ) pieces = [x for x in path.split("/") if x] """Teardowns are just guides by a different name""" self.page_type = pieces[0] if pieces[0] != "Teardown" else "Guide" if self.page_type == "Guide" or self.page_type == "Answers": self.id = pieces[2] else: self.id = pieces[1] self.web_path = web_path def load(self) -> List[Document]: if self.page_type == "Device": return self.load_device() elif self.page_type == "Guide" or self.page_type == "Teardown": return self.load_guide() elif self.page_type == "Answers": return self.load_questions_and_answers() else: raise ValueError("Unknown page type: " + self.page_type) @staticmethod def load_suggestions(query: str = "", doc_type: str = "all") -> List[Document]: """Load suggestions. Args: query: A query string doc_type: The type of document to search for. Can be one of "all", "device", "guide", "teardown", "answer", "wiki". Returns: """ res = requests.get( IFIXIT_BASE_URL + "/suggest/" + query + "?doctypes=" + doc_type ) if res.status_code != 200: raise ValueError( 'Could not load suggestions for "' + query + '"\n' + res.json() ) data = res.json() results = data["results"] output = [] for result in results: try: loader = IFixitLoader(result["url"]) if loader.page_type == "Device": output += loader.load_device(include_guides=False) else: output += loader.load() except ValueError: continue return output def load_questions_and_answers( self, url_override: Optional[str] = None ) -> List[Document]: """Load a list of questions and answers. Args: url_override: A URL to override the default URL. 
Returns: List[Document] """ loader = WebBaseLoader(self.web_path if url_override is None else url_override) soup = loader.scrape() output = [] title = soup.find("h1", "post-title").text output.append("# " + title) output.append(soup.select_one(".post-content .post-text").text.strip()) answersHeader = soup.find("div", "post-answers-header") if answersHeader: output.append("\n## " + answersHeader.text.strip()) for answer in soup.select(".js-answers-list .post.post-answer"): if answer.has_attr("itemprop") and "acceptedAnswer" in answer["itemprop"]: output.append("\n### Accepted Answer") elif "post-helpful" in answer["class"]: output.append("\n### Most Helpful Answer") else: output.append("\n### Other Answer") output += [ a.text.strip() for a in answer.select(".post-content .post-text") ] output.append("\n") text = "\n".join(output).strip() metadata = {"source": self.web_path, "title": title} return [Document(page_content=text, metadata=metadata)] def load_device( self, url_override: Optional[str] = None, include_guides: bool = True ) -> List[Document]: """Loads a device Args: url_override: A URL to override the default URL. include_guides: Whether to include guides linked to from the device. Defaults to True. Returns: """ documents = [] if url_override is None: url = IFIXIT_BASE_URL + "/wikis/CATEGORY/" + self.id else: url = url_override res = requests.get(url) data = res.json() text = "\n".join( [ data[key] for key in ["title", "description", "contents_raw"] if key in data ] ).strip() metadata = {"source": self.web_path, "title": data["title"]} documents.append(Document(page_content=text, metadata=metadata)) if include_guides: """Load and return documents for each guide linked to from the device""" guide_urls = [guide["url"] for guide in data["guides"]] for guide_url in guide_urls: documents.append(IFixitLoader(guide_url).load()[0]) return documents def load_guide(self, url_override: Optional[str] = None) -> List[Document]: """Load a guide Args: url_override: A URL to override the default URL. Returns: List[Document] """ if url_override is None: url = IFIXIT_BASE_URL + "/guides/" + self.id else: url = url_override res = requests.get(url) if res.status_code != 200: raise ValueError( "Could not load guide: " + self.web_path + "\n" + res.json() ) data = res.json() doc_parts = ["# " + data["title"], data["introduction_raw"]] doc_parts.append("\n\n###Tools Required:") if len(data["tools"]) == 0: doc_parts.append("\n - None") else: for tool in data["tools"]: doc_parts.append("\n - " + tool["text"]) doc_parts.append("\n\n###Parts Required:") if len(data["parts"]) == 0: doc_parts.append("\n - None") else: for part in data["parts"]: doc_parts.append("\n - " + part["text"]) for row in data["steps"]: doc_parts.append( "\n\n## " + ( row["title"] if row["title"] != "" else "Step {}".format(row["orderby"]) ) ) for line in row["lines"]: doc_parts.append(line["text_raw"]) doc_parts.append(data["conclusion_raw"]) text = "\n".join(doc_parts) metadata = {"source": self.web_path, "title": data["title"]} return [Document(page_content=text, metadata=metadata)]
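A brief usage sketch for the iFixit loader above; the device URL and the search query are placeholders, and network access to ifixit.com is assumed.

from langchain_community.document_loaders import IFixitLoader

# Load a device wiki plus the guides it links to (placeholder device page).
loader = IFixitLoader("https://www.ifixit.com/Device/Standard_iPad")
docs = loader.load()

# Keyword search across iFixit device pages.
suggestions = IFixitLoader.load_suggestions("iPad", doc_type="device")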
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~smith~evaluation~progress.py
"""A simple progress bar for the console.""" import threading from typing import Any, Dict, Optional, Sequence from uuid import UUID from libs.core.langchain_core.documents import Document from libs.core.langchain_core.outputs import LLMResult from langchain.callbacks import base as base_callbacks class ProgressBarCallback(base_callbacks.BaseCallbackHandler): """A simple progress bar for the console.""" def __init__(self, total: int, ncols: int = 50, **kwargs: Any): """Initialize the progress bar. Args: total: int, the total number of items to be processed. ncols: int, the character width of the progress bar. """ self.total = total self.ncols = ncols self.counter = 0 self.lock = threading.Lock() self._print_bar() def increment(self) -> None: """Increment the counter and update the progress bar.""" with self.lock: self.counter += 1 self._print_bar() def _print_bar(self) -> None: """Print the progress bar to the console.""" progress = self.counter / self.total arrow = "-" * int(round(progress * self.ncols) - 1) + ">" spaces = " " * (self.ncols - len(arrow)) print(f"\r[{arrow + spaces}] {self.counter}/{self.total}", end="") def on_chain_error( self, error: BaseException, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if parent_run_id is None: self.increment() def on_chain_end( self, outputs: Dict[str, Any], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if parent_run_id is None: self.increment() def on_retriever_error( self, error: BaseException, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if parent_run_id is None: self.increment() def on_retriever_end( self, documents: Sequence[Document], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if parent_run_id is None: self.increment() def on_llm_error( self, error: BaseException, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if parent_run_id is None: self.increment() def on_llm_end( self, response: LLMResult, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if parent_run_id is None: self.increment() def on_tool_error( self, error: BaseException, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if parent_run_id is None: self.increment() def on_tool_end( self, output: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if parent_run_id is None: self.increment()
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~test_timescalevector.py
"""Test TimescaleVector functionality.""" import os from datetime import datetime, timedelta from typing import List from libs.core.langchain_core.documents import Document from langchain_community.vectorstores.timescalevector import TimescaleVector from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings SERVICE_URL = TimescaleVector.service_url_from_db_params( host=os.environ.get("TEST_TIMESCALE_HOST", "localhost"), port=int(os.environ.get("TEST_TIMESCALE_PORT", "5432")), database=os.environ.get("TEST_TIMESCALE_DATABASE", "postgres"), user=os.environ.get("TEST_TIMESCALE_USER", "postgres"), password=os.environ.get("TEST_TIMESCALE_PASSWORD", "postgres"), ) ADA_TOKEN_COUNT = 1536 class FakeEmbeddingsWithAdaDimension(FakeEmbeddings): """Fake embeddings functionality for testing.""" def embed_documents(self, texts: List[str]) -> List[List[float]]: """Return simple embeddings.""" return [ [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)] for i in range(len(texts)) ] def embed_query(self, text: str) -> List[float]: """Return simple embeddings.""" return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)] def test_timescalevector() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch = TimescaleVector.from_texts( texts=texts, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True, ) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo")] def test_timescalevector_from_documents() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts] docsearch = TimescaleVector.from_documents( documents=docs, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True, ) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"a": "b"})] async def test_timescalevector_afrom_documents() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts] docsearch = await TimescaleVector.afrom_documents( documents=docs, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True, ) output = await docsearch.asimilarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"a": "b"})] def test_timescalevector_embeddings() -> None: """Test end to end construction with embeddings and search.""" texts = ["foo", "bar", "baz"] text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) docsearch = TimescaleVector.from_embeddings( text_embeddings=text_embedding_pairs, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True, ) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo")] async def test_timescalevector_aembeddings() -> None: """Test end to end construction with embeddings and search.""" texts = ["foo", "bar", "baz"] text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) docsearch = await TimescaleVector.afrom_embeddings( text_embeddings=text_embedding_pairs, 
collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True, ) output = await docsearch.asimilarity_search("foo", k=1) assert output == [Document(page_content="foo")] def test_timescalevector_with_metadatas() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts( texts=texts, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, ) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"page": "0"})] def test_timescalevector_with_metadatas_with_scores() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts( texts=texts, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, ) output = docsearch.similarity_search_with_score("foo", k=1) assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)] async def test_timescalevector_awith_metadatas_with_scores() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = await TimescaleVector.afrom_texts( texts=texts, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, ) output = await docsearch.asimilarity_search_with_score("foo", k=1) assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)] def test_timescalevector_with_filter_match() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts( texts=texts, collection_name="test_collection_filter", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, ) output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"}) assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)] def test_timescalevector_with_filter_distant_match() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts( texts=texts, collection_name="test_collection_filter", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, ) output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"}) assert output == [ (Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406) ] def test_timescalevector_with_filter_no_match() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts( texts=texts, collection_name="test_collection_filter", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, ) output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"}) assert output == [] def 
test_timescalevector_with_filter_in_set() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts( texts=texts, collection_name="test_collection_filter", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, ) output = docsearch.similarity_search_with_score( "foo", k=2, filter=[{"page": "0"}, {"page": "2"}] ) assert output == [ (Document(page_content="foo", metadata={"page": "0"}), 0.0), (Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406), ] def test_timescalevector_relevance_score() -> None: """Test to make sure the relevance score is scaled to 0-1.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts( texts=texts, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, ) output = docsearch.similarity_search_with_relevance_scores("foo", k=3) assert output == [ (Document(page_content="foo", metadata={"page": "0"}), 1.0), (Document(page_content="bar", metadata={"page": "1"}), 0.9996744261675065), (Document(page_content="baz", metadata={"page": "2"}), 0.9986996093328621), ] async def test_timescalevector_relevance_score_async() -> None: """Test to make sure the relevance score is scaled to 0-1.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = await TimescaleVector.afrom_texts( texts=texts, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, ) output = await docsearch.asimilarity_search_with_relevance_scores("foo", k=3) assert output == [ (Document(page_content="foo", metadata={"page": "0"}), 1.0), (Document(page_content="bar", metadata={"page": "1"}), 0.9996744261675065), (Document(page_content="baz", metadata={"page": "2"}), 0.9986996093328621), ] def test_timescalevector_retriever_search_threshold() -> None: """Test using retriever for searching with threshold.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts( texts=texts, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, ) retriever = docsearch.as_retriever( search_type="similarity_score_threshold", search_kwargs={"k": 3, "score_threshold": 0.999}, ) output = retriever.get_relevant_documents("summer") assert output == [ Document(page_content="foo", metadata={"page": "0"}), Document(page_content="bar", metadata={"page": "1"}), ] def test_timescalevector_retriever_search_threshold_custom_normalization_fn() -> None: """Test searching with threshold and custom normalization function""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts( texts=texts, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True, relevance_score_fn=lambda d: d * 0, ) retriever = docsearch.as_retriever( search_type="similarity_score_threshold", search_kwargs={"k": 3, "score_threshold": 0.5}, ) output = retriever.get_relevant_documents("foo") assert output == [] def 
test_timescalevector_delete() -> None: """Test deleting functionality.""" texts = ["bar", "baz"] docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts] docsearch = TimescaleVector.from_documents( documents=docs, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True, ) texts = ["foo"] meta = [{"b": "c"}] ids = docsearch.add_texts(texts, meta) output = docsearch.similarity_search("bar", k=10) assert len(output) == 3 docsearch.delete(ids) output = docsearch.similarity_search("bar", k=10) assert len(output) == 2 docsearch.delete_by_metadata({"a": "b"}) output = docsearch.similarity_search("bar", k=10) assert len(output) == 0 def test_timescalevector_with_index() -> None: """Test deleting functionality.""" texts = ["bar", "baz"] docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts] docsearch = TimescaleVector.from_documents( documents=docs, collection_name="test_collection", embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True, ) texts = ["foo"] meta = [{"b": "c"}] docsearch.add_texts(texts, meta) docsearch.create_index() output = docsearch.similarity_search("bar", k=10) assert len(output) == 3 docsearch.drop_index() docsearch.create_index( index_type=TimescaleVector.IndexType.TIMESCALE_VECTOR, max_alpha=1.0, num_neighbors=50, ) docsearch.drop_index() docsearch.create_index("tsv", max_alpha=1.0, num_neighbors=50) docsearch.drop_index() docsearch.create_index("ivfflat", num_lists=20, num_records=1000) docsearch.drop_index() docsearch.create_index("hnsw", m=16, ef_construction=64) def test_timescalevector_time_partitioning() -> None: """Test deleting functionality.""" from timescale_vector import client texts = ["bar", "baz"] docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts] docsearch = TimescaleVector.from_documents( documents=docs, collection_name="test_collection_time_partitioning", embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True, time_partition_interval=timedelta(hours=1), ) texts = ["foo"] meta = [{"b": "c"}] ids = [client.uuid_from_time(datetime.now() - timedelta(hours=3))] docsearch.add_texts(texts, meta, ids) output = docsearch.similarity_search("bar", k=10) assert len(output) == 3 output = docsearch.similarity_search( "bar", k=10, start_date=datetime.now() - timedelta(hours=1) ) assert len(output) == 2 output = docsearch.similarity_search( "bar", k=10, end_date=datetime.now() - timedelta(hours=1) ) assert len(output) == 1 output = docsearch.similarity_search( "bar", k=10, start_date=datetime.now() - timedelta(minutes=200) ) assert len(output) == 3 output = docsearch.similarity_search( "bar", k=10, start_date=datetime.now() - timedelta(minutes=200), time_delta=timedelta(hours=1), ) assert len(output) == 1
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~llms~test_qianfan_endpoint.py
"""Test Baidu Qianfan LLM Endpoint.""" from typing import Generator from libs.core.langchain_core.outputs import LLMResult from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint def test_call() -> None: """Test valid call to qianfan.""" llm = QianfanLLMEndpoint() output = llm("write a joke") assert isinstance(output, str) def test_generate() -> None: """Test valid call to qianfan.""" llm = QianfanLLMEndpoint() output = llm.generate(["write a joke"]) assert isinstance(output, LLMResult) assert isinstance(output.generations, list) def test_generate_stream() -> None: """Test valid call to qianfan.""" llm = QianfanLLMEndpoint() output = llm.stream("write a joke") assert isinstance(output, Generator) async def test_qianfan_aio() -> None: llm = QianfanLLMEndpoint(streaming=True) async for token in llm.astream("hi qianfan."): assert isinstance(token, str)
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~mlflow.py
from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from urllib.parse import urlparse from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models import LLM from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, Field, PrivateAttr # Ignoring type because below is valid pydantic code # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class Params(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Parameters for MLflow""" temperature: float = 0.0 n: int = 1 stop: Optional[List[str]] = None max_tokens: Optional[int] = None class Mlflow(LLM): """Wrapper around completions LLMs in MLflow. To use, you should have the `mlflow[genai]` python package installed. For more information, see https://mlflow.org/docs/latest/llms/deployments/server.html. Example: .. code-block:: python from langchain_community.llms import Mlflow completions = Mlflow( target_uri="http://localhost:5000", endpoint="test", params={"temperature": 0.1} ) """ endpoint: str """The endpoint to use.""" target_uri: str """The target URI to use.""" temperature: float = 0.0 """The sampling temperature.""" n: int = 1 """The number of completion choices to generate.""" stop: Optional[List[str]] = None """The stop sequence.""" max_tokens: Optional[int] = None """The maximum number of tokens to generate.""" extra_params: Dict[str, Any] = Field(default_factory=dict) """Any extra parameters to pass to the endpoint.""" """Extra parameters such as `temperature`.""" _client: Any = PrivateAttr() def __init__(self, **kwargs: Any): super().__init__(**kwargs) self._validate_uri() try: from mlflow.deployments import get_deploy_client self._client = get_deploy_client(self.target_uri) except ImportError as e: raise ImportError( "Failed to create the client. " "Please run `pip install mlflow[genai]` to install " "required dependencies." ) from e def _validate_uri(self) -> None: if self.target_uri == "databricks": return allowed = ["http", "https", "databricks"] if urlparse(self.target_uri).scheme not in allowed: raise ValueError( f"Invalid target URI: {self.target_uri}. " f"The scheme must be one of {allowed}." ) @property def _default_params(self) -> Dict[str, Any]: return { "target_uri": self.target_uri, "endpoint": self.endpoint, "temperature": self.temperature, "n": self.n, "stop": self.stop, "max_tokens": self.max_tokens, "extra_params": self.extra_params, } @property def _identifying_params(self) -> Mapping[str, Any]: return self._default_params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: data: Dict[str, Any] = { "prompt": prompt, "temperature": self.temperature, "n": self.n, **self.extra_params, **kwargs, } if stop := self.stop or stop: data["stop"] = stop if self.max_tokens is not None: data["max_tokens"] = self.max_tokens resp = self._client.predict(endpoint=self.endpoint, inputs=data) return resp["choices"][0]["text"] @property def _llm_type(self) -> str: return "mlflow"
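An invocation sketch for the wrapper above, following the construction shown in its docstring; it assumes an MLflow deployments server is running locally, and the endpoint name "completions" is a placeholder.

from langchain_community.llms import Mlflow

llm = Mlflow(
    target_uri="http://localhost:5000",
    endpoint="completions",  # placeholder endpoint name
    temperature=0.1,
    max_tokens=64,
)
print(llm.invoke("Write a one-sentence summary of MLflow."))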
[]
2024-01-10
mth93/langchain
libs~community~tests~unit_tests~callbacks~test_callback_manager.py
"""Test CallbackManager.""" from unittest.mock import patch import pytest from libs.core.langchain_core.callbacks.manager import CallbackManager, trace_as_chain_group from libs.core.langchain_core.outputs import LLMResult from libs.core.langchain_core.tracers.langchain import LangChainTracer, wait_for_all_tracers from langchain_community.callbacks import get_openai_callback from langchain_community.llms.openai import BaseOpenAI def test_callback_manager_configure_context_vars( monkeypatch: pytest.MonkeyPatch, ) -> None: """Test callback manager configuration.""" monkeypatch.setenv("LANGCHAIN_TRACING_V2", "true") monkeypatch.setenv("LANGCHAIN_TRACING", "false") with patch.object(LangChainTracer, "_update_run_single"): with patch.object(LangChainTracer, "_persist_run_single"): with trace_as_chain_group("test") as group_manager: assert len(group_manager.handlers) == 1 tracer = group_manager.handlers[0] assert isinstance(tracer, LangChainTracer) with get_openai_callback() as cb: # This is a new empty callback handler assert cb.successful_requests == 0 assert cb.total_tokens == 0 # configure adds this openai cb but doesn't modify the group manager mngr = CallbackManager.configure(group_manager) assert mngr.handlers == [tracer, cb] assert group_manager.handlers == [tracer] response = LLMResult( generations=[], llm_output={ "token_usage": { "prompt_tokens": 2, "completion_tokens": 1, "total_tokens": 3, }, "model_name": BaseOpenAI.__fields__["model_name"].default, }, ) mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response) # The callback handler has been updated assert cb.successful_requests == 1 assert cb.total_tokens == 3 assert cb.prompt_tokens == 2 assert cb.completion_tokens == 1 assert cb.total_cost > 0 with get_openai_callback() as cb: # This is a new empty callback handler assert cb.successful_requests == 0 assert cb.total_tokens == 0 # configure adds this openai cb but doesn't modify the group manager mngr = CallbackManager.configure(group_manager) assert mngr.handlers == [tracer, cb] assert group_manager.handlers == [tracer] response = LLMResult( generations=[], llm_output={ "token_usage": { "prompt_tokens": 2, "completion_tokens": 1, "total_tokens": 3, }, "model_name": BaseOpenAI.__fields__["model_name"].default, }, ) mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response) # The callback handler has been updated assert cb.successful_requests == 1 assert cb.total_tokens == 3 assert cb.prompt_tokens == 2 assert cb.completion_tokens == 1 assert cb.total_cost > 0 wait_for_all_tracers() assert LangChainTracer._persist_run_single.call_count == 1 # type: ignore
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~volcengine_maas.py
from __future__ import annotations from typing import Any, Dict, Iterator, List, Optional from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.outputs import GenerationChunk from libs.core.langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env class VolcEngineMaasBase(BaseModel): """Base class for VolcEngineMaas models.""" client: Any volc_engine_maas_ak: Optional[SecretStr] = None """access key for volc engine""" volc_engine_maas_sk: Optional[SecretStr] = None """secret key for volc engine""" endpoint: Optional[str] = "maas-api.ml-platform-cn-beijing.volces.com" """Endpoint of the VolcEngineMaas LLM.""" region: Optional[str] = "Region" """Region of the VolcEngineMaas LLM.""" model: str = "skylark-lite-public" """Model name. you could check this model details here https://www.volcengine.com/docs/82379/1133187 and you could choose other models by change this field""" model_version: Optional[str] = None """Model version. Only used in moonshot large language model. you could check details here https://www.volcengine.com/docs/82379/1158281""" top_p: Optional[float] = 0.8 """Total probability mass of tokens to consider at each step.""" temperature: Optional[float] = 0.95 """A non-negative float that tunes the degree of randomness in generation.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """model special arguments, you could check detail on model page""" streaming: bool = False """Whether to stream the results.""" connect_timeout: Optional[int] = 60 """Timeout for connect to volc engine maas endpoint. Default is 60 seconds.""" read_timeout: Optional[int] = 60 """Timeout for read response from volc engine maas endpoint. Default is 60 seconds.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: volc_engine_maas_ak = convert_to_secret_str( get_from_dict_or_env(values, "volc_engine_maas_ak", "VOLC_ACCESSKEY") ) volc_engine_maas_sk = convert_to_secret_str( get_from_dict_or_env(values, "volc_engine_maas_sk", "VOLC_SECRETKEY") ) endpoint = values["endpoint"] if values["endpoint"] is not None and values["endpoint"] != "": endpoint = values["endpoint"] try: from volcengine.maas import MaasService maas = MaasService( endpoint, values["region"], connection_timeout=values["connect_timeout"], socket_timeout=values["read_timeout"], ) maas.set_ak(volc_engine_maas_ak.get_secret_value()) maas.set_sk(volc_engine_maas_sk.get_secret_value()) values["volc_engine_maas_ak"] = volc_engine_maas_ak values["volc_engine_maas_sk"] = volc_engine_maas_sk values["client"] = maas except ImportError: raise ImportError( "volcengine package not found, please install it with " "`pip install volcengine`" ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling VolcEngineMaas API.""" normal_params = { "top_p": self.top_p, "temperature": self.temperature, } return {**normal_params, **self.model_kwargs} class VolcEngineMaasLLM(LLM, VolcEngineMaasBase): """volc engine maas hosts a plethora of models. You can utilize these models through this class. To use, you should have the ``volcengine`` python package installed. and set access key and secret key by environment variable or direct pass those to this class. 
access key, secret key are required parameters which you could get help https://www.volcengine.com/docs/6291/65568 In order to use them, it is necessary to install the 'volcengine' Python package. The access key and secret key must be set either via environment variables or passed directly to this class. access key and secret key are mandatory parameters for which assistance can be sought at https://www.volcengine.com/docs/6291/65568. Example: .. code-block:: python from langchain_community.llms import VolcEngineMaasLLM model = VolcEngineMaasLLM(model="skylark-lite-public", volc_engine_maas_ak="your_ak", volc_engine_maas_sk="your_sk") """ @property def _llm_type(self) -> str: """Return type of llm.""" return "volc-engine-maas-llm" def _convert_prompt_msg_params( self, prompt: str, **kwargs: Any, ) -> dict: model_req = { "model": { "name": self.model, } } if self.model_version is not None: model_req["model"]["version"] = self.model_version return { **model_req, "messages": [{"role": "user", "content": prompt}], "parameters": {**self._default_params, **kwargs}, } def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: if self.streaming: completion = "" for chunk in self._stream(prompt, stop, run_manager, **kwargs): completion += chunk.text return completion params = self._convert_prompt_msg_params(prompt, **kwargs) response = self.client.chat(params) return response.get("choice", {}).get("message", {}).get("content", "") def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: params = self._convert_prompt_msg_params(prompt, **kwargs) for res in self.client.stream_chat(params): if res: chunk = GenerationChunk( text=res.get("choice", {}).get("message", {}).get("content", "") ) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk)
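A streaming sketch for the class above, mirroring the docstring's construction example; the access key and secret key are placeholders and would normally come from the VOLC_ACCESSKEY and VOLC_SECRETKEY environment variables.

from langchain_community.llms import VolcEngineMaasLLM

llm = VolcEngineMaasLLM(
    model="skylark-lite-public",
    volc_engine_maas_ak="your_ak",  # placeholder
    volc_engine_maas_sk="your_sk",  # placeholder
)
for chunk in llm.stream("Tell me a short joke."):
    print(chunk, end="", flush=True)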
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~sitemap.py
import itertools import re from typing import Any, Callable, Generator, Iterable, List, Optional, Tuple from urllib.parse import urlparse from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.web_base import WebBaseLoader def _default_parsing_function(content: Any) -> str: return str(content.get_text()) def _default_meta_function(meta: dict, _content: Any) -> dict: return {"source": meta["loc"], **meta} def _batch_block(iterable: Iterable, size: int) -> Generator[List[dict], None, None]: it = iter(iterable) while item := list(itertools.islice(it, size)): yield item def _extract_scheme_and_domain(url: str) -> Tuple[str, str]: """Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain """ parsed_uri = urlparse(url) return parsed_uri.scheme, parsed_uri.netloc class SitemapLoader(WebBaseLoader): """Load a sitemap and its URLs. **Security Note**: This loader can be used to load all URLs specified in a sitemap. If a malicious actor gets access to the sitemap, they could force the server to load URLs from other domains by modifying the sitemap. This could lead to server-side request forgery (SSRF) attacks; e.g., with the attacker forcing the server to load URLs from internal service endpoints that are not publicly accessible. While the attacker may not immediately gain access to this data, this data could leak into downstream systems (e.g., data loader is used to load data for indexing). This loader is a crawler and web crawlers should generally NOT be deployed with network access to any internal servers. Control access to who can submit crawling requests and what network access the crawler has. By default, the loader will only load URLs from the same domain as the sitemap if the site map is not a local file. This can be disabled by setting restrict_to_same_domain to False (not recommended). If the site map is a local file, no such risk mitigation is applied by default. Use the filter URLs argument to limit which URLs can be loaded. See https://python.langchain.com/docs/security """ def __init__( self, web_path: str, filter_urls: Optional[List[str]] = None, parsing_function: Optional[Callable] = None, blocksize: Optional[int] = None, blocknum: int = 0, meta_function: Optional[Callable] = None, is_local: bool = False, continue_on_failure: bool = False, restrict_to_same_domain: bool = True, **kwargs: Any, ): """Initialize with webpage path and optional filter URLs. Args: web_path: url of the sitemap. can also be a local path filter_urls: a list of regexes. If specified, only URLS that match one of the filter URLs will be loaded. *WARNING* The filter URLs are interpreted as regular expressions. Remember to escape special characters if you do not want them to be interpreted as regular expression syntax. For example, `.` appears frequently in URLs and should be escaped if you want to match a literal `.` rather than any character. restrict_to_same_domain takes precedence over filter_urls when restrict_to_same_domain is True and the sitemap is not a local file. parsing_function: Function to parse bs4.Soup output blocksize: number of sitemap locations per block blocknum: the number of the block that should be loaded - zero indexed. Default: 0 meta_function: Function to parse bs4.Soup output for metadata remember when setting this method to also copy metadata["loc"] to metadata["source"] if you are using this field is_local: whether the sitemap is a local file. 
Default: False continue_on_failure: whether to continue loading the sitemap if an error occurs loading a url, emitting a warning instead of raising an exception. Setting this to True makes the loader more robust, but also may result in missing data. Default: False restrict_to_same_domain: whether to restrict loading to URLs to the same domain as the sitemap. Attention: This is only applied if the sitemap is not a local file! """ if blocksize is not None and blocksize < 1: raise ValueError("Sitemap blocksize should be at least 1") if blocknum < 0: raise ValueError("Sitemap blocknum can not be lower then 0") try: import lxml # noqa:F401 except ImportError: raise ImportError( "lxml package not found, please install it with `pip install lxml`" ) super().__init__(web_paths=[web_path], **kwargs) # Define a list of URL patterns (interpreted as regular expressions) that # will be allowed to be loaded. # restrict_to_same_domain takes precedence over filter_urls when # restrict_to_same_domain is True and the sitemap is not a local file. self.allow_url_patterns = filter_urls self.restrict_to_same_domain = restrict_to_same_domain self.parsing_function = parsing_function or _default_parsing_function self.meta_function = meta_function or _default_meta_function self.blocksize = blocksize self.blocknum = blocknum self.is_local = is_local self.continue_on_failure = continue_on_failure def parse_sitemap(self, soup: Any) -> List[dict]: """Parse sitemap xml and load into a list of dicts. Args: soup: BeautifulSoup object. Returns: List of dicts. """ els = [] for url in soup.find_all("url"): loc = url.find("loc") if not loc: continue # Strip leading and trailing whitespace and newlines loc_text = loc.text.strip() if self.restrict_to_same_domain and not self.is_local: if _extract_scheme_and_domain(loc_text) != _extract_scheme_and_domain( self.web_path ): continue if self.allow_url_patterns and not any( re.match(regexp_pattern, loc_text) for regexp_pattern in self.allow_url_patterns ): continue els.append( { tag: prop.text for tag in ["loc", "lastmod", "changefreq", "priority"] if (prop := url.find(tag)) } ) for sitemap in soup.find_all("sitemap"): loc = sitemap.find("loc") if not loc: continue soup_child = self.scrape_all([loc.text], "xml")[0] els.extend(self.parse_sitemap(soup_child)) return els def load(self) -> List[Document]: """Load sitemap.""" if self.is_local: try: import bs4 except ImportError: raise ImportError( "beautifulsoup4 package not found, please install it" " with `pip install beautifulsoup4`" ) fp = open(self.web_path) soup = bs4.BeautifulSoup(fp, "xml") else: soup = self._scrape(self.web_path, parser="xml") els = self.parse_sitemap(soup) if self.blocksize is not None: elblocks = list(_batch_block(els, self.blocksize)) blockcount = len(elblocks) if blockcount - 1 < self.blocknum: raise ValueError( "Selected sitemap does not contain enough blocks for given blocknum" ) else: els = elblocks[self.blocknum] results = self.scrape_all([el["loc"].strip() for el in els if "loc" in el]) return [ Document( page_content=self.parsing_function(results[i]), metadata=self.meta_function(els[i], results[i]), ) for i in range(len(results)) ]
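A usage sketch for the sitemap loader above; the sitemap URL and the filter regex are placeholders, and continue_on_failure is enabled so one bad URL does not abort the crawl.

from langchain_community.document_loaders.sitemap import SitemapLoader

loader = SitemapLoader(
    "https://example.com/sitemap.xml",              # placeholder sitemap
    filter_urls=[r"https://example\.com/blog/.*"],  # placeholder pattern
    continue_on_failure=True,
)
docs = loader.load()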
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~evaluation~scoring~eval_chain.py
"""Base classes for scoring the output of a model on a scale of 1-10.""" from __future__ import annotations import logging import re from typing import Any, Dict, List, Optional, Union from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.output_parsers import BaseOutputParser from libs.core.langchain_core.prompts.prompt import PromptTemplate from libs.core.langchain_core.pydantic_v1 import Extra, Field from langchain.callbacks.manager import Callbacks from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple from langchain.chains.llm import LLMChain from langchain.chat_models.azure_openai import AzureChatOpenAI from langchain.chat_models.openai import ChatOpenAI from langchain.evaluation.criteria.eval_chain import ( CRITERIA_TYPE, Criteria, ) from langchain.evaluation.schema import LLMEvalChain, StringEvaluator from langchain.evaluation.scoring.prompt import ( CRITERIA_INSTRUCTIONS, DEFAULT_CRITERIA, SCORING_TEMPLATE, SCORING_TEMPLATE_WITH_REFERENCE, ) from langchain.schema import RUN_KEY logger = logging.getLogger(__name__) _FIND_DOUBLE_BRACKETS = re.compile(r"\[\[(.*?)\]\]") _SUPPORTED_CRITERIA = { Criteria.CONCISENESS: "Is the submission concise and to the point?", Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?", Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?", Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?", Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?", Criteria.MALICIOUSNESS: "Is the submission malicious in any way?", Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?", Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?", Criteria.MISOGYNY: "Is the submission misogynistic or sexist?", Criteria.CRIMINALITY: "Is the submission criminal in any way?", Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?", Criteria.DEPTH: "Does the submission demonstrate depth of thought?", Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?", Criteria.DETAIL: "Does the submission demonstrate attention to detail?", } def resolve_criteria( criteria: Optional[Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]]], ) -> dict: """Resolve the criteria for the pairwise evaluator. Args: criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. Returns: dict: The resolved criteria. """ if criteria is None: _default_criteria = [ Criteria.HELPFULNESS, Criteria.RELEVANCE, Criteria.CORRECTNESS, Criteria.DEPTH, ] return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria} elif isinstance(criteria, Criteria): criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} elif isinstance(criteria, str): if criteria in _SUPPORTED_CRITERIA: criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} else: criteria_ = {criteria: ""} elif isinstance(criteria, ConstitutionalPrinciple): criteria_ = {criteria.name: criteria.critique_request} elif isinstance(criteria, (list, tuple)): criteria_ = { k: v for criterion in criteria for k, v in resolve_criteria(criterion).items() } else: if not criteria: raise ValueError( "Criteria cannot be empty. " "Please provide a criterion name or a mapping of the criterion name" " to its description." ) criteria_ = dict(criteria) return criteria_ class ScoreStringResultOutputParser(BaseOutputParser[dict]): """A parser for the output of the ScoreStringEvalChain. 
Attributes: _type (str): The type of the output parser. """ @property def _type(self) -> str: """Return the type of the output parser. Returns: str: The type of the output parser. """ return "pairwise_string_result" def parse(self, text: str) -> Dict[str, Any]: """Parse the output text. Args: text (str): The output text to parse. Returns: Dict: The parsed output. Raises: ValueError: If the verdict is invalid. """ match = _FIND_DOUBLE_BRACKETS.search(text) if match: verdict = match.group(1) if not match or verdict not in list("123456789") + ["10"]: raise ValueError( f"Invalid output: {text}. " "Output must contain a double bracketed string\ with the verdict between 1 and 10." ) return { "reasoning": text, "score": int(verdict), } class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain): """A chain for scoring on a scale of 1-10 the output of a model. Attributes: output_parser (BaseOutputParser): The output parser for the chain. Example: >>> from langchain.chat_models import ChatOpenAI >>> from langchain.evaluation.scoring import ScoreStringEvalChain >>> llm = ChatOpenAI(temperature=0, model_name="gpt-4") >>> chain = ScoreStringEvalChain.from_llm(llm=llm) >>> result = chain.evaluate_strings( ... input = "What is the chemical formula for water?", ... prediction = "H2O", ... reference = "The chemical formula for water is H2O.", ... ) >>> print(result) # { # "score": 8, # "comment": "The response accurately states " # "that the chemical formula for water is H2O." # "However, it does not provide an explanation of what the formula means." # } """ output_key: str = "results" #: :meta private: output_parser: BaseOutputParser = Field( default_factory=ScoreStringResultOutputParser ) normalize_by: Optional[float] = None """The value to normalize the score by, if specified.""" criterion_name: str """The name of the criterion being evaluated.""" class Config: """Configuration for the ScoreStringEvalChain.""" extra = Extra.ignore @classmethod def is_lc_serializable(cls) -> bool: return False @property def requires_reference(self) -> bool: """Return whether the chain requires a reference. Returns: bool: True if the chain requires a reference, False otherwise. """ return False @property def requires_input(self) -> bool: """Return whether the chain requires an input. Returns: bool: True if the chain requires an input, False otherwise. """ return True @property def evaluation_name(self) -> str: """Get the name of the evaluation. Returns ------- str The name of the evaluation. """ return f"score_string:{self.criterion_name}" @property def _skip_reference_warning(self) -> str: """Return the warning to show when reference is ignored. Returns: str: The warning to show when reference is ignored. """ return ( f"Ignoring reference in {self.__class__.__name__}, as it is not expected." "\nTo use a reference, use the LabeledScoreStringEvalChain instead." " (EvaluatorType.LABELED_SCORE_STRING) instead." ) @classmethod def from_llm( cls, llm: BaseLanguageModel, *, prompt: Optional[PromptTemplate] = None, criteria: Optional[Union[CRITERIA_TYPE, str]] = None, normalize_by: Optional[float] = None, **kwargs: Any, ) -> ScoreStringEvalChain: """Initialize the ScoreStringEvalChain from an LLM. Args: llm (BaseChatModel): The LLM to use (GPT-4 recommended). prompt (PromptTemplate, optional): The prompt to use. **kwargs (Any): Additional keyword arguments. Returns: ScoreStringEvalChain: The initialized ScoreStringEvalChain. Raises: ValueError: If the input variables are not as expected. 
""" if not ( isinstance(llm, (ChatOpenAI, AzureChatOpenAI)) and llm.model_name.startswith("gpt-4") ): logger.warning( "This chain was only tested with GPT-4. \ Performance may be significantly worse with other models." ) expected_input_vars = {"prediction", "input", "criteria"} prompt_ = prompt or SCORING_TEMPLATE.partial(reference="") if expected_input_vars != set(prompt_.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt_.input_variables}" ) criteria_ = resolve_criteria(criteria) criteria_str = "\n".join( f"{k}: {v}" if v else k for k, v in criteria_.items() ).strip() criteria_str = ( CRITERIA_INSTRUCTIONS + f"{criteria_str}\n" if criteria_str else DEFAULT_CRITERIA ) return cls( llm=llm, prompt=prompt_.partial(criteria=criteria_str), normalize_by=normalize_by, criterion_name="-".join(criteria_), **kwargs, ) def _prepare_input( self, prediction: str, input: Optional[str], reference: Optional[str], ) -> dict: """Prepare the input for the chain. Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. input (str, optional): The input or task string. reference (str, optional): The reference string, if any. Returns: dict: The prepared input for the chain. """ input_ = { "prediction": prediction, "input": input, } if self.requires_reference: input_["reference"] = reference return input_ def _prepare_output(self, result: dict) -> dict: """Prepare the output.""" parsed = result[self.output_key] if RUN_KEY in result: parsed[RUN_KEY] = result[RUN_KEY] if "score" in parsed and self.normalize_by is not None: parsed["score"] = parsed["score"] / self.normalize_by return parsed def _evaluate_strings( self, *, prediction: str, input: Optional[str] = None, reference: Optional[str] = None, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Score the output string. Args: prediction (str): The output string from the first model. input (str, optional): The input or task string. callbacks (Callbacks, optional): The callbacks to use. reference (str, optional): The reference string, if any. **kwargs (Any): Additional keyword arguments. Returns: dict: A dictionary containing: - reasoning: The reasoning for the preference. - score: A score between 1 and 10. """ input_ = self._prepare_input(prediction, input, reference) result = self( inputs=input_, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info, ) return self._prepare_output(result) async def _aevaluate_string_pairs( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Asynchronously score the output string. Args: prediction (str): The output string from the first model. input (str, optional): The input or task string. callbacks (Callbacks, optional): The callbacks to use. reference (str, optional): The reference string, if any. **kwargs (Any): Additional keyword arguments. Returns: dict: A dictionary containing: - reasoning: The reasoning for the preference. - score: A score between 1 and 10. 
""" input_ = self._prepare_input(prediction, input, reference) result = await self.acall( inputs=input_, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info, ) return self._prepare_output(result) class LabeledScoreStringEvalChain(ScoreStringEvalChain): """A chain for scoring the output of a model on a scale of 1-10. Attributes: output_parser (BaseOutputParser): The output parser for the chain. """ @property def requires_reference(self) -> bool: """Return whether the chain requires a reference. Returns: bool: True if the chain requires a reference, False otherwise. """ return True @classmethod def from_llm( cls, llm: BaseLanguageModel, *, prompt: Optional[PromptTemplate] = None, criteria: Optional[Union[CRITERIA_TYPE, str]] = None, normalize_by: Optional[float] = None, **kwargs: Any, ) -> LabeledScoreStringEvalChain: """Initialize the LabeledScoreStringEvalChain from an LLM. Args: llm (BaseLanguageModel): The LLM to use. prompt (PromptTemplate, optional): The prompt to use. criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. normalize_by (float, optional): The value to normalize the score by. **kwargs (Any): Additional keyword arguments. Returns: LabeledScoreStringEvalChain: The initialized LabeledScoreStringEvalChain. Raises: ValueError: If the input variables are not as expected. """ # noqa: E501 expected_input_vars = { "prediction", "input", "reference", "criteria", } prompt_ = prompt or SCORING_TEMPLATE_WITH_REFERENCE if expected_input_vars != set(prompt_.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt_.input_variables}" ) criteria_ = resolve_criteria(criteria) criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items()).strip() criteria_str = ( CRITERIA_INSTRUCTIONS + f"{criteria_str}\n" if criteria_str else DEFAULT_CRITERIA ) return cls( llm=llm, prompt=prompt_.partial(criteria=criteria_str), normalize_by=normalize_by, criterion_name="-".join(criteria_), **kwargs, )
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~generic.py
from __future__ import annotations from pathlib import Path from typing import ( TYPE_CHECKING, Any, Iterator, List, Literal, Optional, Sequence, Union, ) from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseBlobParser, BaseLoader from langchain_community.document_loaders.blob_loaders import ( BlobLoader, FileSystemBlobLoader, ) from langchain_community.document_loaders.parsers.registry import get_parser if TYPE_CHECKING: from langchain.text_splitter import TextSplitter _PathLike = Union[str, Path] DEFAULT = Literal["default"] class GenericLoader(BaseLoader): """Generic Document Loader. A generic document loader that allows combining an arbitrary blob loader with a blob parser. Examples: Parse a specific PDF file: .. code-block:: python from langchain_community.document_loaders import GenericLoader from langchain_community.document_loaders.parsers.pdf import PyPDFParser # Recursively load all text files in a directory. loader = GenericLoader.from_filesystem( "my_lovely_pdf.pdf", parser=PyPDFParser() ) .. code-block:: python from langchain_community.document_loaders import GenericLoader from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader loader = GenericLoader.from_filesystem( path="path/to/directory", glob="**/[!.]*", suffixes=[".pdf"], show_progress=True, ) docs = loader.lazy_load() next(docs) Example instantiations to change which files are loaded: .. code-block:: python # Recursively load all text files in a directory. loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/*.txt") # Recursively load all non-hidden files in a directory. loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/[!.]*") # Load all files in a directory without recursion. loader = GenericLoader.from_filesystem("/path/to/dir", glob="*") Example instantiations to change which parser is used: .. code-block:: python from langchain_community.document_loaders.parsers.pdf import PyPDFParser # Recursively load all text files in a directory. loader = GenericLoader.from_filesystem( "/path/to/dir", glob="**/*.pdf", parser=PyPDFParser() ) """ # noqa: E501 def __init__( self, blob_loader: BlobLoader, blob_parser: BaseBlobParser, ) -> None: """A generic document loader. Args: blob_loader: A blob loader which knows how to yield blobs blob_parser: A blob parser which knows how to parse blobs into documents """ self.blob_loader = blob_loader self.blob_parser = blob_parser def lazy_load( self, ) -> Iterator[Document]: """Load documents lazily. Use this when working at a large scale.""" for blob in self.blob_loader.yield_blobs(): yield from self.blob_parser.lazy_parse(blob) def load(self) -> List[Document]: """Load all documents.""" return list(self.lazy_load()) def load_and_split( self, text_splitter: Optional[TextSplitter] = None ) -> List[Document]: """Load all documents and split them into sentences.""" raise NotImplementedError( "Loading and splitting is not yet implemented for generic loaders. " "When they will be implemented they will be added via the initializer. " "This method should not be used going forward." ) @classmethod def from_filesystem( cls, path: _PathLike, *, glob: str = "**/[!.]*", exclude: Sequence[str] = (), suffixes: Optional[Sequence[str]] = None, show_progress: bool = False, parser: Union[DEFAULT, BaseBlobParser] = "default", parser_kwargs: Optional[dict] = None, ) -> GenericLoader: """Create a generic document loader using a filesystem blob loader. 
Args: path: The path to the directory to load documents from OR the path to a single file to load. If this is a file, glob, exclude, suffixes will be ignored. glob: The glob pattern to use to find documents. suffixes: The suffixes to use to filter documents. If None, all files matching the glob will be loaded. exclude: A list of patterns to exclude from the loader. show_progress: Whether to show a progress bar or not (requires tqdm). Proxies to the file system loader. parser: A blob parser which knows how to parse blobs into documents, will instantiate a default parser if not provided. The default can be overridden by either passing a parser or setting the class attribute `blob_parser` (the latter should be used with inheritance). parser_kwargs: Keyword arguments to pass to the parser. Returns: A generic document loader. """ blob_loader = FileSystemBlobLoader( path, glob=glob, exclude=exclude, suffixes=suffixes, show_progress=show_progress, ) if isinstance(parser, str): if parser == "default": try: # If there is an implementation of get_parser on the class, use it. blob_parser = cls.get_parser(**(parser_kwargs or {})) except NotImplementedError: # if not then use the global registry. blob_parser = get_parser(parser) else: blob_parser = get_parser(parser) else: blob_parser = parser return cls(blob_loader, blob_parser) @staticmethod def get_parser(**kwargs: Any) -> BaseBlobParser: """Override this method to associate a default parser with the class.""" raise NotImplementedError()
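A sketch of overriding get_parser in a subclass, which the staticmethod at the end of the class suggests; the subclass name is hypothetical and PyPDFParser is used purely for illustration.

from typing import Any

from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers.pdf import PyPDFParser


class PDFDirectoryLoader(GenericLoader):
    """Hypothetical subclass with a default PDF parser."""

    @staticmethod
    def get_parser(**kwargs: Any) -> BaseBlobParser:
        return PyPDFParser(**kwargs)


loader = PDFDirectoryLoader.from_filesystem("/path/to/dir", glob="**/*.pdf")
docs = loader.load()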
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_transformers~beautiful_soup_transformer.py
from typing import Any, Iterator, List, Sequence, cast from libs.core.langchain_core.documents import BaseDocumentTransformer, Document class BeautifulSoupTransformer(BaseDocumentTransformer): """Transform HTML content by extracting specific tags and removing unwanted ones. Example: .. code-block:: python from langchain_community.document_transformers import BeautifulSoupTransformer bs4_transformer = BeautifulSoupTransformer() docs_transformed = bs4_transformer.transform_documents(docs) """ # noqa: E501 def __init__(self) -> None: """ Initialize the transformer. This checks if the BeautifulSoup4 package is installed. If not, it raises an ImportError. """ try: import bs4 # noqa:F401 except ImportError: raise ImportError( "BeautifulSoup4 is required for BeautifulSoupTransformer. " "Please install it with `pip install beautifulsoup4`." ) def transform_documents( self, documents: Sequence[Document], unwanted_tags: List[str] = ["script", "style"], tags_to_extract: List[str] = ["p", "li", "div", "a"], remove_lines: bool = True, **kwargs: Any, ) -> Sequence[Document]: """ Transform a list of Document objects by cleaning their HTML content. Args: documents: A sequence of Document objects containing HTML content. unwanted_tags: A list of tags to be removed from the HTML. tags_to_extract: A list of tags whose content will be extracted. remove_lines: If set to True, unnecessary lines will be removed from the HTML content. Returns: A sequence of Document objects with transformed content. """ for doc in documents: cleaned_content = doc.page_content cleaned_content = self.remove_unwanted_tags(cleaned_content, unwanted_tags) cleaned_content = self.extract_tags(cleaned_content, tags_to_extract) if remove_lines: cleaned_content = self.remove_unnecessary_lines(cleaned_content) doc.page_content = cleaned_content return documents @staticmethod def remove_unwanted_tags(html_content: str, unwanted_tags: List[str]) -> str: """ Remove unwanted tags from a given HTML content. Args: html_content: The original HTML content string. unwanted_tags: A list of tags to be removed from the HTML. Returns: A cleaned HTML string with unwanted tags removed. """ from bs4 import BeautifulSoup soup = BeautifulSoup(html_content, "html.parser") for tag in unwanted_tags: for element in soup.find_all(tag): element.decompose() return str(soup) @staticmethod def extract_tags(html_content: str, tags: List[str]) -> str: """ Extract specific tags from a given HTML content. Args: html_content: The original HTML content string. tags: A list of tags to be extracted from the HTML. Returns: A string combining the content of the extracted tags. """ from bs4 import BeautifulSoup soup = BeautifulSoup(html_content, "html.parser") text_parts: List[str] = [] for element in soup.find_all(): if element.name in tags: # Extract all navigable strings recursively from this element. text_parts += get_navigable_strings(element) # To avoid duplicate text, remove all descendants from the soup. element.decompose() return " ".join(text_parts) @staticmethod def remove_unnecessary_lines(content: str) -> str: """ Clean up the content by removing unnecessary lines. Args: content: A string, which may contain unnecessary lines or spaces. Returns: A cleaned string with unnecessary lines removed. 
""" lines = content.split("\n") stripped_lines = [line.strip() for line in lines] non_empty_lines = [line for line in stripped_lines if line] cleaned_content = " ".join(non_empty_lines) return cleaned_content async def atransform_documents( self, documents: Sequence[Document], **kwargs: Any, ) -> Sequence[Document]: raise NotImplementedError def get_navigable_strings(element: Any) -> Iterator[str]: """Get all navigable strings from a BeautifulSoup element. Args: element: A BeautifulSoup element. Returns: A generator of strings. """ from bs4 import NavigableString, Tag for child in cast(Tag, element).children: if isinstance(child, Tag): yield from get_navigable_strings(child) elif isinstance(child, NavigableString): if (element.name == "a") and (href := element.get("href")): yield f"{child.strip()} ({href})" else: yield child.strip()
[]
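To make the transformer above concrete, here is a small, self-contained sketch; the HTML snippet and tag lists are illustrative only.

from libs.core.langchain_core.documents import Document

from langchain_community.document_transformers import BeautifulSoupTransformer

html = (
    "<html><body>"
    "<script>var x = 1;</script>"
    "<p>LangChain ships many loaders.</p>"
    "<a href='https://example.com'>docs</a>"
    "</body></html>"
)
docs = [Document(page_content=html)]

bs4_transformer = BeautifulSoupTransformer()
cleaned = bs4_transformer.transform_documents(
    docs,
    unwanted_tags=["script", "style"],   # dropped entirely
    tags_to_extract=["p", "a"],          # only their text survives
)
# Expected: "LangChain ships many loaders. docs (https://example.com)"
print(cleaned[0].page_content)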
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~discord.py
from __future__ import annotations from typing import TYPE_CHECKING, List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader if TYPE_CHECKING: import pandas as pd class DiscordChatLoader(BaseLoader): """Load `Discord` chat logs.""" def __init__(self, chat_log: pd.DataFrame, user_id_col: str = "ID"): """Initialize with a Pandas DataFrame containing chat logs. Args: chat_log: Pandas DataFrame containing chat logs. user_id_col: Name of the column containing the user ID. Defaults to "ID". """ if not isinstance(chat_log, pd.DataFrame): raise ValueError( f"Expected chat_log to be a pd.DataFrame, got {type(chat_log)}" ) self.chat_log = chat_log self.user_id_col = user_id_col def load(self) -> List[Document]: """Load all chat messages.""" result = [] for _, row in self.chat_log.iterrows(): user_id = row[self.user_id_col] metadata = row.to_dict() metadata.pop(self.user_id_col) result.append(Document(page_content=user_id, metadata=metadata)) return result
[]
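A minimal sketch of feeding the loader above with a tiny chat-log DataFrame; the column names other than the default "ID" are placeholders.

import pandas as pd

from langchain_community.document_loaders import DiscordChatLoader

chat_log = pd.DataFrame(
    {
        "ID": ["user_1", "user_2"],
        "Content": ["hello there", "general kenobi"],      # placeholder columns
        "Timestamp": ["2023-01-01 10:00", "2023-01-01 10:01"],
    }
)

loader = DiscordChatLoader(chat_log, user_id_col="ID")
docs = loader.load()
print(docs[0].page_content)  # -> "user_1" (the user id becomes the page content)
print(docs[0].metadata)      # the remaining columns become metadata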
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~test_zep.py
# mypy: disable-error-code=attr-defined import copy from random import random from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from uuid import uuid4 import pytest from libs.core.langchain_core.documents import Document from pytest_mock import MockerFixture from langchain_community.vectorstores import ZepVectorStore from langchain_community.vectorstores.zep import CollectionConfig if TYPE_CHECKING: from zep_python.document import Document as ZepDocument from zep_python.document import DocumentCollection VECTOR_DIMS = 5 def gen_vector() -> List[float]: return [random() for _ in range(VECTOR_DIMS)] def gen_mock_zep_document( collection_name: str, embedding_dimensions: Optional[int] = None, ) -> "ZepDocument": from zep_python.document import Document as ZepDocument embedding = ( [random() for _ in range(embedding_dimensions)] if embedding_dimensions else None ) return ZepDocument( uuid=str(uuid4()), collection_name=collection_name, content="Test Document", embedding=embedding, metadata={"key": "value"}, ) @pytest.fixture def texts_metadatas() -> Dict[str, Any]: return { "texts": ["Test Document" for _ in range(2)], "metadatas": [{"key": "value"} for _ in range(2)], } @pytest.fixture def mock_documents() -> List[Document]: return [ Document( page_content="Test Document", metadata={"key": "value"}, ) for _ in range(2) ] @pytest.fixture def texts_metadatas_as_zep_documents() -> List["ZepDocument"]: from zep_python.document import Document as ZepDocument return [ ZepDocument( content="Test Document", metadata={"key": "value"}, ) for _ in range(2) ] @pytest.fixture def search_results() -> List["ZepDocument"]: return [ gen_mock_zep_document( collection_name="test_collection", embedding_dimensions=VECTOR_DIMS ) for _ in range(2) ] @pytest.fixture def search_results_with_query_embedding() -> Tuple[List["ZepDocument"], List[float]]: return_count = 2 return [ gen_mock_zep_document( collection_name="test_collection", embedding_dimensions=VECTOR_DIMS ) for _ in range(return_count) ], gen_vector() @pytest.fixture def mock_collection_config() -> CollectionConfig: return CollectionConfig( name="test_collection", description="Test Collection", metadata={"key": "value"}, embedding_dimensions=VECTOR_DIMS, is_auto_embedded=True, ) @pytest.fixture @pytest.mark.requires("zep_python") def mock_collection( mocker: MockerFixture, mock_collection_config: CollectionConfig, search_results: List[Document], search_results_with_query_embedding: Tuple[List[Document], List[float]], ) -> "DocumentCollection": from zep_python.document import DocumentCollection mock_collection: DocumentCollection = mocker.patch( "zep_python.document.collections.DocumentCollection", autospec=True ) mock_collection.search.return_value = copy.deepcopy(search_results) mock_collection.asearch.return_value = copy.deepcopy(search_results) temp_value = copy.deepcopy(search_results_with_query_embedding) mock_collection.search_return_query_vector.return_value = copy.deepcopy(temp_value) mock_collection.asearch_return_query_vector.return_value = copy.deepcopy(temp_value) mock_collection.name = mock_collection_config.name mock_collection.is_auto_embedded = mock_collection_config.is_auto_embedded mock_collection.embedding_dimensions = mock_collection_config.embedding_dimensions return mock_collection @pytest.fixture @pytest.mark.requires("zep_python") def zep_vectorstore( mocker: MockerFixture, mock_collection: "DocumentCollection", mock_collection_config: CollectionConfig, ) -> ZepVectorStore: mock_document_client = mocker.patch( 
"zep_python.document.client.DocumentClient", autospec=True ) mock_document_client.get_collection.return_value = mock_collection mock_client = mocker.patch("zep_python.ZepClient", autospec=True) mock_client.return_value.document = mock_document_client vs = ZepVectorStore( mock_collection_config.name, "http://localhost:8080", api_key="test", config=mock_collection_config, ) return vs @pytest.mark.requires("zep_python") def test_from_texts( zep_vectorstore: ZepVectorStore, mock_collection_config: CollectionConfig, mock_collection: "DocumentCollection", texts_metadatas: Dict[str, Any], texts_metadatas_as_zep_documents: List["ZepDocument"], ) -> None: vs = zep_vectorstore.from_texts( **texts_metadatas, collection_name=mock_collection_config.name, api_url="http://localhost:8000", ) vs._collection.add_documents.assert_called_once_with( # type: ignore texts_metadatas_as_zep_documents ) @pytest.mark.requires("zep_python") def test_add_documents( zep_vectorstore: ZepVectorStore, mock_collection: "DocumentCollection", mock_documents: List[Document], texts_metadatas_as_zep_documents: List["ZepDocument"], ) -> None: zep_vectorstore.add_documents(mock_documents) mock_collection.add_documents.assert_called_once_with( # type: ignore texts_metadatas_as_zep_documents ) @pytest.mark.requires("zep_python") async def test_asearch_similarity( zep_vectorstore: ZepVectorStore, ) -> None: r = await zep_vectorstore.asearch( query="Test Document", search_type="similarity", k=2 ) assert len(r) == 2 assert r[0].page_content == "Test Document" assert r[0].metadata == {"key": "value"} @pytest.mark.requires("zep_python") async def test_asearch_mmr( zep_vectorstore: ZepVectorStore, ) -> None: r = await zep_vectorstore.asearch(query="Test Document", search_type="mmr", k=1) assert len(r) == 1 assert r[0].page_content == "Test Document" assert r[0].metadata == {"key": "value"} @pytest.mark.requires("zep_python") def test_search_similarity( zep_vectorstore: ZepVectorStore, ) -> None: r = zep_vectorstore.search(query="Test Document", search_type="similarity", k=2) assert len(r) == 2 assert r[0].page_content == "Test Document" assert r[0].metadata == {"key": "value"} @pytest.mark.requires("zep_python") def test_search_mmr( zep_vectorstore: ZepVectorStore, ) -> None: r = zep_vectorstore.search(query="Test Document", search_type="mmr", k=1) assert len(r) == 1 assert r[0].page_content == "Test Document" assert r[0].metadata == {"key": "value"}
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~brave_search.py
from typing import Iterator, List, Optional

from libs.core.langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.brave_search import BraveSearchWrapper


class BraveSearchLoader(BaseLoader):
    """Load with `Brave Search` engine."""

    def __init__(self, query: str, api_key: str, search_kwargs: Optional[dict] = None):
        """Initializes the BraveSearchLoader.

        Args:
            query: The query to search for.
            api_key: The API key to use.
            search_kwargs: The search kwargs to use.
        """
        self.query = query
        self.api_key = api_key
        self.search_kwargs = search_kwargs or {}

    def load(self) -> List[Document]:
        brave_client = BraveSearchWrapper(
            api_key=self.api_key,
            search_kwargs=self.search_kwargs,
        )
        return brave_client.download_documents(self.query)

    def lazy_load(self) -> Iterator[Document]:
        for doc in self.load():
            yield doc
[]
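A usage sketch for the loader above, assuming a valid Brave Search API key in an environment variable; the query, the `count` search kwarg, and the metadata keys printed are assumptions about what the underlying wrapper returns.

import os

from langchain_community.document_loaders import BraveSearchLoader

loader = BraveSearchLoader(
    query="obama middle name",                 # placeholder query
    api_key=os.environ["BRAVE_API_KEY"],       # hypothetical env var name
    search_kwargs={"count": 3},                # assumed Brave Search parameter
)
for doc in loader.load():
    # "title" and "link" are the metadata keys the wrapper is expected to set.
    print(doc.metadata.get("title"), "->", doc.metadata.get("link"))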
2024-01-10
mth93/langchain
libs~langchain~tests~integration_tests~smith~evaluation~test_runner_utils.py
from typing import Iterator, List from uuid import uuid4 import pytest from libs.core.langchain_core.messages import BaseMessage, HumanMessage from libs.core.langchain_core.prompts.chat import ChatPromptTemplate from langsmith import Client as Client from langsmith.schemas import DataType from langchain.chains.llm import LLMChain from langchain.chat_models import ChatOpenAI from langchain.evaluation import EvaluatorType from langchain.llms.openai import OpenAI from langchain.smith import RunEvalConfig, run_on_dataset from langchain.smith.evaluation import InputFormatError from langchain.smith.evaluation.runner_utils import arun_on_dataset def _check_all_feedback_passed(_project_name: str, client: Client) -> None: # Assert that all runs completed, all feedback completed, and that the # chain or llm passes for the feedback provided. runs = list(client.list_runs(project_name=_project_name, execution_order=1)) if not runs: # Queue delays. We are mainly just smoke checking rn. return feedback = list(client.list_feedback(run_ids=[run.id for run in runs])) if not feedback: return assert all([f.score == 1 for f in feedback]) @pytest.fixture def eval_project_name() -> str: return f"lcp integration tests - {str(uuid4())[-8:]}" @pytest.fixture(scope="module") def client() -> Client: return Client() @pytest.fixture( scope="module", ) def kv_dataset_name() -> Iterator[str]: import pandas as pd client = Client() df = pd.DataFrame( { "some_input": [ "What's the capital of California?", "What's the capital of Nevada?", "What's the capital of Oregon?", "What's the capital of Washington?", ], "other_input": [ "a", "b", "c", "d", ], "some_output": ["Sacramento", "Carson City", "Salem", "Olympia"], "other_output": ["e", "f", "g", "h"], } ) uid = str(uuid4())[-8:] _dataset_name = f"lcp kv dataset integration tests - {uid}" client.upload_dataframe( df, name=_dataset_name, input_keys=["some_input", "other_input"], output_keys=["some_output", "other_output"], description="Integration test dataset", ) yield _dataset_name def test_chat_model( kv_dataset_name: str, eval_project_name: str, client: Client ) -> None: llm = ChatOpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) with pytest.raises(ValueError, match="Must specify reference_key"): run_on_dataset( dataset_name=kv_dataset_name, llm_or_chain_factory=llm, evaluation=eval_config, client=client, ) eval_config = RunEvalConfig( evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA], reference_key="some_output", ) with pytest.raises( InputFormatError, match="Example inputs do not match language model" ): run_on_dataset( dataset_name=kv_dataset_name, llm_or_chain_factory=llm, evaluation=eval_config, client=client, ) def input_mapper(d: dict) -> List[BaseMessage]: return [HumanMessage(content=d["some_input"])] run_on_dataset( client=client, dataset_name=kv_dataset_name, llm_or_chain_factory=llm, evaluation=eval_config, input_mapper=input_mapper, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client) def test_llm(kv_dataset_name: str, eval_project_name: str, client: Client) -> None: llm = OpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) with pytest.raises(ValueError, match="Must specify reference_key"): run_on_dataset( dataset_name=kv_dataset_name, llm_or_chain_factory=llm, evaluation=eval_config, client=client, ) eval_config = RunEvalConfig( evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA], 
reference_key="some_output", ) with pytest.raises( InputFormatError, match="Example inputs do not match language model" ): run_on_dataset( dataset_name=kv_dataset_name, llm_or_chain_factory=llm, evaluation=eval_config, client=client, ) def input_mapper(d: dict) -> str: return d["some_input"] run_on_dataset( client=client, dataset_name=kv_dataset_name, llm_or_chain_factory=llm, evaluation=eval_config, input_mapper=input_mapper, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client) def test_chain(kv_dataset_name: str, eval_project_name: str, client: Client) -> None: llm = ChatOpenAI(temperature=0) chain = LLMChain.from_string(llm, "The answer to the {question} is: ") eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) with pytest.raises(ValueError, match="Must specify reference_key"): run_on_dataset( dataset_name=kv_dataset_name, llm_or_chain_factory=lambda: chain, evaluation=eval_config, client=client, ) eval_config = RunEvalConfig( evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA], reference_key="some_output", ) with pytest.raises( InputFormatError, match="Example inputs do not match chain input keys" ): run_on_dataset( dataset_name=kv_dataset_name, llm_or_chain_factory=lambda: chain, evaluation=eval_config, client=client, ) def input_mapper(d: dict) -> dict: return {"input": d["some_input"]} with pytest.raises( InputFormatError, match=" match the chain's expected input keys.", ): run_on_dataset( dataset_name=kv_dataset_name, llm_or_chain_factory=lambda: input_mapper | chain, client=client, evaluation=eval_config, ) def right_input_mapper(d: dict) -> dict: return {"question": d["some_input"]} run_on_dataset( dataset_name=kv_dataset_name, llm_or_chain_factory=lambda: right_input_mapper | chain, client=client, evaluation=eval_config, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client) ### Testing Chat Datasets @pytest.fixture( scope="module", ) def chat_dataset_name() -> Iterator[str]: def _create_message(txt: str, role: str = "human") -> List[dict]: return [{"type": role, "data": {"content": txt}}] import pandas as pd client = Client() df = pd.DataFrame( { "input": [ _create_message(txt) for txt in ( "What's the capital of California?", "What's the capital of Nevada?", "What's the capital of Oregon?", "What's the capital of Washington?", ) ], "output": [ _create_message(txt, role="ai")[0] for txt in ("Sacramento", "Carson City", "Salem", "Olympia") ], } ) uid = str(uuid4())[-8:] _dataset_name = f"lcp chat dataset integration tests - {uid}" ds = client.create_dataset( _dataset_name, description="Integration test dataset", data_type=DataType.chat ) for row in df.itertuples(): client.create_example( dataset_id=ds.id, inputs={"input": row.input}, outputs={"output": row.output}, ) yield _dataset_name def test_chat_model_on_chat_dataset( chat_dataset_name: str, eval_project_name: str, client: Client ) -> None: llm = ChatOpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) run_on_dataset( dataset_name=chat_dataset_name, llm_or_chain_factory=llm, evaluation=eval_config, client=client, project_name=eval_project_name, ) _check_all_feedback_passed(eval_project_name, client) def test_llm_on_chat_dataset( chat_dataset_name: str, eval_project_name: str, client: Client ) -> None: llm = OpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) run_on_dataset( 
dataset_name=chat_dataset_name, llm_or_chain_factory=llm, client=client, evaluation=eval_config, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client) def test_chain_on_chat_dataset(chat_dataset_name: str, client: Client) -> None: llm = ChatOpenAI(temperature=0) chain = LLMChain.from_string(llm, "The answer to the {question} is: ") eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) with pytest.raises( ValueError, match="Cannot evaluate a chain on dataset with data_type=chat" ): run_on_dataset( dataset_name=chat_dataset_name, client=client, llm_or_chain_factory=lambda: chain, evaluation=eval_config, ) @pytest.fixture( scope="module", ) def llm_dataset_name() -> Iterator[str]: import pandas as pd client = Client() df = pd.DataFrame( { "input": [ "What's the capital of California?", "What's the capital of Nevada?", "What's the capital of Oregon?", "What's the capital of Washington?", ], "output": ["Sacramento", "Carson City", "Salem", "Olympia"], } ) uid = str(uuid4())[-8:] _dataset_name = f"lcp llm dataset integration tests - {uid}" client.upload_dataframe( df, name=_dataset_name, input_keys=["input"], output_keys=["output"], description="Integration test dataset", data_type=DataType.llm, ) yield _dataset_name def test_chat_model_on_llm_dataset( llm_dataset_name: str, eval_project_name: str, client: Client ) -> None: llm = ChatOpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) run_on_dataset( client=client, dataset_name=llm_dataset_name, llm_or_chain_factory=llm, evaluation=eval_config, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client) def test_llm_on_llm_dataset( llm_dataset_name: str, eval_project_name: str, client: Client ) -> None: llm = OpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) run_on_dataset( client=client, dataset_name=llm_dataset_name, llm_or_chain_factory=llm, evaluation=eval_config, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client) def test_chain_on_llm_dataset(llm_dataset_name: str, client: Client) -> None: llm = ChatOpenAI(temperature=0) chain = LLMChain.from_string(llm, "The answer to the {question} is: ") eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) with pytest.raises( ValueError, match="Cannot evaluate a chain on dataset with data_type=llm" ): run_on_dataset( client=client, dataset_name=llm_dataset_name, llm_or_chain_factory=lambda: chain, evaluation=eval_config, ) @pytest.fixture( scope="module", ) def kv_singleio_dataset_name() -> Iterator[str]: import pandas as pd client = Client() df = pd.DataFrame( { "the wackiest input": [ "What's the capital of California?", "What's the capital of Nevada?", "What's the capital of Oregon?", "What's the capital of Washington?", ], "unthinkable output": ["Sacramento", "Carson City", "Salem", "Olympia"], } ) uid = str(uuid4())[-8:] _dataset_name = f"lcp singleio kv dataset integration tests - {uid}" client.upload_dataframe( df, name=_dataset_name, input_keys=["the wackiest input"], output_keys=["unthinkable output"], description="Integration test dataset", ) yield _dataset_name def test_chat_model_on_kv_singleio_dataset( kv_singleio_dataset_name: str, eval_project_name: str, client: Client ) -> None: llm = ChatOpenAI(temperature=0) eval_config = 
RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) run_on_dataset( dataset_name=kv_singleio_dataset_name, llm_or_chain_factory=llm, evaluation=eval_config, client=client, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client) def test_llm_on_kv_singleio_dataset( kv_singleio_dataset_name: str, eval_project_name: str, client: Client ) -> None: llm = OpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) run_on_dataset( dataset_name=kv_singleio_dataset_name, llm_or_chain_factory=llm, client=client, evaluation=eval_config, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client) def test_chain_on_kv_singleio_dataset( kv_singleio_dataset_name: str, eval_project_name: str, client: Client ) -> None: llm = ChatOpenAI(temperature=0) chain = LLMChain.from_string(llm, "The answer to the {question} is: ") eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) run_on_dataset( dataset_name=kv_singleio_dataset_name, llm_or_chain_factory=lambda: chain, client=client, evaluation=eval_config, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client) async def test_runnable_on_kv_singleio_dataset( kv_singleio_dataset_name: str, eval_project_name: str, client: Client ) -> None: runnable = ( ChatPromptTemplate.from_messages([("human", "{the wackiest input}")]) | ChatOpenAI() ) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) await arun_on_dataset( dataset_name=kv_singleio_dataset_name, llm_or_chain_factory=runnable, client=client, evaluation=eval_config, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client) async def test_arb_func_on_kv_singleio_dataset( kv_singleio_dataset_name: str, eval_project_name: str, client: Client ) -> None: runnable = ( ChatPromptTemplate.from_messages([("human", "{the wackiest input}")]) | ChatOpenAI() ) def my_func(x: dict) -> str: content = runnable.invoke(x).content if isinstance(content, str): return content else: raise ValueError( f"Expected message with content type string, got {content}" ) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA]) await arun_on_dataset( dataset_name=kv_singleio_dataset_name, llm_or_chain_factory=my_func, client=client, evaluation=eval_config, project_name=eval_project_name, tags=["shouldpass"], ) _check_all_feedback_passed(eval_project_name, client)
[ "some_input", "[('human', '{the wackiest input}')]" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~onedrive.py
"""Loads data from OneDrive""" from __future__ import annotations import logging from typing import TYPE_CHECKING, Iterator, List, Optional, Sequence, Union from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import Field from langchain_community.document_loaders.base_o365 import ( O365BaseLoader, _FileType, ) from langchain_community.document_loaders.parsers.registry import get_parser if TYPE_CHECKING: from O365.drive import Drive, Folder logger = logging.getLogger(__name__) class OneDriveLoader(O365BaseLoader): """Load from `Microsoft OneDrive`.""" drive_id: str = Field(...) """ The ID of the OneDrive drive to load data from.""" folder_path: Optional[str] = None """ The path to the folder to load data from.""" object_ids: Optional[List[str]] = None """ The IDs of the objects to load data from.""" @property def _file_types(self) -> Sequence[_FileType]: """Return supported file types.""" return _FileType.DOC, _FileType.DOCX, _FileType.PDF @property def _scopes(self) -> List[str]: """Return required scopes.""" return ["offline_access", "Files.Read.All"] def _get_folder_from_path(self, drive: Drive) -> Union[Folder, Drive]: """ Returns the folder or drive object located at the specified path relative to the given drive. Args: drive (Drive): The root drive from which the folder path is relative. Returns: Union[Folder, Drive]: The folder or drive object located at the specified path. Raises: FileNotFoundError: If the path does not exist. """ subfolder_drive = drive if self.folder_path is None: return subfolder_drive subfolders = [f for f in self.folder_path.split("/") if f != ""] if len(subfolders) == 0: return subfolder_drive items = subfolder_drive.get_items() for subfolder in subfolders: try: subfolder_drive = list(filter(lambda x: subfolder in x.name, items))[0] items = subfolder_drive.get_items() except (IndexError, AttributeError): raise FileNotFoundError("Path {} not exist.".format(self.folder_path)) return subfolder_drive def lazy_load(self) -> Iterator[Document]: """Load documents lazily. Use this when working at a large scale.""" try: from O365.drive import Drive except ImportError: raise ImportError( "O365 package not found, please install it with `pip install o365`" ) drive = self._auth().storage().get_drive(self.drive_id) if not isinstance(drive, Drive): raise ValueError(f"There isn't a Drive with id {self.drive_id}.") blob_parser = get_parser("default") if self.folder_path: folder = self._get_folder_from_path(drive) for blob in self._load_from_folder(folder): yield from blob_parser.lazy_parse(blob) if self.object_ids: for blob in self._load_from_object_ids(drive, self.object_ids): yield from blob_parser.lazy_parse(blob) def load(self) -> List[Document]: """Load all documents.""" return list(self.lazy_load())
[]
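A sketch of driving the loader above. Authentication (client id/secret) is handled by the O365BaseLoader base class and is assumed to come from its settings/environment; the drive id and folder path are placeholders.

from langchain_community.document_loaders.onedrive import OneDriveLoader

# Assumes O365 client credentials are already configured for the base class
# (e.g. via its settings/environment); only loader-specific fields are shown.
loader = OneDriveLoader(
    drive_id="YOUR_DRIVE_ID",          # placeholder
    folder_path="Documents/reports",   # placeholder folder inside the drive
)

# lazy_load() streams documents one by one; load() simply materializes the list.
for doc in loader.lazy_load():
    print(doc.metadata, len(doc.page_content))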
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~openai_functions~openapi.py
from __future__ import annotations import json import re from collections import defaultdict from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import requests from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate from libs.core.langchain_core.utils.input import get_colored_text from requests import Response from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.sequential import SequentialChain from langchain.chat_models import ChatOpenAI from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser from langchain.tools import APIOperation from langchain.utilities.openapi import OpenAPISpec if TYPE_CHECKING: from openapi_pydantic import Parameter def _get_description(o: Any, prefer_short: bool) -> Optional[str]: summary = getattr(o, "summary", None) description = getattr(o, "description", None) if prefer_short: return summary or description return description or summary def _format_url(url: str, path_params: dict) -> str: expected_path_param = re.findall(r"{(.*?)}", url) new_params = {} for param in expected_path_param: clean_param = param.lstrip(".;").rstrip("*") val = path_params[clean_param] if isinstance(val, list): if param[0] == ".": sep = "." if param[-1] == "*" else "," new_val = "." + sep.join(val) elif param[0] == ";": sep = f"{clean_param}=" if param[-1] == "*" else "," new_val = f"{clean_param}=" + sep.join(val) else: new_val = ",".join(val) elif isinstance(val, dict): kv_sep = "=" if param[-1] == "*" else "," kv_strs = [kv_sep.join((k, v)) for k, v in val.items()] if param[0] == ".": sep = "." new_val = "." elif param[0] == ";": sep = ";" new_val = ";" else: sep = "," new_val = "" new_val += sep.join(kv_strs) else: if param[0] == ".": new_val = f".{val}" elif param[0] == ";": new_val = f";{clean_param}={val}" else: new_val = val new_params[param] = new_val return url.format(**new_params) def _openapi_params_to_json_schema(params: List[Parameter], spec: OpenAPISpec) -> dict: properties = {} required = [] for p in params: if p.param_schema: schema = spec.get_schema(p.param_schema) else: media_type_schema = list(p.content.values())[0].media_type_schema # type: ignore # noqa: E501 schema = spec.get_schema(media_type_schema) if p.description and not schema.description: schema.description = p.description properties[p.name] = json.loads(schema.json(exclude_none=True)) if p.required: required.append(p.name) return {"type": "object", "properties": properties, "required": required} def openapi_spec_to_openai_fn( spec: OpenAPISpec, ) -> Tuple[List[Dict[str, Any]], Callable]: """Convert a valid OpenAPI spec to the JSON Schema format expected for OpenAI functions. Args: spec: OpenAPI spec to convert. Returns: Tuple of the OpenAI functions JSON schema and a default function for executing a request based on the OpenAI function schema. 
""" if not spec.paths: return [], lambda: None functions = [] _name_to_call_map = {} for path in spec.paths: path_params = { (p.name, p.param_in): p for p in spec.get_parameters_for_path(path) } for method in spec.get_methods_for_path(path): request_args = {} op = spec.get_operation(path, method) op_params = path_params.copy() for param in spec.get_parameters_for_operation(op): op_params[(param.name, param.param_in)] = param params_by_type = defaultdict(list) for name_loc, p in op_params.items(): params_by_type[name_loc[1]].append(p) param_loc_to_arg_name = { "query": "params", "header": "headers", "cookie": "cookies", "path": "path_params", } for param_loc, arg_name in param_loc_to_arg_name.items(): if params_by_type[param_loc]: request_args[arg_name] = _openapi_params_to_json_schema( params_by_type[param_loc], spec ) request_body = spec.get_request_body_for_operation(op) # TODO: Support more MIME types. if request_body and request_body.content: media_types = {} for media_type, media_type_object in request_body.content.items(): if media_type_object.media_type_schema: schema = spec.get_schema(media_type_object.media_type_schema) media_types[media_type] = json.loads( schema.json(exclude_none=True) ) if len(media_types) == 1: media_type, schema_dict = list(media_types.items())[0] key = "json" if media_type == "application/json" else "data" request_args[key] = schema_dict elif len(media_types) > 1: request_args["data"] = {"anyOf": list(media_types.values())} api_op = APIOperation.from_openapi_spec(spec, path, method) fn = { "name": api_op.operation_id, "description": api_op.description, "parameters": { "type": "object", "properties": request_args, }, } functions.append(fn) _name_to_call_map[fn["name"]] = { "method": method, "url": api_op.base_url + api_op.path, } def default_call_api( name: str, fn_args: dict, headers: Optional[dict] = None, params: Optional[dict] = None, **kwargs: Any, ) -> Any: method = _name_to_call_map[name]["method"] url = _name_to_call_map[name]["url"] path_params = fn_args.pop("path_params", {}) url = _format_url(url, path_params) if "data" in fn_args and isinstance(fn_args["data"], dict): fn_args["data"] = json.dumps(fn_args["data"]) _kwargs = {**fn_args, **kwargs} if headers is not None: if "headers" in _kwargs: _kwargs["headers"].update(headers) else: _kwargs["headers"] = headers if params is not None: if "params" in _kwargs: _kwargs["params"].update(params) else: _kwargs["params"] = params return requests.request(method, url, **_kwargs) return functions, default_call_api class SimpleRequestChain(Chain): """Chain for making a simple request to an API endpoint.""" request_method: Callable """Method to use for making the request.""" output_key: str = "response" """Key to use for the output of the request.""" input_key: str = "function" """Key to use for the input of the request.""" @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the logic of this chain and return the output.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() name = inputs[self.input_key].pop("name") args = inputs[self.input_key].pop("arguments") _pretty_name = get_colored_text(name, "green") _pretty_args = get_colored_text(json.dumps(args, indent=2), "green") _text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args 
_run_manager.on_text(_text) api_response: Response = self.request_method(name, args) if api_response.status_code != 200: response = ( f"{api_response.status_code}: {api_response.reason}" + f"\nFor {name} " + f"Called with args: {args.get('params','')}" ) else: try: response = api_response.json() except Exception: # noqa: E722 response = api_response.text return {self.output_key: response} def get_openapi_chain( spec: Union[OpenAPISpec, str], llm: Optional[BaseLanguageModel] = None, prompt: Optional[BasePromptTemplate] = None, request_chain: Optional[Chain] = None, llm_chain_kwargs: Optional[Dict] = None, verbose: bool = False, headers: Optional[Dict] = None, params: Optional[Dict] = None, **kwargs: Any, ) -> SequentialChain: """Create a chain for querying an API from a OpenAPI spec. Args: spec: OpenAPISpec or url/file/text string corresponding to one. llm: language model, should be an OpenAI function-calling model, e.g. `ChatOpenAI(model="gpt-3.5-turbo-0613")`. prompt: Main prompt template to use. request_chain: Chain for taking the functions output and executing the request. """ if isinstance(spec, str): for conversion in ( OpenAPISpec.from_url, OpenAPISpec.from_file, OpenAPISpec.from_text, ): try: spec = conversion(spec) # type: ignore[arg-type] break except ImportError as e: raise e except Exception: # noqa: E722 pass if isinstance(spec, str): raise ValueError(f"Unable to parse spec from source {spec}") openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec) llm = llm or ChatOpenAI( model="gpt-3.5-turbo-0613", ) prompt = prompt or ChatPromptTemplate.from_template( "Use the provided API's to respond to this user query:\n\n{query}" ) llm_chain = LLMChain( llm=llm, prompt=prompt, llm_kwargs={"functions": openai_fns}, output_parser=JsonOutputFunctionsParser(args_only=False), output_key="function", verbose=verbose, **(llm_chain_kwargs or {}), ) request_chain = request_chain or SimpleRequestChain( request_method=lambda name, args: call_api_fn( name, args, headers=headers, params=params ), verbose=verbose, ) return SequentialChain( chains=[llm_chain, request_chain], input_variables=llm_chain.input_keys, output_variables=["response"], verbose=verbose, **kwargs, )
[ "Use the provided API's to respond to this user query:\n\n{query}" ]
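A short sketch of wiring up the chain factory above; the spec URL is a placeholder and an OpenAI API key is assumed to be configured.

from langchain.chains.openai_functions.openapi import get_openapi_chain

chain = get_openapi_chain(
    "https://www.example.com/openapi.json",  # placeholder OpenAPI spec URL
    verbose=True,
)

# The default prompt exposes a single "query" input variable and the
# SequentialChain returns its result under the "response" key.
result = chain({"query": "What is the weather in Paris right now?"})
print(result["response"])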
2024-01-10
mth93/langchain
templates~rag-pinecone-rerank~rag_pinecone_rerank~chain.py
import os from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from langchain.prompts import ChatPromptTemplate from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import CohereRerank from langchain.vectorstores import Pinecone from libs.core.langchain_core.output_parsers import StrOutputParser from libs.core.langchain_core.pydantic_v1 import BaseModel from libs.core.langchain_core.runnables import RunnableParallel, RunnablePassthrough if os.environ.get("PINECONE_API_KEY", None) is None: raise Exception("Missing `PINECONE_API_KEY` environment variable.") if os.environ.get("PINECONE_ENVIRONMENT", None) is None: raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.") PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test") ### Ingest code - you may need to run this the first time # Load # from langchain.document_loaders import WebBaseLoader # loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") # data = loader.load() # # Split # from langchain.text_splitter import RecursiveCharacterTextSplitter # text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) # all_splits = text_splitter.split_documents(data) # # Add to vectorDB # vectorstore = Pinecone.from_documents( # documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME # ) # retriever = vectorstore.as_retriever() vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, OpenAIEmbeddings()) # Get k=10 docs retriever = vectorstore.as_retriever(search_kwargs={"k": 10}) # Re-rank compressor = CohereRerank() compression_retriever = ContextualCompressionRetriever( base_compressor=compressor, base_retriever=retriever ) # RAG prompt template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) # RAG model = ChatOpenAI() chain = ( RunnableParallel( {"context": compression_retriever, "question": RunnablePassthrough()} ) | prompt | model | StrOutputParser() ) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question)
[ "Answer the question based only on the following context:\n{context}\nQuestion: {question}\n" ]
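For completeness, a sketch of invoking the chain module above (for example from a notebook); it assumes the Pinecone index already exists and that OpenAI and Cohere API keys are configured alongside the Pinecone variables checked at import time.

from rag_pinecone_rerank.chain import chain

# The chain is typed to accept a bare question string (see Question above).
answer = chain.invoke("What are the main components of an LLM-powered agent?")
print(answer)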
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~datadog_logs.py
from datetime import datetime, timedelta from typing import List, Optional from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class DatadogLogsLoader(BaseLoader): """Load `Datadog` logs. Logs are written into the `page_content` and into the `metadata`. """ def __init__( self, query: str, api_key: str, app_key: str, from_time: Optional[int] = None, to_time: Optional[int] = None, limit: int = 100, ) -> None: """Initialize Datadog document loader. Requirements: - Must have datadog_api_client installed. Install with `pip install datadog_api_client`. Args: query: The query to run in Datadog. api_key: The Datadog API key. app_key: The Datadog APP key. from_time: Optional. The start of the time range to query. Supports date math and regular timestamps (milliseconds) like '1688732708951' Defaults to 20 minutes ago. to_time: Optional. The end of the time range to query. Supports date math and regular timestamps (milliseconds) like '1688732708951' Defaults to now. limit: The maximum number of logs to return. Defaults to 100. """ # noqa: E501 try: from datadog_api_client import Configuration except ImportError as ex: raise ImportError( "Could not import datadog_api_client python package. " "Please install it with `pip install datadog_api_client`." ) from ex self.query = query configuration = Configuration() configuration.api_key["apiKeyAuth"] = api_key configuration.api_key["appKeyAuth"] = app_key self.configuration = configuration self.from_time = from_time self.to_time = to_time self.limit = limit def parse_log(self, log: dict) -> Document: """ Create Document objects from Datadog log items. """ attributes = log.get("attributes", {}) metadata = { "id": log.get("id", ""), "status": attributes.get("status"), "service": attributes.get("service", ""), "tags": attributes.get("tags", []), "timestamp": attributes.get("timestamp", ""), } message = attributes.get("message", "") inside_attributes = attributes.get("attributes", {}) content_dict = {**inside_attributes, "message": message} content = ", ".join(f"{k}: {v}" for k, v in content_dict.items()) return Document(page_content=content, metadata=metadata) def load(self) -> List[Document]: """ Get logs from Datadog. Returns: A list of Document objects. - page_content - metadata - id - service - status - tags - timestamp """ try: from datadog_api_client import ApiClient from datadog_api_client.v2.api.logs_api import LogsApi from datadog_api_client.v2.model.logs_list_request import LogsListRequest from datadog_api_client.v2.model.logs_list_request_page import ( LogsListRequestPage, ) from datadog_api_client.v2.model.logs_query_filter import LogsQueryFilter from datadog_api_client.v2.model.logs_sort import LogsSort except ImportError as ex: raise ImportError( "Could not import datadog_api_client python package. " "Please install it with `pip install datadog_api_client`." 
) from ex now = datetime.now() twenty_minutes_before = now - timedelta(minutes=20) now_timestamp = int(now.timestamp() * 1000) twenty_minutes_before_timestamp = int(twenty_minutes_before.timestamp() * 1000) _from = ( self.from_time if self.from_time is not None else twenty_minutes_before_timestamp ) body = LogsListRequest( filter=LogsQueryFilter( query=self.query, _from=_from, to=f"{self.to_time if self.to_time is not None else now_timestamp}", ), sort=LogsSort.TIMESTAMP_ASCENDING, page=LogsListRequestPage( limit=self.limit, ), ) with ApiClient(configuration=self.configuration) as api_client: api_instance = LogsApi(api_client) response = api_instance.list_logs(body=body).to_dict() docs: List[Document] = [] for row in response["data"]: docs.append(self.parse_log(row)) return docs
[]
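A usage sketch for the loader above; the query, the environment-variable names, and the timestamp are placeholders.

import os

from langchain_community.document_loaders import DatadogLogsLoader

loader = DatadogLogsLoader(
    query="service:agent status:error",   # placeholder Datadog query
    api_key=os.environ["DD_API_KEY"],     # hypothetical env var names
    app_key=os.environ["DD_APP_KEY"],
    from_time=1688732708951,              # optional, milliseconds
    limit=50,
)
docs = loader.load()
for doc in docs[:3]:
    print(doc.metadata["service"], doc.metadata["status"], doc.page_content[:80])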
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~nuclia.py
import json
import uuid
from typing import List

from libs.core.langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader
from langchain_community.tools.nuclia.tool import NucliaUnderstandingAPI


class NucliaLoader(BaseLoader):
    """Load from any file type using `Nuclia Understanding API`."""

    def __init__(self, path: str, nuclia_tool: NucliaUnderstandingAPI):
        self.nua = nuclia_tool
        self.id = str(uuid.uuid4())
        self.nua.run({"action": "push", "id": self.id, "path": path, "text": None})

    def load(self) -> List[Document]:
        """Load documents."""
        data = self.nua.run(
            {"action": "pull", "id": self.id, "path": None, "text": None}
        )
        if not data:
            return []
        obj = json.loads(data)
        text = obj["extracted_text"][0]["body"]["text"]
        metadata = {
            "file": obj["file_extracted_data"][0],
            "metadata": obj["field_metadata"][0],
        }
        return [Document(page_content=text, metadata=metadata)]
[]
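A sketch of the loader above. The NucliaUnderstandingAPI constructor argument and its NUCLIA_NUA_KEY environment variable are assumptions about the tool's setup; the file path is a placeholder.

from langchain_community.document_loaders.nuclia import NucliaLoader
from langchain_community.tools.nuclia.tool import NucliaUnderstandingAPI

# Assumes the NUCLIA_NUA_KEY env var holds a valid key and that the tool
# accepts an enable_ml flag.
nua = NucliaUnderstandingAPI(enable_ml=False)
loader = NucliaLoader("./report.pdf", nua)  # placeholder file path

# Processing happens asynchronously on Nuclia's side: load() returns an empty
# list until the pushed file has been processed, so callers typically poll.
docs = loader.load()
if docs:
    print(docs[0].metadata["file"])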
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~edenai~ocr_identityparser.py
from __future__ import annotations import logging from typing import Optional from libs.core.langchain_core.callbacks import CallbackManagerForToolRun from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool logger = logging.getLogger(__name__) class EdenAiParsingIDTool(EdenaiTool): """Tool that queries the Eden AI Identity parsing API. for api reference check edenai documentation: https://docs.edenai.co/reference/ocr_identity_parser_create. To use, you should have the environment variable ``EDENAI_API_KEY`` set with your API token. You can find your token here: https://app.edenai.run/admin/account/settings """ name = "edenai_identity_parsing" description = ( "A wrapper around edenai Services Identity parsing. " "Useful for when you have to extract information from an ID Document " "Input should be the string url of the document to parse." ) feature = "ocr" subfeature = "identity_parser" language: Optional[str] = None """ language of the text passed to the model. """ def _parse_response(self, response: list) -> str: formatted_list: list = [] if len(response) == 1: self._parse_json_multilevel( response[0]["extracted_data"][0], formatted_list ) else: for entry in response: if entry.get("provider") == "eden-ai": self._parse_json_multilevel( entry["extracted_data"][0], formatted_list ) return "\n".join(formatted_list) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" query_params = { "file_url": query, "language": self.language, "attributes_as_list": False, } return self._call_eden_ai(query_params)
[]
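A sketch of calling the tool above, assuming EDENAI_API_KEY is set; the `providers` field comes from the EdenaiTool base class and, like the document URL, is an assumption here.

from langchain_community.tools.edenai.ocr_identityparser import EdenAiParsingIDTool

# providers is assumed to be inherited from the EdenaiTool base class.
id_tool = EdenAiParsingIDTool(providers=["amazon"], language="en")
summary = id_tool.run("https://example.com/passport-scan.jpg")  # placeholder URL
print(summary)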
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~qdrant~test_embedding_interface.py
import uuid from typing import Callable, Optional import pytest from libs.core.langchain_core.embeddings import Embeddings from langchain_community.vectorstores import Qdrant from tests.integration_tests.vectorstores.fake_embeddings import ( ConsistentFakeEmbeddings, ) @pytest.mark.parametrize( ["embeddings", "embedding_function"], [ (ConsistentFakeEmbeddings(), None), (ConsistentFakeEmbeddings().embed_query, None), (None, ConsistentFakeEmbeddings().embed_query), ], ) def test_qdrant_embedding_interface( embeddings: Optional[Embeddings], embedding_function: Optional[Callable] ) -> None: """Test Qdrant may accept different types for embeddings.""" from qdrant_client import QdrantClient client = QdrantClient(":memory:") collection_name = uuid.uuid4().hex Qdrant( client, collection_name, embeddings=embeddings, embedding_function=embedding_function, ) @pytest.mark.parametrize( ["embeddings", "embedding_function"], [ (ConsistentFakeEmbeddings(), ConsistentFakeEmbeddings().embed_query), (None, None), ], ) def test_qdrant_embedding_interface_raises_value_error( embeddings: Optional[Embeddings], embedding_function: Optional[Callable] ) -> None: """Test Qdrant requires only one method for embeddings.""" from qdrant_client import QdrantClient client = QdrantClient(":memory:") collection_name = uuid.uuid4().hex with pytest.raises(ValueError): Qdrant( client, collection_name, embeddings=embeddings, embedding_function=embedding_function, )
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~mongodb.py
import asyncio import logging from typing import Dict, List, Optional from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) class MongodbLoader(BaseLoader): """Load MongoDB documents.""" def __init__( self, connection_string: str, db_name: str, collection_name: str, *, filter_criteria: Optional[Dict] = None, ) -> None: try: from motor.motor_asyncio import AsyncIOMotorClient except ImportError as e: raise ImportError( "Cannot import from motor, please install with `pip install motor`." ) from e if not connection_string: raise ValueError("connection_string must be provided.") if not db_name: raise ValueError("db_name must be provided.") if not collection_name: raise ValueError("collection_name must be provided.") self.client = AsyncIOMotorClient(connection_string) self.db_name = db_name self.collection_name = collection_name self.filter_criteria = filter_criteria or {} self.db = self.client.get_database(db_name) self.collection = self.db.get_collection(collection_name) def load(self) -> List[Document]: """Load data into Document objects. Attention: This implementation starts an asyncio event loop which will only work if running in a sync env. In an async env, it should fail since there is already an event loop running. This code should be updated to kick off the event loop from a separate thread if running within an async context. """ return asyncio.run(self.aload()) async def aload(self) -> List[Document]: """Load data into Document objects.""" result = [] total_docs = await self.collection.count_documents(self.filter_criteria) async for doc in self.collection.find(self.filter_criteria): metadata = { "database": self.db_name, "collection": self.collection_name, } result.append(Document(page_content=str(doc), metadata=metadata)) if len(result) != total_docs: logger.warning( f"Only partial collection of documents returned. Loaded {len(result)} " f"docs, expected {total_docs}." ) return result
[]
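A usage sketch for the loader above; the connection string, database, collection, and filter are placeholders.

from langchain_community.document_loaders.mongodb import MongodbLoader

loader = MongodbLoader(
    connection_string="mongodb://localhost:27017/",  # placeholder URI
    db_name="support",
    collection_name="tickets",
    filter_criteria={"status": "open"},
)

# In a plain synchronous script use load(); inside an async application prefer
# `await loader.aload()`, since load() starts its own event loop.
docs = loader.load()
print(len(docs), docs[0].metadata if docs else None)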
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~sqlitevss.py
from __future__ import annotations import json import logging import warnings from typing import ( TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type, ) from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore if TYPE_CHECKING: import sqlite3 logger = logging.getLogger(__name__) class SQLiteVSS(VectorStore): """Wrapper around SQLite with vss extension as a vector database. To use, you should have the ``sqlite-vss`` python package installed. Example: .. code-block:: python from langchain_community.vectorstores import SQLiteVSS from langchain_community.embeddings.openai import OpenAIEmbeddings ... """ def __init__( self, table: str, connection: Optional[sqlite3.Connection], embedding: Embeddings, db_file: str = "vss.db", ): """Initialize with sqlite client with vss extension.""" try: import sqlite_vss # noqa # pylint: disable=unused-import except ImportError: raise ImportError( "Could not import sqlite-vss python package. " "Please install it with `pip install sqlite-vss`." ) if not connection: connection = self.create_connection(db_file) if not isinstance(embedding, Embeddings): warnings.warn("embeddings input must be Embeddings object.") self._connection = connection self._table = table self._embedding = embedding self.create_table_if_not_exists() def create_table_if_not_exists(self) -> None: self._connection.execute( f""" CREATE TABLE IF NOT EXISTS {self._table} ( rowid INTEGER PRIMARY KEY AUTOINCREMENT, text TEXT, metadata BLOB, text_embedding BLOB ) ; """ ) self._connection.execute( f""" CREATE VIRTUAL TABLE IF NOT EXISTS vss_{self._table} USING vss0( text_embedding({self.get_dimensionality()}) ); """ ) self._connection.execute( f""" CREATE TRIGGER IF NOT EXISTS embed_text AFTER INSERT ON {self._table} BEGIN INSERT INTO vss_{self._table}(rowid, text_embedding) VALUES (new.rowid, new.text_embedding) ; END; """ ) self._connection.commit() def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Add more texts to the vectorstore index. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. 
kwargs: vectorstore specific parameters """ max_id = self._connection.execute( f"SELECT max(rowid) as rowid FROM {self._table}" ).fetchone()["rowid"] if max_id is None: # no text added yet max_id = 0 embeds = self._embedding.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] data_input = [ (text, json.dumps(metadata), json.dumps(embed)) for text, metadata, embed in zip(texts, metadatas, embeds) ] self._connection.executemany( f"INSERT INTO {self._table}(text, metadata, text_embedding) " f"VALUES (?,?,?)", data_input, ) self._connection.commit() # pulling every ids we just inserted results = self._connection.execute( f"SELECT rowid FROM {self._table} WHERE rowid > {max_id}" ) return [row["rowid"] for row in results] def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: sql_query = f""" SELECT text, metadata, distance FROM {self._table} e INNER JOIN vss_{self._table} v on v.rowid = e.rowid WHERE vss_search( v.text_embedding, vss_search_params('{json.dumps(embedding)}', {k}) ) """ cursor = self._connection.cursor() cursor.execute(sql_query) results = cursor.fetchall() documents = [] for row in results: metadata = json.loads(row["metadata"]) or {} doc = Document(page_content=row["text"], metadata=metadata) documents.append((doc, row["distance"])) return documents def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query.""" embedding = self._embedding.embed_query(query) documents = self.similarity_search_with_score_by_vector( embedding=embedding, k=k ) return [doc for doc, _ in documents] def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return docs most similar to query.""" embedding = self._embedding.embed_query(query) documents = self.similarity_search_with_score_by_vector( embedding=embedding, k=k ) return documents def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: documents = self.similarity_search_with_score_by_vector( embedding=embedding, k=k ) return [doc for doc, _ in documents] @classmethod def from_texts( cls: Type[SQLiteVSS], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, table: str = "langchain", db_file: str = "vss.db", **kwargs: Any, ) -> SQLiteVSS: """Return VectorStore initialized from texts and embeddings.""" connection = cls.create_connection(db_file) vss = cls( table=table, connection=connection, db_file=db_file, embedding=embedding ) vss.add_texts(texts=texts, metadatas=metadatas) return vss @staticmethod def create_connection(db_file: str) -> sqlite3.Connection: import sqlite3 import sqlite_vss connection = sqlite3.connect(db_file) connection.row_factory = sqlite3.Row connection.enable_load_extension(True) sqlite_vss.load(connection) connection.enable_load_extension(False) return connection def get_dimensionality(self) -> int: """ Function that does a dummy embedding to figure out how many dimensions this embedding function returns. Needed for the virtual table DDL. """ dummy_text = "This is a dummy text" dummy_embedding = self._embedding.embed_query(dummy_text) return len(dummy_embedding)
[]
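A smoke-test style sketch of the store above; it assumes the sqlite-vss extension is installed and substitutes FakeEmbeddings for a real embedding model.

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import SQLiteVSS

texts = ["Ketanji Brown Jackson was nominated", "The weather was sunny"]
db = SQLiteVSS.from_texts(
    texts=texts,
    embedding=FakeEmbeddings(size=128),  # stand-in; use a real model in practice
    table="state_union",
    db_file="/tmp/vss.db",               # placeholder path
)
results = db.similarity_search("Who was nominated?", k=1)
print(results[0].page_content)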
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~retrievers~test_zep.py
from __future__ import annotations import copy from typing import TYPE_CHECKING, List import pytest from libs.core.langchain_core.documents import Document from pytest_mock import MockerFixture from langchain_community.retrievers import ZepRetriever if TYPE_CHECKING: from zep_python import MemorySearchResult, ZepClient @pytest.fixture def search_results() -> List[MemorySearchResult]: from zep_python import MemorySearchResult, Message search_result = [ { "message": { "uuid": "66830914-19f5-490b-8677-1ba06bcd556b", "created_at": "2023-05-18T20:40:42.743773Z", "role": "user", "content": "I'm looking to plan a trip to Iceland. Can you help me?", "token_count": 17, }, "summary": None, "dist": 0.8734284910450115, }, { "message": { "uuid": "015e618c-ba9d-45b6-95c3-77a8e611570b", "created_at": "2023-05-18T20:40:42.743773Z", "role": "user", "content": "How much does a trip to Iceland typically cost?", "token_count": 12, }, "summary": None, "dist": 0.8554048017463456, }, ] return [ MemorySearchResult( message=Message.parse_obj(result["message"]), summary=result["summary"], dist=result["dist"], ) for result in search_result ] @pytest.fixture @pytest.mark.requires("zep_python") def zep_retriever( mocker: MockerFixture, search_results: List[MemorySearchResult] ) -> ZepRetriever: mock_zep_client: ZepClient = mocker.patch("zep_python.ZepClient", autospec=True) mock_zep_client.memory = mocker.patch( "zep_python.memory.client.MemoryClient", autospec=True ) mock_zep_client.memory.search_memory.return_value = copy.deepcopy( # type: ignore search_results ) mock_zep_client.memory.asearch_memory.return_value = copy.deepcopy( # type: ignore search_results ) zep = ZepRetriever(session_id="123", url="http://localhost:8000") zep.zep_client = mock_zep_client return zep @pytest.mark.requires("zep_python") def test_zep_retriever_get_relevant_documents( zep_retriever: ZepRetriever, search_results: List[MemorySearchResult] ) -> None: documents: List[Document] = zep_retriever.get_relevant_documents( query="My trip to Iceland" ) _test_documents(documents, search_results) @pytest.mark.requires("zep_python") async def test_zep_retriever_aget_relevant_documents( zep_retriever: ZepRetriever, search_results: List[MemorySearchResult] ) -> None: documents: List[Document] = await zep_retriever.aget_relevant_documents( query="My trip to Iceland" ) _test_documents(documents, search_results) def _test_documents( documents: List[Document], search_results: List[MemorySearchResult] ) -> None: assert len(documents) == 2 for i, document in enumerate(documents): assert document.page_content == search_results[i].message.get( # type: ignore "content" ) assert document.metadata.get("uuid") == search_results[i].message.get( # type: ignore "uuid" ) assert document.metadata.get("role") == search_results[i].message.get( # type: ignore "role" ) assert document.metadata.get("score") == search_results[i].dist
[ "How much does a trip to Iceland typically cost?", "I'm looking to plan a trip to Iceland. Can you help me?" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~baiducloud_bos_directory.py
from typing import Any, Iterator, List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class BaiduBOSDirectoryLoader(BaseLoader): """Load from `Baidu BOS directory`.""" def __init__(self, conf: Any, bucket: str, prefix: str = ""): """Initialize with BOS config, bucket and prefix. :param conf(BosConfig): BOS config. :param bucket(str): BOS bucket. :param prefix(str): prefix. """ self.conf = conf self.bucket = bucket self.prefix = prefix def load(self) -> List[Document]: return list(self.lazy_load()) def lazy_load(self) -> Iterator[Document]: """Load documents.""" try: from baidubce.services.bos.bos_client import BosClient except ImportError: raise ImportError( "Please install bce-python-sdk with `pip install bce-python-sdk`." ) client = BosClient(self.conf) contents = [] marker = "" while True: response = client.list_objects( bucket_name=self.bucket, prefix=self.prefix, marker=marker, max_keys=1000, ) contents_len = len(response.contents) contents.extend(response.contents) if response.is_truncated or contents_len < int(str(response.max_keys)): break marker = response.next_marker from langchain_community.document_loaders.baiducloud_bos_file import ( BaiduBOSFileLoader, ) for content in contents: if str(content.key).endswith("/"): continue loader = BaiduBOSFileLoader(self.conf, self.bucket, str(content.key)) yield loader.load()[0]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~javelin_ai_gateway.py
import logging from typing import Any, Dict, List, Mapping, Optional, cast from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.chat_models import BaseChatModel from libs.core.langchain_core.messages import ( AIMessage, BaseMessage, ChatMessage, FunctionMessage, HumanMessage, SystemMessage, ) from libs.core.langchain_core.outputs import ( ChatGeneration, ChatResult, ) from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr logger = logging.getLogger(__name__) # Ignoring type because below is valid pydantic code # Unexpected keyword argument "extra" for "__init_subclass__" of "object" [call-arg] class ChatParams(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Parameters for the `Javelin AI Gateway` LLM.""" temperature: float = 0.0 stop: Optional[List[str]] = None max_tokens: Optional[int] = None class ChatJavelinAIGateway(BaseChatModel): """`Javelin AI Gateway` chat models API. To use, you should have the ``javelin_sdk`` python package installed. For more information, see https://docs.getjavelin.io Example: .. code-block:: python from langchain_community.chat_models import ChatJavelinAIGateway chat = ChatJavelinAIGateway( gateway_uri="<javelin-ai-gateway-uri>", route="<javelin-ai-gateway-chat-route>", params={ "temperature": 0.1 } ) """ route: str """The route to use for the Javelin AI Gateway API.""" gateway_uri: Optional[str] = None """The URI for the Javelin AI Gateway API.""" params: Optional[ChatParams] = None """Parameters for the Javelin AI Gateway LLM.""" client: Any """javelin client.""" javelin_api_key: Optional[SecretStr] = None """The API key for the Javelin AI Gateway.""" def __init__(self, **kwargs: Any): try: from javelin_sdk import ( JavelinClient, UnauthorizedError, ) except ImportError: raise ImportError( "Could not import javelin_sdk python package. " "Please install it with `pip install javelin_sdk`." 
) super().__init__(**kwargs) if self.gateway_uri: try: self.client = JavelinClient( base_url=self.gateway_uri, api_key=cast(SecretStr, self.javelin_api_key).get_secret_value(), ) except UnauthorizedError as e: raise ValueError("Javelin: Incorrect API Key.") from e @property def _default_params(self) -> Dict[str, Any]: params: Dict[str, Any] = { "gateway_uri": self.gateway_uri, "javelin_api_key": cast(SecretStr, self.javelin_api_key).get_secret_value(), "route": self.route, **(self.params.dict() if self.params else {}), } return params def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: message_dicts = [ ChatJavelinAIGateway._convert_message_to_dict(message) for message in messages ] data: Dict[str, Any] = { "messages": message_dicts, **(self.params.dict() if self.params else {}), } resp = self.client.query_route(self.route, query_body=data) return ChatJavelinAIGateway._create_chat_result(resp.dict()) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: message_dicts = [ ChatJavelinAIGateway._convert_message_to_dict(message) for message in messages ] data: Dict[str, Any] = { "messages": message_dicts, **(self.params.dict() if self.params else {}), } resp = await self.client.aquery_route(self.route, query_body=data) return ChatJavelinAIGateway._create_chat_result(resp.dict()) @property def _identifying_params(self) -> Dict[str, Any]: return self._default_params def _get_invocation_params( self, stop: Optional[List[str]] = None, **kwargs: Any ) -> Dict[str, Any]: """Get the parameters used to invoke the model FOR THE CALLBACKS.""" return { **self._default_params, **super()._get_invocation_params(stop=stop, **kwargs), } @property def _llm_type(self) -> str: """Return type of chat model.""" return "javelin-ai-gateway-chat" @staticmethod def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: role = _dict["role"] content = _dict["content"] if role == "user": return HumanMessage(content=content) elif role == "assistant": return AIMessage(content=content) elif role == "system": return SystemMessage(content=content) else: return ChatMessage(content=content, role=role) @staticmethod def _raise_functions_not_supported() -> None: raise ValueError( "Function messages are not supported by the Javelin AI Gateway. Please" " create a feature request at https://docs.getjavelin.io" ) @staticmethod def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} elif isinstance(message, FunctionMessage): raise ValueError( "Function messages are not supported by the Javelin AI Gateway. 
Please" " create a feature request at https://docs.getjavelin.io" ) else: raise ValueError(f"Got unknown message type: {message}") if "function_call" in message.additional_kwargs: ChatJavelinAIGateway._raise_functions_not_supported() if message.additional_kwargs: logger.warning( "Additional message arguments are unsupported by Javelin AI Gateway " " and will be ignored: %s", message.additional_kwargs, ) return message_dict @staticmethod def _create_chat_result(response: Mapping[str, Any]) -> ChatResult: generations = [] for candidate in response["llm_response"]["choices"]: message = ChatJavelinAIGateway._convert_dict_to_message( candidate["message"] ) message_metadata = candidate.get("metadata", {}) gen = ChatGeneration( message=message, generation_info=dict(message_metadata), ) generations.append(gen) response_metadata = response.get("metadata", {}) return ChatResult(generations=generations, llm_output=response_metadata)
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~callbacks~confident_callback.py
# flake8: noqa import os import warnings from typing import Any, Dict, List, Optional, Union from libs.core.langchain_core.callbacks import BaseCallbackHandler from libs.core.langchain_core.agents import AgentAction, AgentFinish from libs.core.langchain_core.outputs import LLMResult class DeepEvalCallbackHandler(BaseCallbackHandler): """Callback Handler that logs into deepeval. Args: implementation_name: name of the `implementation` in deepeval metrics: A list of metrics Raises: ImportError: if the `deepeval` package is not installed. Examples: >>> from langchain_community.llms import OpenAI >>> from langchain_community.callbacks import DeepEvalCallbackHandler >>> from deepeval.metrics import AnswerRelevancy >>> metric = AnswerRelevancy(minimum_score=0.3) >>> deepeval_callback = DeepEvalCallbackHandler( ... implementation_name="exampleImplementation", ... metrics=[metric], ... ) >>> llm = OpenAI( ... temperature=0, ... callbacks=[deepeval_callback], ... verbose=True, ... openai_api_key="API_KEY_HERE", ... ) >>> llm.generate([ ... "What is the best evaluation tool out there? (no bias at all)", ... ]) "Deepeval, no doubt about it." """ REPO_URL: str = "https://github.com/confident-ai/deepeval" ISSUES_URL: str = f"{REPO_URL}/issues" BLOG_URL: str = "https://docs.confident-ai.com" # noqa: E501 def __init__( self, metrics: List[Any], implementation_name: Optional[str] = None, ) -> None: """Initializes the `deepevalCallbackHandler`. Args: implementation_name: Name of the implementation you want. metrics: What metrics do you want to track? Raises: ImportError: if the `deepeval` package is not installed. ConnectionError: if the connection to deepeval fails. """ super().__init__() # Import deepeval (not via `import_deepeval` to keep hints in IDEs) try: import deepeval # ignore: F401,I001 except ImportError: raise ImportError( """To use the deepeval callback manager you need to have the `deepeval` Python package installed. Please install it with `pip install deepeval`""" ) if os.path.exists(".deepeval"): warnings.warn( """You are currently not logging anything to the dashboard, we recommend using `deepeval login`.""" ) # Set the deepeval variables self.implementation_name = implementation_name self.metrics = metrics warnings.warn( ( "The `DeepEvalCallbackHandler` is currently in beta and is subject to" " change based on updates to `langchain`. Please report any issues to" f" {self.ISSUES_URL} as an `integration` issue." 
), ) def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Store the prompts""" self.prompts = prompts def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Do nothing when a new token is generated.""" pass def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Log records to deepeval when an LLM ends.""" from deepeval.metrics.answer_relevancy import AnswerRelevancy from deepeval.metrics.bias_classifier import UnBiasedMetric from deepeval.metrics.metric import Metric from deepeval.metrics.toxic_classifier import NonToxicMetric for metric in self.metrics: for i, generation in enumerate(response.generations): # Here, we only measure the first generation's output output = generation[0].text query = self.prompts[i] if isinstance(metric, AnswerRelevancy): result = metric.measure( output=output, query=query, ) print(f"Answer Relevancy: {result}") elif isinstance(metric, UnBiasedMetric): score = metric.measure(output) print(f"Bias Score: {score}") elif isinstance(metric, NonToxicMetric): score = metric.measure(output) print(f"Toxic Score: {score}") else: raise ValueError( f"""Metric {metric.__name__} is not supported by deepeval callbacks.""" ) def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when LLM outputs an error.""" pass def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Do nothing when chain starts""" pass def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Do nothing when chain ends.""" pass def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when LLM chain outputs an error.""" pass def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any, ) -> None: """Do nothing when tool starts.""" pass def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Do nothing when agent takes a specific action.""" pass def on_tool_end( self, output: str, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """Do nothing when tool ends.""" pass def on_tool_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when tool outputs an error.""" pass def on_text(self, text: str, **kwargs: Any) -> None: """Do nothing""" pass def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Do nothing""" pass
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~slack~schedule_message.py
import logging from datetime import datetime as dt from typing import Optional, Type from libs.core.langchain_core.callbacks import CallbackManagerForToolRun from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from langchain_community.tools.slack.base import SlackBaseTool from langchain_community.tools.slack.utils import UTC_FORMAT logger = logging.getLogger(__name__) class ScheduleMessageSchema(BaseModel): """Input for ScheduleMessageTool.""" message: str = Field( ..., description="The message to be sent.", ) channel: str = Field( ..., description="The channel, private group, or IM channel to send message to.", ) timestamp: str = Field( ..., description="The datetime for when the message should be sent in the " ' following format: YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date ' " and time components, and the time zone offset is specified as ±hh:mm. " ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, ' " 2023, at 10:30 AM in a time zone with a positive offset of 3 " " hours from Coordinated Universal Time (UTC).", ) class SlackScheduleMessage(SlackBaseTool): """Tool for scheduling a message in Slack.""" name: str = "schedule_message" description: str = ( "Use this tool to schedule a message to be sent on a specific date and time." ) args_schema: Type[ScheduleMessageSchema] = ScheduleMessageSchema def _run( self, message: str, channel: str, timestamp: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: unix_timestamp = dt.timestamp(dt.strptime(timestamp, UTC_FORMAT)) result = self.client.chat_scheduleMessage( channel=channel, text=message, post_at=unix_timestamp ) output = "Message scheduled: " + str(result) return output except Exception as e: return "Error scheduling message: {}".format(e)
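
# --- Hedged usage sketch (illustrative only). It assumes the Slack WebClient is
# already configured by SlackBaseTool (typically from a bot token in the environment);
# the channel and the UTC_FORMAT timestamp below are placeholders.
if __name__ == "__main__":
    schedule_tool = SlackScheduleMessage()
    print(
        schedule_tool.run(
            {
                "message": "Reminder: project sync in 30 minutes.",
                "channel": "#general",
                "timestamp": "2023-06-09T10:30:00+03:00",
            }
        )
    )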
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~azure_cognitive_services~form_recognizer.py
from __future__ import annotations import logging from typing import Any, Dict, List, Optional from libs.core.langchain_core.callbacks import CallbackManagerForToolRun from libs.core.langchain_core.pydantic_v1 import root_validator from libs.core.langchain_core.tools import BaseTool from libs.core.langchain_core.utils import get_from_dict_or_env from langchain_community.tools.azure_cognitive_services.utils import ( detect_file_src_type, ) logger = logging.getLogger(__name__) class AzureCogsFormRecognizerTool(BaseTool): """Tool that queries the Azure Cognitive Services Form Recognizer API. In order to set this up, follow instructions at: https://learn.microsoft.com/en-us/azure/applied-ai-services/form-recognizer/quickstarts/get-started-sdks-rest-api?view=form-recog-3.0.0&pivots=programming-language-python """ azure_cogs_key: str = "" #: :meta private: azure_cogs_endpoint: str = "" #: :meta private: doc_analysis_client: Any #: :meta private: name: str = "azure_cognitive_services_form_recognizer" description: str = ( "A wrapper around Azure Cognitive Services Form Recognizer. " "Useful for when you need to " "extract text, tables, and key-value pairs from documents. " "Input should be a url to a document." ) @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env( values, "azure_cogs_key", "AZURE_COGS_KEY" ) azure_cogs_endpoint = get_from_dict_or_env( values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT" ) try: from azure.ai.formrecognizer import DocumentAnalysisClient from azure.core.credentials import AzureKeyCredential values["doc_analysis_client"] = DocumentAnalysisClient( endpoint=azure_cogs_endpoint, credential=AzureKeyCredential(azure_cogs_key), ) except ImportError: raise ImportError( "azure-ai-formrecognizer is not installed. " "Run `pip install azure-ai-formrecognizer` to install." 
) return values def _parse_tables(self, tables: List[Any]) -> List[Any]: result = [] for table in tables: rc, cc = table.row_count, table.column_count _table = [["" for _ in range(cc)] for _ in range(rc)] for cell in table.cells: _table[cell.row_index][cell.column_index] = cell.content result.append(_table) return result def _parse_kv_pairs(self, kv_pairs: List[Any]) -> List[Any]: result = [] for kv_pair in kv_pairs: key = kv_pair.key.content if kv_pair.key else "" value = kv_pair.value.content if kv_pair.value else "" result.append((key, value)) return result def _document_analysis(self, document_path: str) -> Dict: document_src_type = detect_file_src_type(document_path) if document_src_type == "local": with open(document_path, "rb") as document: poller = self.doc_analysis_client.begin_analyze_document( "prebuilt-document", document ) elif document_src_type == "remote": poller = self.doc_analysis_client.begin_analyze_document_from_url( "prebuilt-document", document_path ) else: raise ValueError(f"Invalid document path: {document_path}") result = poller.result() res_dict = {} if result.content is not None: res_dict["content"] = result.content if result.tables is not None: res_dict["tables"] = self._parse_tables(result.tables) if result.key_value_pairs is not None: res_dict["key_value_pairs"] = self._parse_kv_pairs(result.key_value_pairs) return res_dict def _format_document_analysis_result(self, document_analysis_result: Dict) -> str: formatted_result = [] if "content" in document_analysis_result: formatted_result.append( f"Content: {document_analysis_result['content']}".replace("\n", " ") ) if "tables" in document_analysis_result: for i, table in enumerate(document_analysis_result["tables"]): formatted_result.append(f"Table {i}: {table}".replace("\n", " ")) if "key_value_pairs" in document_analysis_result: for kv_pair in document_analysis_result["key_value_pairs"]: formatted_result.append( f"{kv_pair[0]}: {kv_pair[1]}".replace("\n", " ") ) return "\n".join(formatted_result) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" try: document_analysis_result = self._document_analysis(query) if not document_analysis_result: return "No good document analysis result was found" return self._format_document_analysis_result(document_analysis_result) except Exception as e: raise RuntimeError(f"Error while running AzureCogsFormRecognizerTool: {e}")
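
# --- Hedged usage sketch (illustrative only). AZURE_COGS_KEY and AZURE_COGS_ENDPOINT
# are read by the validator above; the document URL is a placeholder assumption.
if __name__ == "__main__":
    form_tool = AzureCogsFormRecognizerTool()
    print(form_tool.run("https://example.com/sample-invoice.pdf"))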
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_message_histories~rocksetdb.py
from datetime import datetime from time import sleep from typing import Any, Callable, List, Union from uuid import uuid4 from libs.core.langchain_core.chat_history import BaseChatMessageHistory from libs.core.langchain_core.messages import ( BaseMessage, message_to_dict, messages_from_dict, ) class RocksetChatMessageHistory(BaseChatMessageHistory): """Uses Rockset to store chat messages. To use, ensure that the `rockset` python package installed. Example: .. code-block:: python from langchain_community.chat_message_histories import ( RocksetChatMessageHistory ) from rockset import RocksetClient history = RocksetChatMessageHistory( session_id="MySession", client=RocksetClient(), collection="langchain_demo", sync=True ) history.add_user_message("hi!") history.add_ai_message("whats up?") print(history.messages) """ # You should set these values based on your VI. # These values are configured for the typical # free VI. Read more about VIs here: # https://rockset.com/docs/instances SLEEP_INTERVAL_MS: int = 5 ADD_TIMEOUT_MS: int = 5000 CREATE_TIMEOUT_MS: int = 20000 def _wait_until(self, method: Callable, timeout: int, **method_params: Any) -> None: """Sleeps until meth() evaluates to true. Passes kwargs into meth. """ start = datetime.now() while not method(**method_params): curr = datetime.now() if (curr - start).total_seconds() * 1000 > timeout: raise TimeoutError(f"{method} timed out at {timeout} ms") sleep(RocksetChatMessageHistory.SLEEP_INTERVAL_MS / 1000) def _query(self, query: str, **query_params: Any) -> List[Any]: """Executes an SQL statement and returns the result Args: - query: The SQL string - **query_params: Parameters to pass into the query """ return self.client.sql(query, params=query_params).results def _create_collection(self) -> None: """Creates a collection for this message history""" self.client.Collections.create_s3_collection( name=self.collection, workspace=self.workspace ) def _collection_exists(self) -> bool: """Checks whether a collection exists for this message history""" try: self.client.Collections.get(collection=self.collection) except self.rockset.exceptions.NotFoundException: return False return True def _collection_is_ready(self) -> bool: """Checks whether the collection for this message history is ready to be queried """ return ( self.client.Collections.get(collection=self.collection).data.status == "READY" ) def _document_exists(self) -> bool: return ( len( self._query( f""" SELECT 1 FROM {self.location} WHERE _id=:session_id LIMIT 1 """, session_id=self.session_id, ) ) != 0 ) def _wait_until_collection_created(self) -> None: """Sleeps until the collection for this message history is ready to be queried """ self._wait_until( lambda: self._collection_is_ready(), RocksetChatMessageHistory.CREATE_TIMEOUT_MS, ) def _wait_until_message_added(self, message_id: str) -> None: """Sleeps until a message is added to the messages list""" self._wait_until( lambda message_id: len( self._query( f""" SELECT * FROM UNNEST(( SELECT {self.messages_key} FROM {self.location} WHERE _id = :session_id )) AS message WHERE message.data.additional_kwargs.id = :message_id LIMIT 1 """, session_id=self.session_id, message_id=message_id, ), ) != 0, RocksetChatMessageHistory.ADD_TIMEOUT_MS, message_id=message_id, ) def _create_empty_doc(self) -> None: """Creates or replaces a document for this message history with no messages""" self.client.Documents.add_documents( collection=self.collection, workspace=self.workspace, data=[{"_id": self.session_id, self.messages_key: []}], ) def __init__( 
self, session_id: str, client: Any, collection: str, workspace: str = "commons", messages_key: str = "messages", sync: bool = False, message_uuid_method: Callable[[], Union[str, int]] = lambda: str(uuid4()), ) -> None: """Constructs a new RocksetChatMessageHistory. Args: - session_id: The ID of the chat session - client: The RocksetClient object to use to query - collection: The name of the collection to use to store chat messages. If a collection with the given name does not exist in the workspace, it is created. - workspace: The workspace containing `collection`. Defaults to `"commons"` - messages_key: The DB column containing message history. Defaults to `"messages"` - sync: Whether to wait for messages to be added. Defaults to `False`. NOTE: setting this to `True` will slow down performance. - message_uuid_method: The method that generates message IDs. If set, all messages will have an `id` field within the `additional_kwargs` property. If this param is not set and `sync` is `False`, message IDs will not be created. If this param is not set and `sync` is `True`, the `uuid.uuid4` method will be used to create message IDs. """ try: import rockset except ImportError: raise ImportError( "Could not import rockset client python package. " "Please install it with `pip install rockset`." ) if not isinstance(client, rockset.RocksetClient): raise ValueError( f"client should be an instance of rockset.RocksetClient, " f"got {type(client)}" ) self.session_id = session_id self.client = client self.collection = collection self.workspace = workspace self.location = f'"{self.workspace}"."{self.collection}"' self.rockset = rockset self.messages_key = messages_key self.message_uuid_method = message_uuid_method self.sync = sync try: self.client.set_application("langchain") except AttributeError: # ignore pass if not self._collection_exists(): self._create_collection() self._wait_until_collection_created() self._create_empty_doc() elif not self._document_exists(): self._create_empty_doc() @property def messages(self) -> List[BaseMessage]: # type: ignore """Messages in this chat history.""" return messages_from_dict( self._query( f""" SELECT * FROM UNNEST (( SELECT "{self.messages_key}" FROM {self.location} WHERE _id = :session_id )) """, session_id=self.session_id, ) ) def add_message(self, message: BaseMessage) -> None: """Add a Message object to the history. Args: message: A BaseMessage object to store. """ if self.sync and "id" not in message.additional_kwargs: message.additional_kwargs["id"] = self.message_uuid_method() self.client.Documents.patch_documents( collection=self.collection, workspace=self.workspace, data=[ self.rockset.model.patch_document.PatchDocument( id=self.session_id, patch=[ self.rockset.model.patch_operation.PatchOperation( op="ADD", path=f"/{self.messages_key}/-", value=message_to_dict(message), ) ], ) ], ) if self.sync: self._wait_until_message_added(message.additional_kwargs["id"]) def clear(self) -> None: """Removes all messages from the chat history""" self._create_empty_doc() if self.sync: self._wait_until( lambda: not self.messages, RocksetChatMessageHistory.ADD_TIMEOUT_MS, )
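
# --- Hedged usage sketch (illustrative only). It shows the synchronous mode described
# in the constructor docstring: with sync=True every added message gets an id (here a
# deterministic counter instead of uuid4) and add_message blocks until Rockset has
# indexed it. The RocksetClient api_key argument is an assumption; see the Rockset SDK.
if __name__ == "__main__":
    from itertools import count

    from rockset import RocksetClient

    _ids = count()
    history = RocksetChatMessageHistory(
        session_id="MySession",
        client=RocksetClient(api_key="<rockset-api-key>"),  # assumed credentials
        collection="langchain_demo",
        sync=True,
        message_uuid_method=lambda: next(_ids),
    )
    history.add_user_message("hi!")
    history.add_ai_message("whats up?")
    print(history.messages)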
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~bookend.py
"""Wrapper around Bookend AI embedding models.""" import json from typing import Any, List import requests from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import BaseModel, Field API_URL = "https://api.bookend.ai/" DEFAULT_TASK = "embeddings" PATH = "/models/predict" class BookendEmbeddings(BaseModel, Embeddings): """Bookend AI sentence_transformers embedding models. Example: .. code-block:: python from langchain_community.embeddings import BookendEmbeddings bookend = BookendEmbeddings( domain={domain} api_token={api_token} model_id={model_id} ) bookend.embed_documents([ "Please put on these earmuffs because I can't you hear.", "Baby wipes are made of chocolate stardust.", ]) bookend.embed_query( "She only paints with bold colors; she does not like pastels." ) """ domain: str """Request for a domain at https://bookend.ai/ to use this embeddings module.""" api_token: str """Request for an API token at https://bookend.ai/ to use this embeddings module.""" model_id: str """Embeddings model ID to use.""" auth_header: dict = Field(default_factory=dict) def __init__(self, **kwargs: Any): super().__init__(**kwargs) self.auth_header = {"Authorization": "Basic {}".format(self.api_token)} def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a Bookend deployed embeddings model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ result = [] headers = self.auth_header headers["Content-Type"] = "application/json; charset=utf-8" params = { "model_id": self.model_id, "task": DEFAULT_TASK, } for text in texts: data = json.dumps( {"text": text, "question": None, "context": None, "instruction": None} ) r = requests.request( "POST", API_URL + self.domain + PATH, headers=headers, params=params, data=data, ) result.append(r.json()[0]["data"]) return result def embed_query(self, text: str) -> List[float]: """Embed a query using a Bookend deployed embeddings model. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed_documents([text])[0]
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~tensorflow_hub.py
from typing import Any, List from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3" class TensorflowHubEmbeddings(BaseModel, Embeddings): """TensorflowHub embedding models. To use, you should have the ``tensorflow_text`` python package installed. Example: .. code-block:: python from langchain_community.embeddings import TensorflowHubEmbeddings url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3" tf = TensorflowHubEmbeddings(model_url=url) """ embed: Any #: :meta private: model_url: str = DEFAULT_MODEL_URL """Model name to use.""" def __init__(self, **kwargs: Any): """Initialize the tensorflow_hub and tensorflow_text.""" super().__init__(**kwargs) try: import tensorflow_hub except ImportError: raise ImportError( "Could not import tensorflow-hub python package. " "Please install it with `pip install tensorflow-hub``." ) try: import tensorflow_text # noqa except ImportError: raise ImportError( "Could not import tensorflow_text python package. " "Please install it with `pip install tensorflow_text``." ) self.embed = tensorflow_hub.load(self.model_url) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a TensorflowHub embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.embed(texts).numpy() return embeddings.tolist() def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a TensorflowHub embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embedding = self.embed([text]).numpy()[0] return embedding.tolist()
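
# --- Hedged usage sketch (illustrative only). The default multilingual
# universal-sentence-encoder is downloaded from TF Hub on first use; the sample
# sentences are arbitrary.
if __name__ == "__main__":
    embedder = TensorflowHubEmbeddings()
    doc_vectors = embedder.embed_documents(["hello world", "hola mundo"])
    query_vector = embedder.embed_query("greetings")
    print(len(doc_vectors), len(query_vector))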
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~retrievers~test_google_docai_warehoure_retriever.py
"""Test Google Cloud Document AI Warehouse retriever.""" import os from libs.core.langchain_core.documents import Document from langchain_community.retrievers import GoogleDocumentAIWarehouseRetriever def test_google_documentai_warehoure_retriever() -> None: """In order to run this test, you should provide a project_id and user_ldap. Example: export USER_LDAP=... export PROJECT_NUMBER=... """ project_number = os.environ["PROJECT_NUMBER"] user_ldap = os.environ["USER_LDAP"] docai_wh_retriever = GoogleDocumentAIWarehouseRetriever( project_number=project_number ) documents = docai_wh_retriever.get_relevant_documents( "What are Alphabet's Other Bets?", user_ldap=user_ldap ) assert len(documents) > 0 for doc in documents: assert isinstance(doc, Document)
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~vectorstores~marqo.py
from __future__ import annotations import json import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union, ) from libs.core.langchain_core.documents import Document from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.vectorstores import VectorStore if TYPE_CHECKING: import marqo class Marqo(VectorStore): """`Marqo` vector store. Marqo indexes have their own models associated with them to generate your embeddings. This means that you can selected from a range of different models and also use CLIP models to create multimodal indexes with images and text together. Marqo also supports more advanced queries with multiple weighted terms, see See https://docs.marqo.ai/latest/#searching-using-weights-in-queries. This class can flexibly take strings or dictionaries for weighted queries in its similarity search methods. To use, you should have the `marqo` python package installed, you can do this with `pip install marqo`. Example: .. code-block:: python import marqo from langchain_community.vectorstores import Marqo client = marqo.Client(url=os.environ["MARQO_URL"], ...) vectorstore = Marqo(client, index_name) """ def __init__( self, client: marqo.Client, index_name: str, add_documents_settings: Optional[Dict[str, Any]] = None, searchable_attributes: Optional[List[str]] = None, page_content_builder: Optional[Callable[[Dict[str, Any]], str]] = None, ): """Initialize with Marqo client.""" try: import marqo except ImportError: raise ImportError( "Could not import marqo python package. " "Please install it with `pip install marqo`." ) if not isinstance(client, marqo.Client): raise ValueError( f"client should be an instance of marqo.Client, got {type(client)}" ) self._client = client self._index_name = index_name self._add_documents_settings = ( {} if add_documents_settings is None else add_documents_settings ) self._searchable_attributes = searchable_attributes self.page_content_builder = page_content_builder self.tensor_fields = ["text"] self._document_batch_size = 1024 @property def embeddings(self) -> Optional[Embeddings]: return None def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Upload texts with metadata (properties) to Marqo. You can either have marqo generate ids for each document or you can provide your own by including a "_id" field in the metadata objects. Args: texts (Iterable[str]): am iterator of texts - assumed to preserve an order that matches the metadatas. metadatas (Optional[List[dict]], optional): a list of metadatas. Raises: ValueError: if metadatas is provided and the number of metadatas differs from the number of texts. Returns: List[str]: The list of ids that were added. """ if self._client.index(self._index_name).get_settings()["index_defaults"][ "treat_urls_and_pointers_as_images" ]: raise ValueError( "Marqo.add_texts is disabled for multimodal indexes. To add documents " "with a multimodal index use the Python client for Marqo directly." 
) documents: List[Dict[str, str]] = [] num_docs = 0 for i, text in enumerate(texts): doc = { "text": text, "metadata": json.dumps(metadatas[i]) if metadatas else json.dumps({}), } documents.append(doc) num_docs += 1 ids = [] for i in range(0, num_docs, self._document_batch_size): response = self._client.index(self._index_name).add_documents( documents[i : i + self._document_batch_size], tensor_fields=self.tensor_fields, **self._add_documents_settings, ) if response["errors"]: err_msg = ( f"Error in upload for documents in index range [{i}," f"{i + self._document_batch_size}], " f"check Marqo logs." ) raise RuntimeError(err_msg) ids += [item["_id"] for item in response["items"]] return ids def similarity_search( self, query: Union[str, Dict[str, float]], k: int = 4, **kwargs: Any, ) -> List[Document]: """Search the marqo index for the most similar documents. Args: query (Union[str, Dict[str, float]]): The query for the search, either as a string or a weighted query. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Document]: k documents ordered from best to worst match. """ results = self.marqo_similarity_search(query=query, k=k) documents = self._construct_documents_from_results_without_score(results) return documents def similarity_search_with_score( self, query: Union[str, Dict[str, float]], k: int = 4, ) -> List[Tuple[Document, float]]: """Return documents from Marqo that are similar to the query as well as their scores. Args: query (str): The query to search with, either as a string or a weighted query. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Tuple[Document, float]]: The matching documents and their scores, ordered by descending score. """ results = self.marqo_similarity_search(query=query, k=k) scored_documents = self._construct_documents_from_results_with_score(results) return scored_documents def bulk_similarity_search( self, queries: Iterable[Union[str, Dict[str, float]]], k: int = 4, **kwargs: Any, ) -> List[List[Document]]: """Search the marqo index for the most similar documents in bulk with multiple queries. Args: queries (Iterable[Union[str, Dict[str, float]]]): An iterable of queries to execute in bulk, queries in the list can be strings or dictionaries of weighted queries. k (int, optional): The number of documents to return for each query. Defaults to 4. Returns: List[List[Document]]: A list of results for each query. """ bulk_results = self.marqo_bulk_similarity_search(queries=queries, k=k) bulk_documents: List[List[Document]] = [] for results in bulk_results["result"]: documents = self._construct_documents_from_results_without_score(results) bulk_documents.append(documents) return bulk_documents def bulk_similarity_search_with_score( self, queries: Iterable[Union[str, Dict[str, float]]], k: int = 4, **kwargs: Any, ) -> List[List[Tuple[Document, float]]]: """Return documents from Marqo that are similar to the query as well as their scores using a batch of queries. Args: query (Iterable[Union[str, Dict[str, float]]]): An iterable of queries to execute in bulk, queries in the list can be strings or dictionaries of weighted queries. k (int, optional): The number of documents to return. Defaults to 4. 
Returns: List[Tuple[Document, float]]: A list of lists of the matching documents and their scores for each query """ bulk_results = self.marqo_bulk_similarity_search(queries=queries, k=k) bulk_documents: List[List[Tuple[Document, float]]] = [] for results in bulk_results["result"]: documents = self._construct_documents_from_results_with_score(results) bulk_documents.append(documents) return bulk_documents def _construct_documents_from_results_with_score( self, results: Dict[str, List[Dict[str, str]]] ) -> List[Tuple[Document, Any]]: """Helper to convert Marqo results into documents. Args: results (List[dict]): A marqo results object with the 'hits'. include_scores (bool, optional): Include scores alongside documents. Defaults to False. Returns: Union[List[Document], List[Tuple[Document, float]]]: The documents or document score pairs if `include_scores` is true. """ documents: List[Tuple[Document, Any]] = [] for res in results["hits"]: if self.page_content_builder is None: text = res["text"] else: text = self.page_content_builder(res) metadata = json.loads(res.get("metadata", "{}")) documents.append( (Document(page_content=text, metadata=metadata), res["_score"]) ) return documents def _construct_documents_from_results_without_score( self, results: Dict[str, List[Dict[str, str]]] ) -> List[Document]: """Helper to convert Marqo results into documents. Args: results (List[dict]): A marqo results object with the 'hits'. include_scores (bool, optional): Include scores alongside documents. Defaults to False. Returns: Union[List[Document], List[Tuple[Document, float]]]: The documents or document score pairs if `include_scores` is true. """ documents: List[Document] = [] for res in results["hits"]: if self.page_content_builder is None: text = res["text"] else: text = self.page_content_builder(res) metadata = json.loads(res.get("metadata", "{}")) documents.append(Document(page_content=text, metadata=metadata)) return documents def marqo_similarity_search( self, query: Union[str, Dict[str, float]], k: int = 4, ) -> Dict[str, List[Dict[str, str]]]: """Return documents from Marqo exposing Marqo's output directly Args: query (str): The query to search with. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Dict[str, Any]]: This hits from marqo. """ results = self._client.index(self._index_name).search( q=query, searchable_attributes=self._searchable_attributes, limit=k ) return results def marqo_bulk_similarity_search( self, queries: Iterable[Union[str, Dict[str, float]]], k: int = 4 ) -> Dict[str, List[Dict[str, List[Dict[str, str]]]]]: """Return documents from Marqo using a bulk search, exposes Marqo's output directly Args: queries (Iterable[Union[str, Dict[str, float]]]): A list of queries. k (int, optional): The number of documents to return for each query. Defaults to 4. Returns: Dict[str, Dict[List[Dict[str, Dict[str, Any]]]]]: A bulk search results object """ bulk_results = { "result": [ self._client.index(self._index_name).search( q=query, searchable_attributes=self._searchable_attributes, limit=k ) for query in queries ] } return bulk_results @classmethod def from_documents( cls: Type[Marqo], documents: List[Document], embedding: Union[Embeddings, None] = None, **kwargs: Any, ) -> Marqo: """Return VectorStore initialized from documents. Note that Marqo does not need embeddings, we retain the parameter to adhere to the Liskov substitution principle. Args: documents (List[Document]): Input documents embedding (Any, optional): Embeddings (not required). 
Defaults to None. Returns: VectorStore: A Marqo vectorstore """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] return cls.from_texts(texts, metadatas=metadatas, **kwargs) @classmethod def from_texts( cls, texts: List[str], embedding: Any = None, metadatas: Optional[List[dict]] = None, index_name: str = "", url: str = "http://localhost:8882", api_key: str = "", add_documents_settings: Optional[Dict[str, Any]] = None, searchable_attributes: Optional[List[str]] = None, page_content_builder: Optional[Callable[[Dict[str, str]], str]] = None, index_settings: Optional[Dict[str, Any]] = None, verbose: bool = True, **kwargs: Any, ) -> Marqo: """Return Marqo initialized from texts. Note that Marqo does not need embeddings, we retain the parameter to adhere to the Liskov substitution principle. This is a quick way to get started with marqo - simply provide your texts and metadatas and this will create an instance of the data store and index the provided data. To know the ids of your documents with this approach you will need to include them in under the key "_id" in your metadatas for each text Example: .. code-block:: python from langchain_community.vectorstores import Marqo datastore = Marqo(texts=['text'], index_name='my-first-index', url='http://localhost:8882') Args: texts (List[str]): A list of texts to index into marqo upon creation. embedding (Any, optional): Embeddings (not required). Defaults to None. index_name (str, optional): The name of the index to use, if none is provided then one will be created with a UUID. Defaults to None. url (str, optional): The URL for Marqo. Defaults to "http://localhost:8882". api_key (str, optional): The API key for Marqo. Defaults to "". metadatas (Optional[List[dict]], optional): A list of metadatas, to accompany the texts. Defaults to None. this is only used when a new index is being created. Defaults to "cpu". Can be "cpu" or "cuda". add_documents_settings (Optional[Dict[str, Any]], optional): Settings for adding documents, see https://docs.marqo.ai/0.0.16/API-Reference/documents/#query-parameters. Defaults to {}. index_settings (Optional[Dict[str, Any]], optional): Index settings if the index doesn't exist, see https://docs.marqo.ai/0.0.16/API-Reference/indexes/#index-defaults-object. Defaults to {}. Returns: Marqo: An instance of the Marqo vector store """ try: import marqo except ImportError: raise ImportError( "Could not import marqo python package. " "Please install it with `pip install marqo`." ) if not index_name: index_name = str(uuid.uuid4()) client = marqo.Client(url=url, api_key=api_key) try: client.create_index(index_name, settings_dict=index_settings or {}) if verbose: print(f"Created {index_name} successfully.") except Exception: if verbose: print(f"Index {index_name} exists.") instance: Marqo = cls( client, index_name, searchable_attributes=searchable_attributes, add_documents_settings=add_documents_settings or {}, page_content_builder=page_content_builder, ) instance.add_texts(texts, metadatas) return instance def get_indexes(self) -> List[Dict[str, str]]: """Helper to see your available indexes in marqo, useful if the from_texts method was used without an index name specified Returns: List[Dict[str, str]]: The list of indexes """ return self._client.get_indexes()["results"] def get_number_of_documents(self) -> int: """Helper to see the number of documents in the index Returns: int: The number of documents """ return self._client.index(self._index_name).get_stats()["numberOfDocuments"]
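
# --- Hedged usage sketch (illustrative only). It exercises the weighted-query support
# mentioned in the class docstring: similarity_search accepts either a plain string or
# a dict of {term: weight}. The Marqo URL, index name and sample texts are assumptions.
if __name__ == "__main__":
    docsearch = Marqo.from_texts(
        texts=["Iceland trips cost roughly $3000", "Norway is great for fjords"],
        index_name="langchain-travel-demo",
        url="http://localhost:8882",
    )
    # Plain string query.
    print(docsearch.similarity_search("Iceland budget", k=1))
    # Weighted query: emphasize "cost", softly include "Iceland".
    print(docsearch.similarity_search({"cost": 1.0, "Iceland": 0.5}, k=1))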
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~yandex.py
"""Wrapper around YandexGPT embedding models.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) logger = logging.getLogger(__name__) class YandexGPTEmbeddings(BaseModel, Embeddings): """YandexGPT Embeddings models. To use, you should have the ``yandexcloud`` python package installed. There are two authentication options for the service account with the ``ai.languageModels.user`` role: - You can specify the token in a constructor parameter `iam_token` or in an environment variable `YC_IAM_TOKEN`. - You can specify the key in a constructor parameter `api_key` or in an environment variable `YC_API_KEY`. To use the default model specify the folder ID in a parameter `folder_id` or in an environment variable `YC_FOLDER_ID`. Or specify the model URI in a constructor parameter `model_uri` Example: .. code-block:: python from langchain_community.embeddings.yandex import YandexGPTEmbeddings embeddings = YandexGPTEmbeddings(iam_token="t1.9eu...", model_uri="emb://<folder-id>/text-search-query/latest") """ iam_token: str = "" """Yandex Cloud IAM token for service account with the `ai.languageModels.user` role""" api_key: str = "" """Yandex Cloud Api Key for service account with the `ai.languageModels.user` role""" model_uri: str = "" """Model uri to use.""" folder_id: str = "" """Yandex Cloud folder ID""" model_uri: str = "" """Model uri to use.""" model_name: str = "text-search-query" """Model name to use.""" model_version: str = "latest" """Model version to use.""" url: str = "llm.api.cloud.yandex.net:443" """The url of the API.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that iam token exists in environment.""" iam_token = get_from_dict_or_env(values, "iam_token", "YC_IAM_TOKEN", "") values["iam_token"] = iam_token api_key = get_from_dict_or_env(values, "api_key", "YC_API_KEY", "") values["api_key"] = api_key folder_id = get_from_dict_or_env(values, "folder_id", "YC_FOLDER_ID", "") values["folder_id"] = folder_id if api_key == "" and iam_token == "": raise ValueError("Either 'YC_API_KEY' or 'YC_IAM_TOKEN' must be provided.") if values["iam_token"]: values["_grpc_metadata"] = [ ("authorization", f"Bearer {values['iam_token']}") ] if values["folder_id"]: values["_grpc_metadata"].append(("x-folder-id", values["folder_id"])) else: values["_grpc_metadata"] = ( ("authorization", f"Api-Key {values['api_key']}"), ) if values["model_uri"] == "" and values["folder_id"] == "": raise ValueError("Either 'model_uri' or 'folder_id' must be provided.") if not values["model_uri"]: values[ "model_uri" ] = f"emb://{values['folder_id']}/{values['model_name']}/{values['model_version']}" return values def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a YandexGPT embeddings models. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ return _embed_with_retry(self, texts=texts) def embed_query(self, text: str) -> List[float]: """Embed a query using a YandexGPT embeddings models. Args: text: The text to embed. Returns: Embeddings for the text. 
""" return _embed_with_retry(self, texts=[text])[0] def _create_retry_decorator(llm: YandexGPTEmbeddings) -> Callable[[Any], Any]: from grpc import RpcError min_seconds = 1 max_seconds = 60 return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type((RpcError))), before_sleep=before_sleep_log(logger, logging.WARNING), ) def _embed_with_retry(llm: YandexGPTEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**_kwargs: Any) -> Any: return _make_request(llm, **_kwargs) return _completion_with_retry(**kwargs) def _make_request(self: YandexGPTEmbeddings, texts: List[str]): try: import grpc from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import ( # noqa: E501 TextEmbeddingRequest, ) from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import ( # noqa: E501 EmbeddingsServiceStub, ) except ImportError as e: raise ImportError( "Please install YandexCloud SDK" " with `pip install yandexcloud`." ) from e result = [] channel_credentials = grpc.ssl_channel_credentials() channel = grpc.secure_channel(self.url, channel_credentials) for text in texts: request = TextEmbeddingRequest(model_uri=self.model_uri, text=text) stub = EmbeddingsServiceStub(channel) res = stub.TextEmbedding(request, metadata=self._grpc_metadata) result.append(res.embedding) return result
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~cube_semantic.py
import json import logging import time from typing import List import requests from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) class CubeSemanticLoader(BaseLoader): """Load `Cube semantic layer` metadata. Args: cube_api_url: REST API endpoint. Use the REST API of your Cube's deployment. Please find out more information here: https://cube.dev/docs/http-api/rest#configuration-base-path cube_api_token: Cube API token. Authentication tokens are generated based on your Cube's API secret. Please find out more information here: https://cube.dev/docs/security#generating-json-web-tokens-jwt load_dimension_values: Whether to load dimension values for every string dimension or not. dimension_values_limit: Maximum number of dimension values to load. dimension_values_max_retries: Maximum number of retries to load dimension values. dimension_values_retry_delay: Delay between retries to load dimension values. """ def __init__( self, cube_api_url: str, cube_api_token: str, load_dimension_values: bool = True, dimension_values_limit: int = 10_000, dimension_values_max_retries: int = 10, dimension_values_retry_delay: int = 3, ): self.cube_api_url = cube_api_url self.cube_api_token = cube_api_token self.load_dimension_values = load_dimension_values self.dimension_values_limit = dimension_values_limit self.dimension_values_max_retries = dimension_values_max_retries self.dimension_values_retry_delay = dimension_values_retry_delay def _get_dimension_values(self, dimension_name: str) -> List[str]: """Makes a call to Cube's REST API load endpoint to retrieve values for dimensions. These values can be used to achieve a more accurate filtering. """ logger.info("Loading dimension values for: {dimension_name}...") headers = { "Content-Type": "application/json", "Authorization": self.cube_api_token, } query = { "query": { "dimensions": [dimension_name], "limit": self.dimension_values_limit, } } retries = 0 while retries < self.dimension_values_max_retries: response = requests.request( "POST", f"{self.cube_api_url}/load", headers=headers, data=json.dumps(query), ) if response.status_code == 200: response_data = response.json() if ( "error" in response_data and response_data["error"] == "Continue wait" ): logger.info("Retrying...") retries += 1 time.sleep(self.dimension_values_retry_delay) continue else: dimension_values = [ item[dimension_name] for item in response_data["data"] ] return dimension_values else: logger.error("Request failed with status code:", response.status_code) break if retries == self.dimension_values_max_retries: logger.info("Maximum retries reached.") return [] def load(self) -> List[Document]: """Makes a call to Cube's REST API metadata endpoint. 
Returns: A list of documents with attributes: - page_content=column_title + column_description - metadata - table_name - column_name - column_data_type - column_member_type - column_title - column_description - column_values - cube_data_obj_type """ headers = { "Content-Type": "application/json", "Authorization": self.cube_api_token, } logger.info(f"Loading metadata from {self.cube_api_url}...") response = requests.get(f"{self.cube_api_url}/meta", headers=headers) response.raise_for_status() raw_meta_json = response.json() cube_data_objects = raw_meta_json.get("cubes", []) logger.info(f"Found {len(cube_data_objects)} cube data objects in metadata.") if not cube_data_objects: raise ValueError("No cubes found in metadata.") docs = [] for cube_data_obj in cube_data_objects: cube_data_obj_name = cube_data_obj.get("name") cube_data_obj_type = cube_data_obj.get("type") cube_data_obj_is_public = cube_data_obj.get("public") measures = cube_data_obj.get("measures", []) dimensions = cube_data_obj.get("dimensions", []) logger.info(f"Processing {cube_data_obj_name}...") if not cube_data_obj_is_public: logger.info(f"Skipping {cube_data_obj_name} because it is not public.") continue for item in measures + dimensions: column_member_type = "measure" if item in measures else "dimension" dimension_values = [] item_name = str(item.get("name")) item_type = str(item.get("type")) if ( self.load_dimension_values and column_member_type == "dimension" and item_type == "string" ): dimension_values = self._get_dimension_values(item_name) metadata = dict( table_name=str(cube_data_obj_name), column_name=item_name, column_data_type=item_type, column_title=str(item.get("title")), column_description=str(item.get("description")), column_member_type=column_member_type, column_values=dimension_values, cube_data_obj_type=cube_data_obj_type, ) page_content = f"{str(item.get('title'))}, " page_content += f"{str(item.get('description'))}" docs.append(Document(page_content=page_content, metadata=metadata)) return docs
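
# --- Hedged usage sketch (illustrative only). The Cube REST endpoint and JWT are
# placeholders; string-dimension values are fetched because load_dimension_values
# defaults to True.
if __name__ == "__main__":
    loader = CubeSemanticLoader(
        cube_api_url="https://example.cubecloud.dev/cubejs-api/v1",
        cube_api_token="<jwt-token>",
    )
    for doc in loader.load():
        print(doc.metadata["table_name"], doc.metadata["column_name"])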
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~onenote.py
"""Loads data from OneNote Notebooks""" from pathlib import Path from typing import Dict, Iterator, List, Optional import requests from libs.core.langchain_core.documents import Document from libs.core.langchain_core.pydantic_v1 import ( BaseModel, BaseSettings, Field, FilePath, SecretStr, ) from langchain_community.document_loaders.base import BaseLoader class _OneNoteGraphSettings(BaseSettings): client_id: str = Field(..., env="MS_GRAPH_CLIENT_ID") client_secret: SecretStr = Field(..., env="MS_GRAPH_CLIENT_SECRET") class Config: """Config for OneNoteGraphSettings.""" env_prefix = "" case_sentive = False env_file = ".env" class OneNoteLoader(BaseLoader, BaseModel): """Load pages from OneNote notebooks.""" settings: _OneNoteGraphSettings = Field(default_factory=_OneNoteGraphSettings) """Settings for the Microsoft Graph API client.""" auth_with_token: bool = False """Whether to authenticate with a token or not. Defaults to False.""" access_token: str = "" """Personal access token""" onenote_api_base_url: str = "https://graph.microsoft.com/v1.0/me/onenote" """URL of Microsoft Graph API for OneNote""" authority_url = "https://login.microsoftonline.com/consumers/" """A URL that identifies a token authority""" token_path: FilePath = Path.home() / ".credentials" / "onenote_graph_token.txt" """Path to the file where the access token is stored""" notebook_name: Optional[str] = None """Filter on notebook name""" section_name: Optional[str] = None """Filter on section name""" page_title: Optional[str] = None """Filter on section name""" object_ids: Optional[List[str]] = None """ The IDs of the objects to load data from.""" def lazy_load(self) -> Iterator[Document]: """ Get pages from OneNote notebooks. Returns: A list of Documents with attributes: - page_content - metadata - title """ self._auth() try: from bs4 import BeautifulSoup except ImportError: raise ImportError( "beautifulsoup4 package not found, please install it with " "`pip install bs4`" ) if self.object_ids is not None: for object_id in self.object_ids: page_content_html = self._get_page_content(object_id) soup = BeautifulSoup(page_content_html, "html.parser") page_title = "" title_tag = soup.title if title_tag: page_title = title_tag.get_text(strip=True) page_content = soup.get_text(separator="\n", strip=True) yield Document( page_content=page_content, metadata={"title": page_title} ) else: request_url = self._url while request_url != "": response = requests.get(request_url, headers=self._headers, timeout=10) response.raise_for_status() pages = response.json() for page in pages["value"]: page_id = page["id"] page_content_html = self._get_page_content(page_id) soup = BeautifulSoup(page_content_html, "html.parser") page_title = "" title_tag = soup.title if title_tag: page_content = soup.get_text(separator="\n", strip=True) yield Document( page_content=page_content, metadata={"title": page_title} ) if "@odata.nextLink" in pages: request_url = pages["@odata.nextLink"] else: request_url = "" def load(self) -> List[Document]: """ Get pages from OneNote notebooks. 
Returns: A list of Documents with attributes: - page_content - metadata - title """ return list(self.lazy_load()) def _get_page_content(self, page_id: str) -> str: """Get page content from OneNote API""" request_url = self.onenote_api_base_url + f"/pages/{page_id}/content" response = requests.get(request_url, headers=self._headers, timeout=10) response.raise_for_status() return response.text @property def _headers(self) -> Dict[str, str]: """Return headers for requests to OneNote API""" return { "Authorization": f"Bearer {self.access_token}", } @property def _scopes(self) -> List[str]: """Return required scopes.""" return ["Notes.Read"] def _auth(self) -> None: """Authenticate with Microsoft Graph API""" if self.access_token != "": return if self.auth_with_token: with self.token_path.open("r") as token_file: self.access_token = token_file.read() else: try: from msal import ConfidentialClientApplication except ImportError as e: raise ImportError( "MSAL package not found, please install it with `pip install msal`" ) from e client_instance = ConfidentialClientApplication( client_id=self.settings.client_id, client_credential=self.settings.client_secret.get_secret_value(), authority=self.authority_url, ) authorization_request_url = client_instance.get_authorization_request_url( self._scopes ) print("Visit the following url to give consent:") print(authorization_request_url) authorization_url = input("Paste the authenticated url here:\n") authorization_code = authorization_url.split("code=")[1].split("&")[0] access_token_json = client_instance.acquire_token_by_authorization_code( code=authorization_code, scopes=self._scopes ) self.access_token = access_token_json["access_token"] try: if not self.token_path.parent.exists(): self.token_path.parent.mkdir(parents=True) except Exception as e: raise Exception( f"Could not create the folder {self.token_path.parent} " + "to store the access token." ) from e with self.token_path.open("w") as token_file: token_file.write(self.access_token) @property def _url(self) -> str: """Create URL for getting page ids from the OneNoteApi API.""" query_params_list = [] filter_list = [] expand_list = [] query_params_list.append("$select=id") if self.notebook_name is not None: filter_list.append( "parentNotebook/displayName%20eq%20" + f"'{self.notebook_name.replace(' ', '%20')}'" ) expand_list.append("parentNotebook") if self.section_name is not None: filter_list.append( "parentSection/displayName%20eq%20" + f"'{self.section_name.replace(' ', '%20')}'" ) expand_list.append("parentSection") if self.page_title is not None: filter_list.append( "title%20eq%20" + f"'{self.page_title.replace(' ', '%20')}'" ) if len(expand_list) > 0: query_params_list.append("$expand=" + ",".join(expand_list)) if len(filter_list) > 0: query_params_list.append("$filter=" + "%20and%20".join(filter_list)) query_params = "&".join(query_params_list) if query_params != "": query_params = "?" + query_params return f"{self.onenote_api_base_url}/pages{query_params}"
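
# --- Hedged usage sketch (illustrative only). MS_GRAPH_CLIENT_ID and
# MS_GRAPH_CLIENT_SECRET must be set for _OneNoteGraphSettings; the notebook and
# section names are placeholders. auth_with_token=True reuses a token previously
# stored at ~/.credentials/onenote_graph_token.txt.
if __name__ == "__main__":
    loader = OneNoteLoader(
        notebook_name="Travel",
        section_name="Iceland",
        auth_with_token=True,
    )
    for doc in loader.load():
        print(doc.metadata["title"])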
[]
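A minimal usage sketch for the OneNoteLoader defined above. It assumes the class is re-exported from `langchain_community.document_loaders`, that the Azure app registration values and notebook/section names are placeholders, and that a cached token already exists when `auth_with_token=True` is used (omit that flag to go through the interactive consent flow instead).

```python
import os

from langchain_community.document_loaders import OneNoteLoader

# _OneNoteGraphSettings reads these at instantiation time (or from a .env file).
os.environ["MS_GRAPH_CLIENT_ID"] = "<client-id>"          # placeholder
os.environ["MS_GRAPH_CLIENT_SECRET"] = "<client-secret>"  # placeholder

loader = OneNoteLoader(
    notebook_name="Team Notebook",    # placeholder filters
    section_name="Meeting Notes",
    auth_with_token=True,             # reuse ~/.credentials/onenote_graph_token.txt
)

for doc in loader.lazy_load():
    print(doc.metadata["title"], len(doc.page_content))
```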
2024-01-10
mth93/langchain
libs~community~langchain_community~chat_models~fireworks.py
from typing import ( Any, AsyncIterator, Callable, Dict, Iterator, List, Optional, Type, Union, ) from libs.core.langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from libs.core.langchain_core.language_models.chat_models import BaseChatModel from libs.core.langchain_core.language_models.llms import create_base_retry_decorator from libs.core.langchain_core.messages import ( AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ChatMessage, ChatMessageChunk, FunctionMessage, FunctionMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ) from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from libs.core.langchain_core.pydantic_v1 import Field, SecretStr, root_validator from libs.core.langchain_core.utils import convert_to_secret_str from libs.core.langchain_core.utils.env import get_from_dict_or_env from langchain_community.adapters.openai import convert_message_to_dict def _convert_delta_to_message_chunk( _dict: Any, default_class: Type[BaseMessageChunk] ) -> BaseMessageChunk: """Convert a delta response to a message chunk.""" role = _dict.role content = _dict.content or "" additional_kwargs: Dict = {} if role == "user" or default_class == HumanMessageChunk: return HumanMessageChunk(content=content) elif role == "assistant" or default_class == AIMessageChunk: return AIMessageChunk(content=content, additional_kwargs=additional_kwargs) elif role == "system" or default_class == SystemMessageChunk: return SystemMessageChunk(content=content) elif role == "function" or default_class == FunctionMessageChunk: return FunctionMessageChunk(content=content, name=_dict.name) elif role or default_class == ChatMessageChunk: return ChatMessageChunk(content=content, role=role) else: return default_class(content=content) def convert_dict_to_message(_dict: Any) -> BaseMessage: """Convert a dict response to a message.""" role = _dict.role content = _dict.content or "" if role == "user": return HumanMessage(content=content) elif role == "assistant": content = _dict.content additional_kwargs: Dict = {} return AIMessage(content=content, additional_kwargs=additional_kwargs) elif role == "system": return SystemMessage(content=content) elif role == "function": return FunctionMessage(content=content, name=_dict.name) else: return ChatMessage(content=content, role=role) class ChatFireworks(BaseChatModel): """Fireworks Chat models.""" model: str = "accounts/fireworks/models/llama-v2-7b-chat" model_kwargs: dict = Field( default_factory=lambda: { "temperature": 0.7, "max_tokens": 512, "top_p": 1, }.copy() ) fireworks_api_key: Optional[SecretStr] = None max_retries: int = 20 use_retry: bool = True @property def lc_secrets(self) -> Dict[str, str]: return {"fireworks_api_key": "FIREWORKS_API_KEY"} @classmethod def is_lc_serializable(cls) -> bool: return True @classmethod def get_lc_namespace(cls) -> List[str]: """Get the namespace of the langchain object.""" return ["langchain", "chat_models", "fireworks"] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key in environment.""" try: import fireworks.client except ImportError as e: raise ImportError( "Could not import fireworks-ai python package. " "Please install it with `pip install fireworks-ai`." 
) from e fireworks_api_key = convert_to_secret_str( get_from_dict_or_env(values, "fireworks_api_key", "FIREWORKS_API_KEY") ) fireworks.client.api_key = fireworks_api_key.get_secret_value() return values @property def _llm_type(self) -> str: """Return type of llm.""" return "fireworks-chat" def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: message_dicts = self._create_message_dicts(messages) params = { "model": self.model, "messages": message_dicts, **self.model_kwargs, **kwargs, } response = completion_with_retry( self, self.use_retry, run_manager=run_manager, stop=stop, **params, ) return self._create_chat_result(response) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: message_dicts = self._create_message_dicts(messages) params = { "model": self.model, "messages": message_dicts, **self.model_kwargs, **kwargs, } response = await acompletion_with_retry( self, self.use_retry, run_manager=run_manager, stop=stop, **params ) return self._create_chat_result(response) def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: if llm_outputs[0] is None: return {} return llm_outputs[0] def _create_chat_result(self, response: Any) -> ChatResult: generations = [] for res in response.choices: message = convert_dict_to_message(res.message) gen = ChatGeneration( message=message, generation_info=dict(finish_reason=res.finish_reason), ) generations.append(gen) llm_output = {"model": self.model} return ChatResult(generations=generations, llm_output=llm_output) def _create_message_dicts( self, messages: List[BaseMessage] ) -> List[Dict[str, Any]]: message_dicts = [convert_message_to_dict(m) for m in messages] return message_dicts def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: message_dicts = self._create_message_dicts(messages) default_chunk_class = AIMessageChunk params = { "model": self.model, "messages": message_dicts, "stream": True, **self.model_kwargs, **kwargs, } for chunk in completion_with_retry( self, self.use_retry, run_manager=run_manager, stop=stop, **params ): choice = chunk.choices[0] chunk = _convert_delta_to_message_chunk(choice.delta, default_chunk_class) finish_reason = choice.finish_reason generation_info = ( dict(finish_reason=finish_reason) if finish_reason is not None else None ) default_chunk_class = chunk.__class__ chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk) async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: message_dicts = self._create_message_dicts(messages) default_chunk_class = AIMessageChunk params = { "model": self.model, "messages": message_dicts, "stream": True, **self.model_kwargs, **kwargs, } async for chunk in await acompletion_with_retry_streaming( self, self.use_retry, run_manager=run_manager, stop=stop, **params ): choice = chunk.choices[0] chunk = _convert_delta_to_message_chunk(choice.delta, default_chunk_class) finish_reason = choice.finish_reason generation_info = ( 
dict(finish_reason=finish_reason) if finish_reason is not None else None ) default_chunk_class = chunk.__class__ chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info) yield chunk if run_manager: await run_manager.on_llm_new_token(token=chunk.text, chunk=chunk) def conditional_decorator( condition: bool, decorator: Callable[[Any], Any] ) -> Callable[[Any], Any]: """Define conditional decorator. Args: condition: The condition. decorator: The decorator. Returns: The decorated function. """ def actual_decorator(func: Callable[[Any], Any]) -> Callable[[Any], Any]: if condition: return decorator(func) return func return actual_decorator def completion_with_retry( llm: ChatFireworks, use_retry: bool, *, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" import fireworks.client retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @conditional_decorator(use_retry, retry_decorator) def _completion_with_retry(**kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" return fireworks.client.ChatCompletion.create( **kwargs, ) return _completion_with_retry(**kwargs) async def acompletion_with_retry( llm: ChatFireworks, use_retry: bool, *, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the async completion call.""" import fireworks.client retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @conditional_decorator(use_retry, retry_decorator) async def _completion_with_retry(**kwargs: Any) -> Any: return await fireworks.client.ChatCompletion.acreate( **kwargs, ) return await _completion_with_retry(**kwargs) async def acompletion_with_retry_streaming( llm: ChatFireworks, use_retry: bool, *, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call for streaming.""" import fireworks.client retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @conditional_decorator(use_retry, retry_decorator) async def _completion_with_retry(**kwargs: Any) -> Any: return fireworks.client.ChatCompletion.acreate( **kwargs, ) return await _completion_with_retry(**kwargs) def _create_retry_decorator( llm: ChatFireworks, run_manager: Optional[ Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] ] = None, ) -> Callable[[Any], Any]: """Define retry mechanism.""" import fireworks.client errors = [ fireworks.client.error.RateLimitError, fireworks.client.error.InternalServerError, fireworks.client.error.BadGatewayError, fireworks.client.error.ServiceUnavailableError, ] return create_base_retry_decorator( error_types=errors, max_retries=llm.max_retries, run_manager=run_manager )
[]
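A short, hedged example of calling the ChatFireworks model above through the standard Runnable interface. The model name is the class default, the API key is a placeholder, and the imports assume the usual `langchain_core` package layout rather than the vendored `libs.core` path used in this repository.

```python
import os

from langchain_community.chat_models.fireworks import ChatFireworks
from langchain_core.messages import HumanMessage, SystemMessage

os.environ["FIREWORKS_API_KEY"] = "<fireworks-api-key>"  # placeholder

chat = ChatFireworks(
    model="accounts/fireworks/models/llama-v2-7b-chat",
    model_kwargs={"temperature": 0.2, "max_tokens": 256},
)

messages = [
    SystemMessage(content="You are a terse assistant."),
    HumanMessage(content="Name one use case for retrieval-augmented generation."),
]

# Blocking call; chat.stream(messages) would yield incremental message chunks.
response = chat.invoke(messages)
print(response.content)
```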
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~bilibili.py
import json import re import warnings from typing import List, Tuple import requests from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class BiliBiliLoader(BaseLoader): """Load `BiliBili` video transcripts.""" def __init__(self, video_urls: List[str]): """Initialize with bilibili url. Args: video_urls: List of bilibili urls. """ self.video_urls = video_urls def load(self) -> List[Document]: """Load Documents from bilibili url.""" results = [] for url in self.video_urls: transcript, video_info = self._get_bilibili_subs_and_info(url) doc = Document(page_content=transcript, metadata=video_info) results.append(doc) return results def _get_bilibili_subs_and_info(self, url: str) -> Tuple[str, dict]: try: from bilibili_api import sync, video except ImportError: raise ImportError( "requests package not found, please install it with " "`pip install bilibili-api-python`" ) bvid = re.search(r"BV\w+", url) if bvid is not None: v = video.Video(bvid=bvid.group()) else: aid = re.search(r"av[0-9]+", url) if aid is not None: try: v = video.Video(aid=int(aid.group()[2:])) except AttributeError: raise ValueError(f"{url} is not bilibili url.") else: raise ValueError(f"{url} is not bilibili url.") video_info = sync(v.get_info()) video_info.update({"url": url}) sub = sync(v.get_subtitle(video_info["cid"])) # Get subtitle url sub_list = sub["subtitles"] if sub_list: sub_url = sub_list[0]["subtitle_url"] if not sub_url.startswith("http"): sub_url = "https:" + sub_url result = requests.get(sub_url) raw_sub_titles = json.loads(result.content)["body"] raw_transcript = " ".join([c["content"] for c in raw_sub_titles]) raw_transcript_with_meta_info = ( f"Video Title: {video_info['title']}," f"description: {video_info['desc']}\n\n" f"Transcript: {raw_transcript}" ) return raw_transcript_with_meta_info, video_info else: raw_transcript = "" warnings.warn( f""" No subtitles found for video: {url}. Return Empty transcript. """ ) return raw_transcript, video_info
[]
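A usage sketch for the BiliBiliLoader above; the video URL is a placeholder and the `bilibili-api-python` package must be installed.

```python
from langchain_community.document_loaders import BiliBiliLoader

# Requires `pip install bilibili-api-python`; the URL is a placeholder.
loader = BiliBiliLoader(["https://www.bilibili.com/video/BV1xt411o7Xu"])
docs = loader.load()

for doc in docs:
    # Transcript text plus the video metadata returned by get_info().
    print(doc.metadata["title"], doc.page_content[:200])
```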
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~retrievers~test_kay.py
"""Integration test for Kay.ai API Wrapper.""" import pytest from libs.core.langchain_core.documents import Document from langchain_community.retrievers import KayAiRetriever @pytest.mark.requires("kay") def test_kay_retriever() -> None: retriever = KayAiRetriever.create( dataset_id="company", data_types=["10-K", "10-Q", "8-K", "PressRelease"], num_contexts=3, ) docs = retriever.get_relevant_documents( "What were the biggest strategy changes and partnerships made by Roku " "in 2023?", ) assert len(docs) == 3 for doc in docs: assert isinstance(doc, Document) assert doc.page_content assert doc.metadata assert len(list(doc.metadata.items())) > 0
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~embaas.py
from typing import Any, Dict, List, Mapping, Optional import requests from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env from requests.adapters import HTTPAdapter, Retry from typing_extensions import NotRequired, TypedDict # Currently supported maximum batch size for embedding requests MAX_BATCH_SIZE = 256 EMBAAS_API_URL = "https://api.embaas.io/v1/embeddings/" class EmbaasEmbeddingsPayload(TypedDict): """Payload for the Embaas embeddings API.""" model: str texts: List[str] instruction: NotRequired[str] class EmbaasEmbeddings(BaseModel, Embeddings): """Embaas's embedding service. To use, you should have the environment variable ``EMBAAS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python # initialize with default model and instruction from langchain_community.embeddings import EmbaasEmbeddings emb = EmbaasEmbeddings() # initialize with custom model and instruction from langchain_community.embeddings import EmbaasEmbeddings emb_model = "instructor-large" emb_inst = "Represent the Wikipedia document for retrieval" emb = EmbaasEmbeddings( model=emb_model, instruction=emb_inst ) """ model: str = "e5-large-v2" """The model used for embeddings.""" instruction: Optional[str] = None """Instruction used for domain-specific embeddings.""" api_url: str = EMBAAS_API_URL """The URL for the embaas embeddings API.""" embaas_api_key: Optional[str] = None """max number of retries for requests""" max_retries: Optional[int] = 3 """request timeout in seconds""" timeout: Optional[int] = 30 class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" embaas_api_key = get_from_dict_or_env( values, "embaas_api_key", "EMBAAS_API_KEY" ) values["embaas_api_key"] = embaas_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying params.""" return {"model": self.model, "instruction": self.instruction} def _generate_payload(self, texts: List[str]) -> EmbaasEmbeddingsPayload: """Generates payload for the API request.""" payload = EmbaasEmbeddingsPayload(texts=texts, model=self.model) if self.instruction: payload["instruction"] = self.instruction return payload def _handle_request(self, payload: EmbaasEmbeddingsPayload) -> List[List[float]]: """Sends a request to the Embaas API and handles the response.""" headers = { "Authorization": f"Bearer {self.embaas_api_key}", "Content-Type": "application/json", } session = requests.Session() retries = Retry( total=self.max_retries, backoff_factor=0.5, allowed_methods=["POST"], raise_on_status=True, ) session.mount("http://", HTTPAdapter(max_retries=retries)) session.mount("https://", HTTPAdapter(max_retries=retries)) response = session.post( self.api_url, headers=headers, json=payload, timeout=self.timeout, ) parsed_response = response.json() embeddings = [item["embedding"] for item in parsed_response["data"]] return embeddings def _generate_embeddings(self, texts: List[str]) -> List[List[float]]: """Generate embeddings using the Embaas API.""" payload = self._generate_payload(texts) try: return self._handle_request(payload) except requests.exceptions.RequestException as e: if e.response is None or not e.response.text: raise ValueError(f"Error raised by 
embaas embeddings API: {e}") parsed_response = e.response.json() if "message" in parsed_response: raise ValueError( "Validation Error raised by embaas embeddings API:" f"{parsed_response['message']}" ) raise def embed_documents(self, texts: List[str]) -> List[List[float]]: """Get embeddings for a list of texts. Args: texts: The list of texts to get embeddings for. Returns: List of embeddings, one for each text. """ batches = [ texts[i : i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE) ] embeddings = [self._generate_embeddings(batch) for batch in batches] # flatten the list of lists into a single list return [embedding for batch in embeddings for embedding in batch] def embed_query(self, text: str) -> List[float]: """Get embeddings for a single text. Args: text: The text to get embeddings for. Returns: List of embeddings. """ return self.embed_documents([text])[0]
[]
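A small sketch of embedding documents and a query with the EmbaasEmbeddings class above; the API key is a placeholder and the model/instruction values are taken from the class docstring.

```python
import os

from langchain_community.embeddings import EmbaasEmbeddings

os.environ["EMBAAS_API_KEY"] = "<embaas-api-key>"  # placeholder

embeddings = EmbaasEmbeddings(
    model="instructor-large",
    instruction="Represent the Wikipedia document for retrieval",
)

# Requests are batched in chunks of MAX_BATCH_SIZE (256) texts per call.
doc_vectors = embeddings.embed_documents(["first passage", "second passage"])
query_vector = embeddings.embed_query("what does the first passage say?")
print(len(doc_vectors), len(query_vector))
```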
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~file_management~delete.py
import os from typing import Optional, Type from libs.core.langchain_core.callbacks import CallbackManagerForToolRun from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from libs.core.langchain_core.tools import BaseTool from langchain_community.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileDeleteInput(BaseModel): """Input for DeleteFileTool.""" file_path: str = Field(..., description="Path of the file to delete") class DeleteFileTool(BaseFileToolMixin, BaseTool): """Tool that deletes a file.""" name: str = "file_delete" args_schema: Type[BaseModel] = FileDeleteInput description: str = "Delete a file" def _run( self, file_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: file_path_ = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) if not file_path_.exists(): return f"Error: no such file or directory: {file_path}" try: os.remove(file_path_) return f"File deleted successfully: {file_path}." except Exception as e: return "Error: " + str(e) # TODO: Add aiofiles method
[ "Delete a file" ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~unstructured.py
"""Loader that uses unstructured to load files.""" import collections from abc import ABC, abstractmethod from typing import IO, Any, Callable, Dict, List, Optional, Sequence, Union from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader def satisfies_min_unstructured_version(min_version: str) -> bool: """Check if the installed `Unstructured` version exceeds the minimum version for the feature in question.""" from unstructured.__version__ import __version__ as __unstructured_version__ min_version_tuple = tuple([int(x) for x in min_version.split(".")]) # NOTE(MthwRobinson) - enables the loader to work when you're using pre-release # versions of unstructured like 0.4.17-dev1 _unstructured_version = __unstructured_version__.split("-")[0] unstructured_version_tuple = tuple( [int(x) for x in _unstructured_version.split(".")] ) return unstructured_version_tuple >= min_version_tuple def validate_unstructured_version(min_unstructured_version: str) -> None: """Raise an error if the `Unstructured` version does not exceed the specified minimum.""" if not satisfies_min_unstructured_version(min_unstructured_version): raise ValueError( f"unstructured>={min_unstructured_version} is required in this loader." ) class UnstructuredBaseLoader(BaseLoader, ABC): """Base Loader that uses `Unstructured`.""" def __init__( self, mode: str = "single", post_processors: Optional[List[Callable]] = None, **unstructured_kwargs: Any, ): """Initialize with file path.""" try: import unstructured # noqa:F401 except ImportError: raise ValueError( "unstructured package not found, please install it with " "`pip install unstructured`" ) _valid_modes = {"single", "elements", "paged"} if mode not in _valid_modes: raise ValueError( f"Got {mode} for `mode`, but should be one of `{_valid_modes}`" ) self.mode = mode if not satisfies_min_unstructured_version("0.5.4"): if "strategy" in unstructured_kwargs: unstructured_kwargs.pop("strategy") self.unstructured_kwargs = unstructured_kwargs self.post_processors = post_processors or [] @abstractmethod def _get_elements(self) -> List: """Get elements.""" @abstractmethod def _get_metadata(self) -> dict: """Get metadata.""" def _post_process_elements(self, elements: list) -> list: """Applies post processing functions to extracted unstructured elements. Post processing functions are str -> str callables are passed in using the post_processors kwarg when the loader is instantiated.""" for element in elements: for post_processor in self.post_processors: element.apply(post_processor) return elements def load(self) -> List[Document]: """Load file.""" elements = self._get_elements() self._post_process_elements(elements) if self.mode == "elements": docs: List[Document] = list() for element in elements: metadata = self._get_metadata() # NOTE(MthwRobinson) - the attribute check is for backward compatibility # with unstructured<0.4.9. The metadata attributed was added in 0.4.9. 
if hasattr(element, "metadata"): metadata.update(element.metadata.to_dict()) if hasattr(element, "category"): metadata["category"] = element.category docs.append(Document(page_content=str(element), metadata=metadata)) elif self.mode == "paged": text_dict: Dict[int, str] = {} meta_dict: Dict[int, Dict] = {} for idx, element in enumerate(elements): metadata = self._get_metadata() if hasattr(element, "metadata"): metadata.update(element.metadata.to_dict()) page_number = metadata.get("page_number", 1) # Check if this page_number already exists in docs_dict if page_number not in text_dict: # If not, create new entry with initial text and metadata text_dict[page_number] = str(element) + "\n\n" meta_dict[page_number] = metadata else: # If exists, append to text and update the metadata text_dict[page_number] += str(element) + "\n\n" meta_dict[page_number].update(metadata) # Convert the dict to a list of Document objects docs = [ Document(page_content=text_dict[key], metadata=meta_dict[key]) for key in text_dict.keys() ] elif self.mode == "single": metadata = self._get_metadata() text = "\n\n".join([str(el) for el in elements]) docs = [Document(page_content=text, metadata=metadata)] else: raise ValueError(f"mode of {self.mode} not supported.") return docs class UnstructuredFileLoader(UnstructuredBaseLoader): """Load files using `Unstructured`. The file loader uses the unstructured partition function and will automatically detect the file type. You can run the loader in one of two modes: "single" and "elements". If you use "single" mode, the document will be returned as a single langchain Document object. If you use "elements" mode, the unstructured library will split the document into elements such as Title and NarrativeText. You can pass in additional unstructured kwargs after mode to apply different unstructured settings. Examples -------- from langchain_community.document_loaders import UnstructuredFileLoader loader = UnstructuredFileLoader( "example.pdf", mode="elements", strategy="fast", ) docs = loader.load() References ---------- https://unstructured-io.github.io/unstructured/bricks.html#partition """ def __init__( self, file_path: Union[str, List[str]], mode: str = "single", **unstructured_kwargs: Any, ): """Initialize with file path.""" self.file_path = file_path super().__init__(mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: from unstructured.partition.auto import partition return partition(filename=self.file_path, **self.unstructured_kwargs) def _get_metadata(self) -> dict: return {"source": self.file_path} def get_elements_from_api( file_path: Union[str, List[str], None] = None, file: Union[IO, Sequence[IO], None] = None, api_url: str = "https://api.unstructured.io/general/v0/general", api_key: str = "", **unstructured_kwargs: Any, ) -> List: """Retrieve a list of elements from the `Unstructured API`.""" if isinstance(file, collections.abc.Sequence) or isinstance(file_path, list): from unstructured.partition.api import partition_multiple_via_api _doc_elements = partition_multiple_via_api( filenames=file_path, files=file, api_key=api_key, api_url=api_url, **unstructured_kwargs, ) elements = [] for _elements in _doc_elements: elements.extend(_elements) return elements else: from unstructured.partition.api import partition_via_api return partition_via_api( filename=file_path, file=file, api_key=api_key, api_url=api_url, **unstructured_kwargs, ) class UnstructuredAPIFileLoader(UnstructuredFileLoader): """Load files using `Unstructured` API. 
By default, the loader makes a call to the hosted Unstructured API. If you are running the unstructured API locally, you can change the API rule by passing in the url parameter when you initialize the loader. The hosted Unstructured API requires an API key. See https://www.unstructured.io/api-key/ if you need to generate a key. You can run the loader in one of two modes: "single" and "elements". If you use "single" mode, the document will be returned as a single langchain Document object. If you use "elements" mode, the unstructured library will split the document into elements such as Title and NarrativeText. You can pass in additional unstructured kwargs after mode to apply different unstructured settings. Examples ```python from langchain_community.document_loaders import UnstructuredAPIFileLoader loader = UnstructuredFileAPILoader( "example.pdf", mode="elements", strategy="fast", api_key="MY_API_KEY", ) docs = loader.load() References ---------- https://unstructured-io.github.io/unstructured/bricks.html#partition https://www.unstructured.io/api-key/ https://github.com/Unstructured-IO/unstructured-api """ def __init__( self, file_path: Union[str, List[str]] = "", mode: str = "single", url: str = "https://api.unstructured.io/general/v0/general", api_key: str = "", **unstructured_kwargs: Any, ): """Initialize with file path.""" validate_unstructured_version(min_unstructured_version="0.10.15") self.url = url self.api_key = api_key super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) def _get_metadata(self) -> dict: return {"source": self.file_path} def _get_elements(self) -> List: return get_elements_from_api( file_path=self.file_path, api_key=self.api_key, api_url=self.url, **self.unstructured_kwargs, ) class UnstructuredFileIOLoader(UnstructuredBaseLoader): """Load files using `Unstructured`. The file loader uses the unstructured partition function and will automatically detect the file type. You can run the loader in one of two modes: "single" and "elements". If you use "single" mode, the document will be returned as a single langchain Document object. If you use "elements" mode, the unstructured library will split the document into elements such as Title and NarrativeText. You can pass in additional unstructured kwargs after mode to apply different unstructured settings. Examples -------- from langchain_community.document_loaders import UnstructuredFileIOLoader with open("example.pdf", "rb") as f: loader = UnstructuredFileIOLoader( f, mode="elements", strategy="fast", ) docs = loader.load() References ---------- https://unstructured-io.github.io/unstructured/bricks.html#partition """ def __init__( self, file: Union[IO, Sequence[IO]], mode: str = "single", **unstructured_kwargs: Any, ): """Initialize with file path.""" self.file = file super().__init__(mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: from unstructured.partition.auto import partition return partition(file=self.file, **self.unstructured_kwargs) def _get_metadata(self) -> dict: return {} class UnstructuredAPIFileIOLoader(UnstructuredFileIOLoader): """Load files using `Unstructured` API. By default, the loader makes a call to the hosted Unstructured API. If you are running the unstructured API locally, you can change the API rule by passing in the url parameter when you initialize the loader. The hosted Unstructured API requires an API key. See https://www.unstructured.io/api-key/ if you need to generate a key. You can run the loader in one of two modes: "single" and "elements". 
If you use "single" mode, the document will be returned as a single langchain Document object. If you use "elements" mode, the unstructured library will split the document into elements such as Title and NarrativeText. You can pass in additional unstructured kwargs after mode to apply different unstructured settings. Examples -------- from langchain_community.document_loaders import UnstructuredAPIFileLoader with open("example.pdf", "rb") as f: loader = UnstructuredFileAPILoader( f, mode="elements", strategy="fast", api_key="MY_API_KEY", ) docs = loader.load() References ---------- https://unstructured-io.github.io/unstructured/bricks.html#partition https://www.unstructured.io/api-key/ https://github.com/Unstructured-IO/unstructured-api """ def __init__( self, file: Union[IO, Sequence[IO]], mode: str = "single", url: str = "https://api.unstructured.io/general/v0/general", api_key: str = "", **unstructured_kwargs: Any, ): """Initialize with file path.""" if isinstance(file, collections.abc.Sequence): validate_unstructured_version(min_unstructured_version="0.6.3") if file: validate_unstructured_version(min_unstructured_version="0.6.2") self.url = url self.api_key = api_key super().__init__(file=file, mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: return get_elements_from_api( file=self.file, api_key=self.api_key, api_url=self.url, **self.unstructured_kwargs, )
[]
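The class docstrings above already show basic calls; this sketch adds the less obvious `post_processors` hook, which applies str -> str cleaners to every extracted element. The file path is a placeholder and `clean_extra_whitespace` is assumed to be available from the `unstructured` package.

```python
from unstructured.cleaners.core import clean_extra_whitespace

from langchain_community.document_loaders import UnstructuredFileLoader

loader = UnstructuredFileLoader(
    "example.pdf",                             # placeholder path
    mode="elements",
    strategy="fast",
    post_processors=[clean_extra_whitespace],  # applied via element.apply(...)
)
docs = loader.load()
print(docs[0].metadata.get("category"), docs[0].page_content[:80])
```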
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~hn.py
from typing import Any, List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.web_base import WebBaseLoader class HNLoader(WebBaseLoader): """Load `Hacker News` data. It loads data from either main page results or the comments page.""" def load(self) -> List[Document]: """Get important HN webpage information. HN webpage components are: - title - content - source url, - time of post - author of the post - number of comments - rank of the post """ soup_info = self.scrape() if "item" in self.web_path: return self.load_comments(soup_info) else: return self.load_results(soup_info) def load_comments(self, soup_info: Any) -> List[Document]: """Load comments from a HN post.""" comments = soup_info.select("tr[class='athing comtr']") title = soup_info.select_one("tr[id='pagespace']").get("title") return [ Document( page_content=comment.text.strip(), metadata={"source": self.web_path, "title": title}, ) for comment in comments ] def load_results(self, soup: Any) -> List[Document]: """Load items from an HN page.""" items = soup.select("tr[class='athing']") documents = [] for lineItem in items: ranking = lineItem.select_one("span[class='rank']").text link = lineItem.find("span", {"class": "titleline"}).find("a").get("href") title = lineItem.find("span", {"class": "titleline"}).text.strip() metadata = { "source": self.web_path, "title": title, "link": link, "ranking": ranking, } documents.append( Document( page_content=title, link=link, ranking=ranking, metadata=metadata ) ) return documents
[]
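A quick sketch of the two modes of the HNLoader above: an "item" URL loads a post's comments, while any other Hacker News URL loads ranked story titles. The item id is a placeholder.

```python
from langchain_community.document_loaders import HNLoader

# Comments for a single post (the id is a placeholder).
comments = HNLoader("https://news.ycombinator.com/item?id=34817881").load()
print(comments[0].metadata["title"], comments[0].page_content[:120])

# Ranked titles from the front page.
stories = HNLoader("https://news.ycombinator.com/").load()
print(stories[0].metadata["ranking"], stories[0].metadata["link"])
```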
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~qdrant~test_add_texts.py
import uuid from typing import Optional import pytest from libs.core.langchain_core.documents import Document from langchain_community.vectorstores import Qdrant from tests.integration_tests.vectorstores.fake_embeddings import ( ConsistentFakeEmbeddings, ) @pytest.mark.parametrize("batch_size", [1, 64]) @pytest.mark.parametrize("vector_name", [None, "my-vector"]) def test_qdrant_add_documents_extends_existing_collection( batch_size: int, vector_name: Optional[str] ) -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch: Qdrant = Qdrant.from_texts( texts, ConsistentFakeEmbeddings(), location=":memory:", batch_size=batch_size, vector_name=vector_name, ) new_texts = ["foobar", "foobaz"] docsearch.add_documents( [Document(page_content=content) for content in new_texts], batch_size=batch_size ) output = docsearch.similarity_search("foobar", k=1) # ConsistentFakeEmbeddings return the same query embedding as the first document # embedding computed in `embedding.embed_documents`. Thus, "foo" embedding is the # same as "foobar" embedding assert output == [Document(page_content="foobar")] @pytest.mark.parametrize("batch_size", [1, 64]) def test_qdrant_add_texts_returns_all_ids(batch_size: int) -> None: """Test end to end Qdrant.add_texts returns unique ids.""" docsearch: Qdrant = Qdrant.from_texts( ["foobar"], ConsistentFakeEmbeddings(), location=":memory:", batch_size=batch_size, ) ids = docsearch.add_texts(["foo", "bar", "baz"]) assert 3 == len(ids) assert 3 == len(set(ids)) @pytest.mark.parametrize("vector_name", [None, "my-vector"]) def test_qdrant_add_texts_stores_duplicated_texts(vector_name: Optional[str]) -> None: """Test end to end Qdrant.add_texts stores duplicated texts separately.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest client = QdrantClient(":memory:") collection_name = uuid.uuid4().hex vectors_config = rest.VectorParams(size=10, distance=rest.Distance.COSINE) if vector_name is not None: vectors_config = {vector_name: vectors_config} # type: ignore[assignment] client.recreate_collection(collection_name, vectors_config=vectors_config) vec_store = Qdrant( client, collection_name, embeddings=ConsistentFakeEmbeddings(), vector_name=vector_name, ) ids = vec_store.add_texts(["abc", "abc"], [{"a": 1}, {"a": 2}]) assert 2 == len(set(ids)) assert 2 == client.count(collection_name).count @pytest.mark.parametrize("batch_size", [1, 64]) def test_qdrant_add_texts_stores_ids(batch_size: int) -> None: """Test end to end Qdrant.add_texts stores provided ids.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest ids = [ "fa38d572-4c31-4579-aedc-1960d79df6df", "cdc1aa36-d6ab-4fb2-8a94-56674fd27484", ] client = QdrantClient(":memory:") collection_name = uuid.uuid4().hex client.recreate_collection( collection_name, vectors_config=rest.VectorParams(size=10, distance=rest.Distance.COSINE), ) vec_store = Qdrant(client, collection_name, ConsistentFakeEmbeddings()) returned_ids = vec_store.add_texts(["abc", "def"], ids=ids, batch_size=batch_size) assert all(first == second for first, second in zip(ids, returned_ids)) assert 2 == client.count(collection_name).count stored_ids = [point.id for point in client.scroll(collection_name)[0]] assert set(ids) == set(stored_ids) @pytest.mark.parametrize("vector_name", ["custom-vector"]) def test_qdrant_add_texts_stores_embeddings_as_named_vectors(vector_name: str) -> None: """Test end to end Qdrant.add_texts stores named vectors if name is 
provided.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest collection_name = uuid.uuid4().hex client = QdrantClient(":memory:") client.recreate_collection( collection_name, vectors_config={ vector_name: rest.VectorParams(size=10, distance=rest.Distance.COSINE) }, ) vec_store = Qdrant( client, collection_name, ConsistentFakeEmbeddings(), vector_name=vector_name, ) vec_store.add_texts(["lorem", "ipsum", "dolor", "sit", "amet"]) assert 5 == client.count(collection_name).count assert all( vector_name in point.vector # type: ignore[operator] for point in client.scroll(collection_name, with_vectors=True)[0] )
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~amadeus~closest_airport.py
from typing import Optional, Type from libs.core.langchain_core.callbacks import CallbackManagerForToolRun from libs.core.langchain_core.pydantic_v1 import BaseModel, Field from langchain_community.chat_models import ChatOpenAI from langchain_community.tools.amadeus.base import AmadeusBaseTool class ClosestAirportSchema(BaseModel): """Schema for the AmadeusClosestAirport tool.""" location: str = Field( description=( " The location for which you would like to find the nearest airport " " along with optional details such as country, state, region, or " " province, allowing for easy processing and identification of " " the closest airport. Examples of the format are the following:\n" " Cali, Colombia\n " " Lincoln, Nebraska, United States\n" " New York, United States\n" " Sydney, New South Wales, Australia\n" " Rome, Lazio, Italy\n" " Toronto, Ontario, Canada\n" ) ) class AmadeusClosestAirport(AmadeusBaseTool): """Tool for finding the closest airport to a particular location.""" name: str = "closest_airport" description: str = ( "Use this tool to find the closest airport to a particular location." ) args_schema: Type[ClosestAirportSchema] = ClosestAirportSchema def _run( self, location: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: content = ( f" What is the nearest airport to {location}? Please respond with the " " airport's International Air Transport Association (IATA) Location " ' Identifier in the following JSON format. JSON: "iataCode": "IATA ' ' Location Identifier" ' ) return ChatOpenAI(temperature=0).predict(content)
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~retrievers~parent_document_retriever.py
import uuid from typing import List, Optional from libs.core.langchain_core.documents import Document from langchain.retrievers import MultiVectorRetriever from langchain.text_splitter import TextSplitter class ParentDocumentRetriever(MultiVectorRetriever): """Retrieve small chunks then retrieve their parent documents. When splitting documents for retrieval, there are often conflicting desires: 1. You may want to have small documents, so that their embeddings can most accurately reflect their meaning. If too long, then the embeddings can lose meaning. 2. You want to have long enough documents that the context of each chunk is retained. The ParentDocumentRetriever strikes that balance by splitting and storing small chunks of data. During retrieval, it first fetches the small chunks but then looks up the parent ids for those chunks and returns those larger documents. Note that "parent document" refers to the document that a small chunk originated from. This can either be the whole raw document OR a larger chunk. Examples: .. code-block:: python # Imports from langchain.vectorstores import Chroma from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() # Initialize the retriever retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) """ child_splitter: TextSplitter """The text splitter to use to create child documents.""" """The key to use to track the parent id. This will be stored in the metadata of child documents.""" parent_splitter: Optional[TextSplitter] = None """The text splitter to use to create parent documents. If none, then the parent documents will be the raw documents passed in.""" def add_documents( self, documents: List[Document], ids: Optional[List[str]] = None, add_to_docstore: bool = True, ) -> None: """Adds documents to the docstore and vectorstores. Args: documents: List of documents to add ids: Optional list of ids for documents. If provided should be the same length as the list of documents. Can provided if parent documents are already in the document store and you don't want to re-add to the docstore. If not provided, random UUIDs will be used as ids. add_to_docstore: Boolean of whether to add documents to docstore. This can be false if and only if `ids` are provided. You may want to set this to False if the documents are already in the docstore and you don't want to re-add them. """ if self.parent_splitter is not None: documents = self.parent_splitter.split_documents(documents) if ids is None: doc_ids = [str(uuid.uuid4()) for _ in documents] if not add_to_docstore: raise ValueError( "If ids are not passed in, `add_to_docstore` MUST be True" ) else: if len(documents) != len(ids): raise ValueError( "Got uneven list of documents and ids. " "If `ids` is provided, should be same length as `documents`." 
) doc_ids = ids docs = [] full_docs = [] for i, doc in enumerate(documents): _id = doc_ids[i] sub_docs = self.child_splitter.split_documents([doc]) for _doc in sub_docs: _doc.metadata[self.id_key] = _id docs.extend(sub_docs) full_docs.append((_id, doc)) self.vectorstore.add_documents(docs) if add_to_docstore: self.docstore.mset(full_docs)
[]
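A compact end-to-end sketch mirroring the ParentDocumentRetriever docstring above: index one document, then retrieve. It assumes `chromadb` is installed, an OpenAI key is set for the embeddings, and `report.txt` is a placeholder file.

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import ParentDocumentRetriever
from langchain.schema import Document
from langchain.storage import InMemoryStore
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

retriever = ParentDocumentRetriever(
    vectorstore=Chroma(embedding_function=OpenAIEmbeddings()),
    docstore=InMemoryStore(),
    child_splitter=RecursiveCharacterTextSplitter(chunk_size=400),
    parent_splitter=RecursiveCharacterTextSplitter(chunk_size=2000),
)

# Children go to the vectorstore, parents to the docstore keyed by a UUID.
retriever.add_documents([Document(page_content=open("report.txt").read())])

# Search matches small child chunks, but the larger parent chunks come back.
parents = retriever.get_relevant_documents("what does the report conclude?")
print(parents[0].page_content[:200])
```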
2024-01-10
mth93/langchain
libs~langchain~langchain~output_parsers~retry.py
from __future__ import annotations from typing import Any, TypeVar from libs.core.langchain_core.exceptions import OutputParserException from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.output_parsers import BaseOutputParser from libs.core.langchain_core.prompt_values import PromptValue from libs.core.langchain_core.prompts import BasePromptTemplate, PromptTemplate NAIVE_COMPLETION_RETRY = """Prompt: {prompt} Completion: {completion} Above, the Completion did not satisfy the constraints given in the Prompt. Please try again:""" NAIVE_COMPLETION_RETRY_WITH_ERROR = """Prompt: {prompt} Completion: {completion} Above, the Completion did not satisfy the constraints given in the Prompt. Details: {error} Please try again:""" NAIVE_RETRY_PROMPT = PromptTemplate.from_template(NAIVE_COMPLETION_RETRY) NAIVE_RETRY_WITH_ERROR_PROMPT = PromptTemplate.from_template( NAIVE_COMPLETION_RETRY_WITH_ERROR ) T = TypeVar("T") class RetryOutputParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt and the completion to another LLM, and telling it the completion did not satisfy criteria in the prompt. """ parser: BaseOutputParser[T] """The parser to use to parse the output.""" # Should be an LLMChain but we want to avoid top-level imports from langchain.chains retry_chain: Any """The LLMChain to use to retry the completion.""" max_retries: int = 1 """The maximum number of times to retry the parse.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_RETRY_PROMPT, max_retries: int = 1, ) -> RetryOutputParser[T]: """Create an RetryOutputParser from a language model and a parser. Args: llm: llm to use for fixing parser: parser to use for parsing prompt: prompt to use for fixing max_retries: Maximum number of retries to parse. Returns: RetryOutputParser """ from langchain.chains.llm import LLMChain chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain, max_retries=max_retries) def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: """Parse the output of an LLM call using a wrapped parser. Args: completion: The chain completion to parse. prompt_value: The prompt to use to parse the completion. Returns: The parsed completion. """ retries = 0 while retries <= self.max_retries: try: return self.parser.parse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = self.retry_chain.run( prompt=prompt_value.to_string(), completion=completion ) raise OutputParserException("Failed to parse") async def aparse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: """Parse the output of an LLM call using a wrapped parser. Args: completion: The chain completion to parse. prompt_value: The prompt to use to parse the completion. Returns: The parsed completion. """ retries = 0 while retries <= self.max_retries: try: return await self.parser.aparse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = await self.retry_chain.arun( prompt=prompt_value.to_string(), completion=completion ) raise OutputParserException("Failed to parse") def parse(self, completion: str) -> T: raise NotImplementedError( "This OutputParser can only be called by the `parse_with_prompt` method." 
) def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "retry" class RetryWithErrorOutputParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt, the completion, AND the error that was raised to another language model and telling it that the completion did not work, and raised the given error. Differs from RetryOutputParser in that this implementation provides the error that was raised back to the LLM, which in theory should give it more information on how to fix it. """ parser: BaseOutputParser[T] """The parser to use to parse the output.""" # Should be an LLMChain but we want to avoid top-level imports from langchain.chains retry_chain: Any """The LLMChain to use to retry the completion.""" max_retries: int = 1 """The maximum number of times to retry the parse.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_RETRY_WITH_ERROR_PROMPT, max_retries: int = 1, ) -> RetryWithErrorOutputParser[T]: """Create a RetryWithErrorOutputParser from an LLM. Args: llm: The LLM to use to retry the completion. parser: The parser to use to parse the output. prompt: The prompt to use to retry the completion. max_retries: The maximum number of times to retry the completion. Returns: A RetryWithErrorOutputParser. """ from langchain.chains.llm import LLMChain chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain, max_retries=max_retries) def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: retries = 0 while retries <= self.max_retries: try: return self.parser.parse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = self.retry_chain.run( prompt=prompt_value.to_string(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") async def aparse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: retries = 0 while retries <= self.max_retries: try: return await self.parser.aparse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = await self.retry_chain.arun( prompt=prompt_value.to_string(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") def parse(self, completion: str) -> T: raise NotImplementedError( "This OutputParser can only be called by the `parse_with_prompt` method." ) def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "retry_with_error"
[]
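A hedged example of the RetryOutputParser from the module above, using a DatetimeOutputParser as the wrapped parser. The deliberately malformed completion triggers one retry round-trip; an OpenAI key is assumed to be configured.

```python
from langchain.llms import OpenAI
from langchain.output_parsers import DatetimeOutputParser
from langchain.output_parsers.retry import RetryOutputParser
from langchain.prompts import PromptTemplate

parser = DatetimeOutputParser()
prompt = PromptTemplate.from_template(
    "When was the first moon landing?\n{format_instructions}"
)
prompt_value = prompt.format_prompt(
    format_instructions=parser.get_format_instructions()
)

retry_parser = RetryOutputParser.from_llm(llm=OpenAI(temperature=0), parser=parser)

# "July 1969" fails the datetime format, so the retry chain re-asks the LLM
# with the original prompt and the bad completion before parsing again.
print(retry_parser.parse_with_prompt("July 1969", prompt_value))
```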
2024-01-10
mth93/langchain
templates~extraction-anthropic-functions~extraction_anthropic_functions~chain.py
from typing import List, Optional from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser from langchain.prompts import ChatPromptTemplate from langchain.utils.openai_functions import convert_pydantic_to_openai_function from libs.core.langchain_core.pydantic_v1 import BaseModel from langchain_experimental.llms.anthropic_functions import AnthropicFunctions template = """A article will be passed to you. Extract from it all papers that are mentioned by this article. Do not extract the name of the article itself. If no papers are mentioned that's fine - you don't need to extract any! Just return an empty list. Do not make up or guess ANY extra information. Only extract what exactly is in the text.""" # noqa: E501 prompt = ChatPromptTemplate.from_messages([("system", template), ("human", "{input}")]) # Function output schema class Paper(BaseModel): """Information about papers mentioned.""" title: str author: Optional[str] class Info(BaseModel): """Information to extract""" papers: List[Paper] # Function definition model = AnthropicFunctions() function = [convert_pydantic_to_openai_function(Info)] chain = ( prompt | model.bind(functions=function, function_call={"name": "Info"}) | JsonKeyOutputFunctionsParser(key_name="papers") )
[ "{input}", "[('system', \"A article will be passed to you. Extract from it all papers that are mentioned by this article. \\n\\nDo not extract the name of the article itself. If no papers are mentioned that's fine - you don't need to extract any! Just return an empty list.\\n\\nDo not make up or guess ANY extra information. Only extract what exactly is in the text.\"), ('human', '{input}')]", "human", "s fine - you don", "A article will be passed to you. Extract from it all papers that are mentioned by this article. \n\nDo not extract the name of the article itself. If no papers are mentioned that's fine - you don't need to extract any! Just return an empty list.\n\nDo not make up or guess ANY extra information. Only extract what exactly is in the text." ]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~email.py
import os from typing import Any, List from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader from langchain_community.document_loaders.unstructured import ( UnstructuredFileLoader, satisfies_min_unstructured_version, ) class UnstructuredEmailLoader(UnstructuredFileLoader): """Load email files using `Unstructured`. Works with both .eml and .msg files. You can process attachments in addition to the e-mail message itself by passing process_attachments=True into the constructor for the loader. By default, attachments will be processed with the unstructured partition function. If you already know the document types of the attachments, you can specify another partitioning function with the attachment partitioner kwarg. Example ------- from langchain_community.document_loaders import UnstructuredEmailLoader loader = UnstructuredEmailLoader("example_data/fake-email.eml", mode="elements") loader.load() Example ------- from langchain_community.document_loaders import UnstructuredEmailLoader loader = UnstructuredEmailLoader( "example_data/fake-email-attachment.eml", mode="elements", process_attachments=True, ) loader.load() """ def __init__( self, file_path: str, mode: str = "single", **unstructured_kwargs: Any ): process_attachments = unstructured_kwargs.get("process_attachments") attachment_partitioner = unstructured_kwargs.get("attachment_partitioner") if process_attachments and attachment_partitioner is None: from unstructured.partition.auto import partition unstructured_kwargs["attachment_partitioner"] = partition super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: from unstructured.file_utils.filetype import FileType, detect_filetype filetype = detect_filetype(self.file_path) if filetype == FileType.EML: from unstructured.partition.email import partition_email return partition_email(filename=self.file_path, **self.unstructured_kwargs) elif satisfies_min_unstructured_version("0.5.8") and filetype == FileType.MSG: from unstructured.partition.msg import partition_msg return partition_msg(filename=self.file_path, **self.unstructured_kwargs) else: raise ValueError( f"Filetype {filetype} is not supported in UnstructuredEmailLoader." ) class OutlookMessageLoader(BaseLoader): """ Loads Outlook Message files using extract_msg. https://github.com/TeamMsgExtractor/msg-extractor """ def __init__(self, file_path: str): """Initialize with a file path. Args: file_path: The path to the Outlook Message file. """ self.file_path = file_path if not os.path.isfile(self.file_path): raise ValueError("File path %s is not a valid file" % self.file_path) try: import extract_msg # noqa:F401 except ImportError: raise ImportError( "extract_msg is not installed. Please install it with " "`pip install extract_msg`" ) def load(self) -> List[Document]: """Load data into document objects.""" import extract_msg msg = extract_msg.Message(self.file_path) return [ Document( page_content=msg.body, metadata={ "source": self.file_path, "subject": msg.subject, "sender": msg.sender, "date": msg.date, }, ) ]
[]
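The UnstructuredEmailLoader docstring above already covers .eml files; this sketch shows the OutlookMessageLoader for .msg files. The path is a placeholder and `extract_msg` must be installed.

```python
from langchain_community.document_loaders import OutlookMessageLoader

loader = OutlookMessageLoader("example_data/fake-email.msg")  # placeholder path
docs = loader.load()

msg = docs[0]
print(msg.metadata["subject"], msg.metadata["sender"], msg.metadata["date"])
print(msg.page_content[:200])
```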
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~parsers~audio.py
import logging import time from typing import Dict, Iterator, Optional, Tuple from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob from langchain_community.utils.openai import is_openai_v1 logger = logging.getLogger(__name__) class OpenAIWhisperParser(BaseBlobParser): """Transcribe and parse audio files. Audio transcription is with OpenAI Whisper model.""" def __init__(self, api_key: Optional[str] = None): self.api_key = api_key def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import io try: import openai except ImportError: raise ImportError( "openai package not found, please install it with " "`pip install openai`" ) try: from pydub import AudioSegment except ImportError: raise ImportError( "pydub package not found, please install it with " "`pip install pydub`" ) if is_openai_v1(): # api_key optional, defaults to `os.environ['OPENAI_API_KEY']` client = openai.OpenAI(api_key=self.api_key) else: # Set the API key if provided if self.api_key: openai.api_key = self.api_key # Audio file from disk audio = AudioSegment.from_file(blob.path) # Define the duration of each chunk in minutes # Need to meet 25MB size limit for Whisper API chunk_duration = 20 chunk_duration_ms = chunk_duration * 60 * 1000 # Split the audio into chunk_duration_ms chunks for split_number, i in enumerate(range(0, len(audio), chunk_duration_ms)): # Audio chunk chunk = audio[i : i + chunk_duration_ms] file_obj = io.BytesIO(chunk.export(format="mp3").read()) if blob.source is not None: file_obj.name = blob.source + f"_part_{split_number}.mp3" else: file_obj.name = f"part_{split_number}.mp3" # Transcribe print(f"Transcribing part {split_number+1}!") attempts = 0 while attempts < 3: try: if is_openai_v1(): transcript = client.audio.transcriptions.create( model="whisper-1", file=file_obj ) else: transcript = openai.Audio.transcribe("whisper-1", file_obj) break except Exception as e: attempts += 1 print(f"Attempt {attempts} failed. Exception: {str(e)}") time.sleep(5) else: print("Failed to transcribe after 3 attempts.") continue yield Document( page_content=transcript.text, metadata={"source": blob.source, "chunk": split_number}, ) class OpenAIWhisperParserLocal(BaseBlobParser): """Transcribe and parse audio files with OpenAI Whisper model. Audio transcription with OpenAI Whisper model locally from transformers. Parameters: device - device to use NOTE: By default uses the gpu if available, if you want to use cpu, please set device = "cpu" lang_model - whisper model to use, for example "openai/whisper-medium" forced_decoder_ids - id states for decoder in multilanguage model, usage example: from transformers import WhisperProcessor processor = WhisperProcessor.from_pretrained("openai/whisper-medium") forced_decoder_ids = WhisperProcessor.get_decoder_prompt_ids(language="french", task="transcribe") forced_decoder_ids = WhisperProcessor.get_decoder_prompt_ids(language="french", task="translate") """ def __init__( self, device: str = "0", lang_model: Optional[str] = None, forced_decoder_ids: Optional[Tuple[Dict]] = None, ): """Initialize the parser. Args: device: device to use. lang_model: whisper model to use, for example "openai/whisper-medium". Defaults to None. forced_decoder_ids: id states for decoder in a multilanguage model. Defaults to None. 
""" try: from transformers import pipeline except ImportError: raise ImportError( "transformers package not found, please install it with " "`pip install transformers`" ) try: import torch except ImportError: raise ImportError( "torch package not found, please install it with " "`pip install torch`" ) # set device, cpu by default check if there is a GPU available if device == "cpu": self.device = "cpu" if lang_model is not None: self.lang_model = lang_model print("WARNING! Model override. Using model: ", self.lang_model) else: # unless overridden, use the small base model on cpu self.lang_model = "openai/whisper-base" else: if torch.cuda.is_available(): self.device = "cuda:0" # check GPU memory and select automatically the model mem = torch.cuda.get_device_properties(self.device).total_memory / ( 1024**2 ) if mem < 5000: rec_model = "openai/whisper-base" elif mem < 7000: rec_model = "openai/whisper-small" elif mem < 12000: rec_model = "openai/whisper-medium" else: rec_model = "openai/whisper-large" # check if model is overridden if lang_model is not None: self.lang_model = lang_model print("WARNING! Model override. Might not fit in your GPU") else: self.lang_model = rec_model else: "cpu" print("Using the following model: ", self.lang_model) # load model for inference self.pipe = pipeline( "automatic-speech-recognition", model=self.lang_model, chunk_length_s=30, device=self.device, ) if forced_decoder_ids is not None: try: self.pipe.model.config.forced_decoder_ids = forced_decoder_ids except Exception as exception_text: logger.info( "Unable to set forced_decoder_ids parameter for whisper model" f"Text of exception: {exception_text}" "Therefore whisper model will use default mode for decoder" ) def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import io try: from pydub import AudioSegment except ImportError: raise ImportError( "pydub package not found, please install it with `pip install pydub`" ) try: import librosa except ImportError: raise ImportError( "librosa package not found, please install it with " "`pip install librosa`" ) # Audio file from disk audio = AudioSegment.from_file(blob.path) file_obj = io.BytesIO(audio.export(format="mp3").read()) # Transcribe print(f"Transcribing part {blob.path}!") y, sr = librosa.load(file_obj, sr=16000) prediction = self.pipe(y.copy(), batch_size=8)["text"] yield Document( page_content=prediction, metadata={"source": blob.source}, ) class YandexSTTParser(BaseBlobParser): """Transcribe and parse audio files. Audio transcription is with OpenAI Whisper model.""" def __init__( self, *, api_key: Optional[str] = None, iam_token: Optional[str] = None, model: str = "general", language: str = "auto", ): """Initialize the parser. Args: api_key: API key for a service account with the `ai.speechkit-stt.user` role. iam_token: IAM token for a service account with the `ai.speechkit-stt.user` role. model: Recognition model name. Defaults to general. language: The language in ISO 639-1 format. Defaults to automatic language recognition. Either `api_key` or `iam_token` must be provided, but not both. """ if (api_key is None) == (iam_token is None): raise ValueError( "Either 'api_key' or 'iam_token' must be provided, but not both." 
) self.api_key = api_key self.iam_token = iam_token self.model = model self.language = language def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" try: from speechkit import configure_credentials, creds, model_repository from speechkit.stt import AudioProcessingType except ImportError: raise ImportError( "yandex-speechkit package not found, please install it with " "`pip install yandex-speechkit`" ) try: from pydub import AudioSegment except ImportError: raise ImportError( "pydub package not found, please install it with " "`pip install pydub`" ) if self.api_key: configure_credentials( yandex_credentials=creds.YandexCredentials(api_key=self.api_key) ) else: configure_credentials( yandex_credentials=creds.YandexCredentials(iam_token=self.iam_token) ) audio = AudioSegment.from_file(blob.path) model = model_repository.recognition_model() model.model = self.model model.language = self.language model.audio_processing_type = AudioProcessingType.Full result = model.transcribe(audio) for res in result: yield Document( page_content=res.normalized_text, metadata={"source": blob.source}, )
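A minimal usage sketch for the OpenAIWhisperParser defined above (not from the original module): the audio path is a placeholder, the import path is inferred from the package layout, and OPENAI_API_KEY (or an explicit api_key) is assumed to be available.

from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers.audio import OpenAIWhisperParser

# "meeting.mp3" is a placeholder file; each yielded Document holds one ~20 minute chunk.
parser = OpenAIWhisperParser()  # falls back to OPENAI_API_KEY in the environment
blob = Blob.from_path("meeting.mp3")
for doc in parser.lazy_parse(blob):
    print(doc.metadata["chunk"], doc.page_content[:80])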
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~opaqueprompts.py
import logging from typing import Any, Dict, List, Optional from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.pydantic_v1 import Extra, root_validator from libs.core.langchain_core.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class OpaquePrompts(LLM): """An LLM wrapper that uses OpaquePrompts to sanitize prompts. Wraps another LLM and sanitizes prompts before passing it to the LLM, then de-sanitizes the response. To use, you should have the ``opaqueprompts`` python package installed, and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain_community.llms import OpaquePrompts from langchain_community.chat_models import ChatOpenAI op_llm = OpaquePrompts(base_llm=ChatOpenAI()) """ base_llm: BaseLanguageModel """The base LLM to use.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validates that the OpaquePrompts API key and the Python package exist.""" try: import opaqueprompts as op except ImportError: raise ImportError( "Could not import the `opaqueprompts` Python package, " "please install it with `pip install opaqueprompts`." ) if op.__package__ is None: raise ValueError( "Could not properly import `opaqueprompts`, " "opaqueprompts.__package__ is None." ) api_key = get_from_dict_or_env( values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default="" ) if not api_key: raise ValueError( "Could not find OPAQUEPROMPTS_API_KEY in the environment. " "Please set it to your OpaquePrompts API key." "You can get it by creating an account on the OpaquePrompts website: " "https://opaqueprompts.opaque.co/ ." ) return values def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call base LLM with sanitization before and de-sanitization after. Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. Example: .. code-block:: python response = op_llm("Tell me a joke.") """ import opaqueprompts as op _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager() # sanitize the prompt by replacing the sensitive information with a placeholder sanitize_response: op.SanitizeResponse = op.sanitize([prompt]) sanitized_prompt_value_str = sanitize_response.sanitized_texts[0] # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith. # call the LLM with the sanitized prompt and get the response llm_response = self.base_llm.predict( sanitized_prompt_value_str, stop=stop, ) # desanitize the response by restoring the original sensitive information desanitize_response: op.DesanitizeResponse = op.desanitize( llm_response, secure_context=sanitize_response.secure_context, ) return desanitize_response.desanitized_text @property def _llm_type(self) -> str: """Return type of LLM. This is an override of the base class method. """ return "opaqueprompts"
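A short, hedged usage sketch of the wrapper above, expanding the docstring example; it assumes OPAQUEPROMPTS_API_KEY and OPENAI_API_KEY are set and the prompt text is purely illustrative.

from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms import OpaquePrompts

# PII in the prompt is sanitized before it reaches the base LLM and restored in the response.
op_llm = OpaquePrompts(base_llm=ChatOpenAI())
print(op_llm("Summarize this ticket: John Doe (john.doe@example.com) reported an outage."))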
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~srt.py
from typing import List

from libs.core.langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader


class SRTLoader(BaseLoader):
    """Load `.srt` (subtitle) files."""

    def __init__(self, file_path: str):
        """Initialize with a file path."""
        try:
            import pysrt  # noqa:F401
        except ImportError:
            raise ImportError(
                "package `pysrt` not found, please install it with `pip install pysrt`"
            )
        self.file_path = file_path

    def load(self) -> List[Document]:
        """Load the subtitle file using pysrt."""
        import pysrt

        parsed_info = pysrt.open(self.file_path)
        text = " ".join([t.text for t in parsed_info])
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
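A minimal usage sketch for SRTLoader (not part of the original file); the subtitle path is a placeholder and `pysrt` must be installed.

from langchain_community.document_loaders import SRTLoader

loader = SRTLoader("episode_1.srt")  # placeholder path
docs = loader.load()
print(docs[0].metadata["source"], len(docs[0].page_content))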
[]
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~chains~test_combine_documents.py
"""Test functionality related to combining documents.""" from typing import Any, List import pytest from libs.core.langchain_core.documents import Document from libs.core.langchain_core.prompts import PromptTemplate, format_document from langchain.chains.combine_documents.reduce import ( collapse_docs, split_list_of_docs, ) from langchain.chains.qa_with_sources import load_qa_with_sources_chain from tests.unit_tests.llms.fake_llm import FakeLLM def _fake_docs_len_func(docs: List[Document]) -> int: return len(_fake_combine_docs_func(docs)) def _fake_combine_docs_func(docs: List[Document], **kwargs: Any) -> str: return "".join([d.page_content for d in docs]) def test_multiple_input_keys() -> None: chain = load_qa_with_sources_chain(FakeLLM(), chain_type="stuff") assert chain.input_keys == ["input_documents", "question"] def test__split_list_long_single_doc() -> None: """Test splitting of a long single doc.""" docs = [Document(page_content="foo" * 100)] with pytest.raises(ValueError): split_list_of_docs(docs, _fake_docs_len_func, 100) def test__split_list_single_doc() -> None: """Test splitting works with just a single doc.""" docs = [Document(page_content="foo")] doc_list = split_list_of_docs(docs, _fake_docs_len_func, 100) assert doc_list == [docs] def test__split_list_double_doc() -> None: """Test splitting works with just two docs.""" docs = [Document(page_content="foo"), Document(page_content="bar")] doc_list = split_list_of_docs(docs, _fake_docs_len_func, 100) assert doc_list == [docs] def test__split_list_works_correctly() -> None: """Test splitting works correctly.""" docs = [ Document(page_content="foo"), Document(page_content="bar"), Document(page_content="baz"), Document(page_content="foo" * 2), Document(page_content="bar"), Document(page_content="baz"), ] doc_list = split_list_of_docs(docs, _fake_docs_len_func, 10) expected_result = [ # Test a group of three. [ Document(page_content="foo"), Document(page_content="bar"), Document(page_content="baz"), ], # Test a group of two, where one is bigger. [Document(page_content="foo" * 2), Document(page_content="bar")], # Test no errors on last [Document(page_content="baz")], ] assert doc_list == expected_result def test__collapse_docs_no_metadata() -> None: """Test collapse documents functionality when no metadata.""" docs = [ Document(page_content="foo"), Document(page_content="bar"), Document(page_content="baz"), ] output = collapse_docs(docs, _fake_combine_docs_func) expected_output = Document(page_content="foobarbaz") assert output == expected_output def test__collapse_docs_one_doc() -> None: """Test collapse documents functionality when only one document present.""" # Test with no metadata. docs = [Document(page_content="foo")] output = collapse_docs(docs, _fake_combine_docs_func) assert output == docs[0] # Test with metadata. 
docs = [Document(page_content="foo", metadata={"source": "a"})] output = collapse_docs(docs, _fake_combine_docs_func) assert output == docs[0] def test__collapse_docs_metadata() -> None: """Test collapse documents functionality when metadata exists.""" metadata1 = {"source": "a", "foo": 2, "bar": "1", "extra1": "foo"} metadata2 = {"source": "b", "foo": "3", "bar": 2, "extra2": "bar"} docs = [ Document(page_content="foo", metadata=metadata1), Document(page_content="bar", metadata=metadata2), ] output = collapse_docs(docs, _fake_combine_docs_func) expected_metadata = { "source": "a, b", "foo": "2, 3", "bar": "1, 2", "extra1": "foo", "extra2": "bar", } expected_output = Document(page_content="foobar", metadata=expected_metadata) assert output == expected_output def test_format_doc_with_metadata() -> None: """Test format doc on a valid document.""" doc = Document(page_content="foo", metadata={"bar": "baz"}) prompt = PromptTemplate( input_variables=["page_content", "bar"], template="{page_content}, {bar}" ) expected_output = "foo, baz" output = format_document(doc, prompt) assert output == expected_output def test_format_doc_missing_metadata() -> None: """Test format doc on a document with missing metadata.""" doc = Document(page_content="foo") prompt = PromptTemplate( input_variables=["page_content", "bar"], template="{page_content}, {bar}" ) with pytest.raises(ValueError): format_document(doc, prompt)
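A small worked example of the reduce helpers these tests exercise; this is a sketch only, using the same character-count notion of length that the tests fake out.

from libs.core.langchain_core.documents import Document
from langchain.chains.combine_documents.reduce import collapse_docs, split_list_of_docs


def doc_length(docs):
    # Length of the combined page contents, mirroring the fake length function in the tests.
    return sum(len(d.page_content) for d in docs)


def combine(docs, **kwargs):
    return "".join(d.page_content for d in docs)


docs = [Document(page_content="foo"), Document(page_content="bar"), Document(page_content="bazbaz")]
groups = split_list_of_docs(docs, doc_length, 6)  # greedy split -> [[foo, bar], [bazbaz]]
merged = [collapse_docs(group, combine) for group in groups]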
[ "{page_content}, {bar}", "page_content" ]
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~chains~test_memory.py
import pytest from libs.core.langchain_core.memory import BaseMemory from langchain.chains.conversation.memory import ( ConversationBufferMemory, ConversationBufferWindowMemory, ConversationSummaryMemory, ) from langchain.memory import ReadOnlySharedMemory, SimpleMemory from tests.unit_tests.llms.fake_llm import FakeLLM def test_simple_memory() -> None: """Test SimpleMemory.""" memory = SimpleMemory(memories={"baz": "foo"}) output = memory.load_memory_variables({}) assert output == {"baz": "foo"} assert ["baz"] == memory.memory_variables @pytest.mark.parametrize( "memory", [ ConversationBufferMemory(memory_key="baz"), ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"), ConversationBufferWindowMemory(memory_key="baz"), ], ) def test_readonly_memory(memory: BaseMemory) -> None: read_only_memory = ReadOnlySharedMemory(memory=memory) memory.save_context({"input": "bar"}, {"output": "foo"}) assert read_only_memory.load_memory_variables({}) == memory.load_memory_variables( {} )
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~slack_directory.py
import json import zipfile from pathlib import Path from typing import Dict, List, Optional from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader class SlackDirectoryLoader(BaseLoader): """Load from a `Slack` directory dump.""" def __init__(self, zip_path: str, workspace_url: Optional[str] = None): """Initialize the SlackDirectoryLoader. Args: zip_path (str): The path to the Slack directory dump zip file. workspace_url (Optional[str]): The Slack workspace URL. Including the URL will turn sources into links. Defaults to None. """ self.zip_path = Path(zip_path) self.workspace_url = workspace_url self.channel_id_map = self._get_channel_id_map(self.zip_path) @staticmethod def _get_channel_id_map(zip_path: Path) -> Dict[str, str]: """Get a dictionary mapping channel names to their respective IDs.""" with zipfile.ZipFile(zip_path, "r") as zip_file: try: with zip_file.open("channels.json", "r") as f: channels = json.load(f) return {channel["name"]: channel["id"] for channel in channels} except KeyError: return {} def load(self) -> List[Document]: """Load and return documents from the Slack directory dump.""" docs = [] with zipfile.ZipFile(self.zip_path, "r") as zip_file: for channel_path in zip_file.namelist(): channel_name = Path(channel_path).parent.name if not channel_name: continue if channel_path.endswith(".json"): messages = self._read_json(zip_file, channel_path) for message in messages: document = self._convert_message_to_document( message, channel_name ) docs.append(document) return docs def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]: """Read JSON data from a zip subfile.""" with zip_file.open(file_path, "r") as f: data = json.load(f) return data def _convert_message_to_document( self, message: dict, channel_name: str ) -> Document: """ Convert a message to a Document object. Args: message (dict): A message in the form of a dictionary. channel_name (str): The name of the channel the message belongs to. Returns: Document: A Document object representing the message. """ text = message.get("text", "") metadata = self._get_message_metadata(message, channel_name) return Document( page_content=text, metadata=metadata, ) def _get_message_metadata(self, message: dict, channel_name: str) -> dict: """Create and return metadata for a given message and channel.""" timestamp = message.get("ts", "") user = message.get("user", "") source = self._get_message_source(channel_name, user, timestamp) return { "source": source, "channel": channel_name, "timestamp": timestamp, "user": user, } def _get_message_source(self, channel_name: str, user: str, timestamp: str) -> str: """ Get the message source as a string. Args: channel_name (str): The name of the channel the message belongs to. user (str): The user ID who sent the message. timestamp (str): The timestamp of the message. Returns: str: The message source. """ if self.workspace_url: channel_id = self.channel_id_map.get(channel_name, "") return ( f"{self.workspace_url}/archives/{channel_id}" + f"/p{timestamp.replace('.', '')}" ) else: return f"{channel_name} - {user} - {timestamp}"
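A minimal usage sketch for SlackDirectoryLoader above; the zip path and workspace URL are placeholders.

from langchain_community.document_loaders import SlackDirectoryLoader

loader = SlackDirectoryLoader(
    "slack_export.zip",  # placeholder export archive
    workspace_url="https://myworkspace.slack.com",  # optional; turns sources into links
)
docs = loader.load()
print(docs[0].metadata)  # source, channel, timestamp, user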
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~llms~titan_takeoff.py
from typing import Any, Iterator, List, Mapping, Optional import requests from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun from libs.core.langchain_core.language_models.llms import LLM from libs.core.langchain_core.outputs import GenerationChunk from requests.exceptions import ConnectionError from langchain_community.llms.utils import enforce_stop_tokens class TitanTakeoff(LLM): """Wrapper around Titan Takeoff APIs.""" base_url: str = "http://localhost:8000" """Specifies the baseURL to use for the Titan Takeoff API. Default = http://localhost:8000. """ generate_max_length: int = 128 """Maximum generation length. Default = 128.""" sampling_topk: int = 1 """Sample predictions from the top K most probable candidates. Default = 1.""" sampling_topp: float = 1.0 """Sample from predictions whose cumulative probability exceeds this value. Default = 1.0. """ sampling_temperature: float = 1.0 """Sample with randomness. Bigger temperatures are associated with more randomness and 'creativity'. Default = 1.0. """ repetition_penalty: float = 1.0 """Penalise the generation of tokens that have been generated before. Set to > 1 to penalize. Default = 1 (no penalty). """ no_repeat_ngram_size: int = 0 """Prevent repetitions of ngrams of this size. Default = 0 (turned off).""" streaming: bool = False """Whether to stream the output. Default = False.""" @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling Titan Takeoff Server.""" params = { "generate_max_length": self.generate_max_length, "sampling_topk": self.sampling_topk, "sampling_topp": self.sampling_topp, "sampling_temperature": self.sampling_temperature, "repetition_penalty": self.repetition_penalty, "no_repeat_ngram_size": self.no_repeat_ngram_size, } return params @property def _llm_type(self) -> str: """Return type of llm.""" return "titan_takeoff" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Titan Takeoff generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python prompt = "What is the capital of the United Kingdom?" response = model(prompt) """ try: if self.streaming: text_output = "" for chunk in self._stream( prompt=prompt, stop=stop, run_manager=run_manager, ): text_output += chunk.text return text_output url = f"{self.base_url}/generate" params = {"text": prompt, **self._default_params} response = requests.post(url, json=params) response.raise_for_status() response.encoding = "utf-8" text = "" if "message" in response.json(): text = response.json()["message"] else: raise ValueError("Something went wrong.") if stop is not None: text = enforce_stop_tokens(text, stop) return text except ConnectionError: raise ConnectionError( "Could not connect to Titan Takeoff server. \ Please make sure that the server is running." ) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Call out to Titan Takeoff stream endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Yields: A dictionary like object containing a string token. Example: .. code-block:: python prompt = "What is the capital of the United Kingdom?" 
response = model(prompt) """ url = f"{self.base_url}/generate_stream" params = {"text": prompt, **self._default_params} response = requests.post(url, json=params, stream=True) response.encoding = "utf-8" for text in response.iter_content(chunk_size=1, decode_unicode=True): if text: chunk = GenerationChunk(text=text) yield chunk if run_manager: run_manager.on_llm_new_token(token=chunk.text) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"base_url": self.base_url, **{}, **self._default_params}
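A hedged usage sketch for the wrapper above; it assumes a Titan Takeoff server is already running at the default http://localhost:8000.

from langchain_community.llms import TitanTakeoff

llm = TitanTakeoff(generate_max_length=64, sampling_temperature=0.7)
print(llm("What is the capital of the United Kingdom?"))

# Streaming variant: tokens arrive one at a time from the /generate_stream endpoint.
streaming_llm = TitanTakeoff(streaming=True)
for chunk in streaming_llm.stream("Tell me a short story."):
    print(chunk, end="")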
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~test_clickhouse.py
"""Test ClickHouse functionality.""" from libs.core.langchain_core.documents import Document from langchain_community.vectorstores import Clickhouse, ClickhouseSettings from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings def test_clickhouse() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] config = ClickhouseSettings() config.table = "test_clickhouse" docsearch = Clickhouse.from_texts(texts, FakeEmbeddings(), config=config) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"_dummy": 0})] docsearch.drop() async def test_clickhouse_async() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] config = ClickhouseSettings() config.table = "test_clickhouse_async" docsearch = Clickhouse.from_texts( texts=texts, embedding=FakeEmbeddings(), config=config ) output = await docsearch.asimilarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"_dummy": 0})] docsearch.drop() def test_clickhouse_with_metadatas() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] config = ClickhouseSettings() config.table = "test_clickhouse_with_metadatas" docsearch = Clickhouse.from_texts( texts=texts, embedding=FakeEmbeddings(), config=config, metadatas=metadatas, ) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"page": "0"})] docsearch.drop() def test_clickhouse_with_metadatas_with_relevance_scores() -> None: """Test end to end construction and scored search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] config = ClickhouseSettings() config.table = "test_clickhouse_with_metadatas_with_relevance_scores" docsearch = Clickhouse.from_texts( texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas, config=config ) output = docsearch.similarity_search_with_relevance_scores("foo", k=1) assert output[0][0] == Document(page_content="foo", metadata={"page": "0"}) docsearch.drop() def test_clickhouse_search_filter() -> None: """Test end to end construction and search with metadata filtering.""" texts = ["far", "bar", "baz"] metadatas = [{"first_letter": "{}".format(text[0])} for text in texts] config = ClickhouseSettings() config.table = "test_clickhouse_search_filter" docsearch = Clickhouse.from_texts( texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas, config=config ) output = docsearch.similarity_search( "far", k=1, where_str=f"{docsearch.metadata_column}.first_letter='f'" ) assert output == [Document(page_content="far", metadata={"first_letter": "f"})] output = docsearch.similarity_search( "bar", k=1, where_str=f"{docsearch.metadata_column}.first_letter='b'" ) assert output == [Document(page_content="bar", metadata={"first_letter": "b"})] docsearch.drop() def test_clickhouse_with_persistence() -> None: """Test end to end construction and search, with persistence.""" config = ClickhouseSettings() config.table = "test_clickhouse_with_persistence" texts = [ "foo", "bar", "baz", ] docsearch = Clickhouse.from_texts( texts=texts, embedding=FakeEmbeddings(), config=config ) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"_dummy": 0})] # Get a new VectorStore with same config # it will reuse the table spontaneously # unless you drop it docsearch = Clickhouse(embedding=FakeEmbeddings(), 
config=config) output = docsearch.similarity_search("foo", k=1) # Clean up docsearch.drop()
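A condensed sketch of the pattern these tests follow, assuming a reachable ClickHouse server; FakeEmbeddings mirrors the test helper and any real Embeddings implementation could replace it.

from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings

config = ClickhouseSettings()
config.table = "demo_table"
store = Clickhouse.from_texts(["foo", "bar", "baz"], FakeEmbeddings(), config=config)
print(store.similarity_search("foo", k=1))
store.drop()  # remove the table when done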
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~parsers~txt.py
"""Module for parsing text files..""" from typing import Iterator from libs.core.langchain_core.documents import Document from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob class TextParser(BaseBlobParser): """Parser for text blobs.""" def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" yield Document(page_content=blob.as_string(), metadata={"source": blob.source})
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~tools~edenai~ocr_invoiceparser.py
from __future__ import annotations

import logging
from typing import Optional

from libs.core.langchain_core.callbacks import CallbackManagerForToolRun

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class EdenAiParsingInvoiceTool(EdenaiTool):
    """Tool that queries the Eden AI Invoice parsing API.

    For API reference, check the Eden AI documentation:
    https://docs.edenai.co/reference/ocr_invoice_parser_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name = "edenai_invoice_parsing"

    description = (
        "A wrapper around edenai Services invoice parsing. "
        """Useful for when you have to extract information from an image;
        it enables taking invoices in a variety of formats and returns the data
        it contains (items, prices, addresses, vendor name, etc.)
        in a structured format to automate the invoice processing """
        "Input should be the string url of the document to parse."
    )

    language: Optional[str] = None
    """
    language of the image passed to the model.
    """

    feature = "ocr"
    subfeature = "invoice_parser"

    def _parse_response(self, response: list) -> str:
        formatted_list: list = []
        if len(response) == 1:
            self._parse_json_multilevel(
                response[0]["extracted_data"][0], formatted_list
            )
        else:
            for entry in response:
                if entry.get("provider") == "eden-ai":
                    self._parse_json_multilevel(
                        entry["extracted_data"][0], formatted_list
                    )

        return "\n".join(formatted_list)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        query_params = {
            "file_url": query,
            "language": self.language,
            "attributes_as_list": False,
        }

        return self._call_eden_ai(query_params)
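A hedged usage sketch for the tool above; it assumes EDENAI_API_KEY is set, the document URL is a placeholder, and the provider name is only an example of what the Eden AI service accepts.

from langchain_community.tools.edenai.ocr_invoiceparser import EdenAiParsingInvoiceTool

tool = EdenAiParsingInvoiceTool(providers=["amazon"], language="en")  # provider choice is illustrative
structured_fields = tool.run("https://example.com/invoice.pdf")  # placeholder URL
print(structured_fields)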
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_loaders~modern_treasury.py
import json import urllib.request from base64 import b64encode from typing import List, Optional from libs.core.langchain_core.documents import Document from libs.core.langchain_core.utils import get_from_env, stringify_value from langchain_community.document_loaders.base import BaseLoader MODERN_TREASURY_ENDPOINTS = { "payment_orders": "https://app.moderntreasury.com/api/payment_orders", "expected_payments": "https://app.moderntreasury.com/api/expected_payments", "returns": "https://app.moderntreasury.com/api/returns", "incoming_payment_details": "https://app.moderntreasury.com/api/\ incoming_payment_details", "counterparties": "https://app.moderntreasury.com/api/counterparties", "internal_accounts": "https://app.moderntreasury.com/api/internal_accounts", "external_accounts": "https://app.moderntreasury.com/api/external_accounts", "transactions": "https://app.moderntreasury.com/api/transactions", "ledgers": "https://app.moderntreasury.com/api/ledgers", "ledger_accounts": "https://app.moderntreasury.com/api/ledger_accounts", "ledger_transactions": "https://app.moderntreasury.com/api/ledger_transactions", "events": "https://app.moderntreasury.com/api/events", "invoices": "https://app.moderntreasury.com/api/invoices", } class ModernTreasuryLoader(BaseLoader): """Load from `Modern Treasury`.""" def __init__( self, resource: str, organization_id: Optional[str] = None, api_key: Optional[str] = None, ) -> None: """ Args: resource: The Modern Treasury resource to load. organization_id: The Modern Treasury organization ID. It can also be specified via the environment variable "MODERN_TREASURY_ORGANIZATION_ID". api_key: The Modern Treasury API key. It can also be specified via the environment variable "MODERN_TREASURY_API_KEY". """ self.resource = resource organization_id = organization_id or get_from_env( "organization_id", "MODERN_TREASURY_ORGANIZATION_ID" ) api_key = api_key or get_from_env("api_key", "MODERN_TREASURY_API_KEY") credentials = f"{organization_id}:{api_key}".encode("utf-8") basic_auth_token = b64encode(credentials).decode("utf-8") self.headers = {"Authorization": f"Basic {basic_auth_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_value(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) def load(self) -> List[Document]: return self._get_resource()
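A minimal usage sketch for ModernTreasuryLoader above; the credentials are placeholders and can also be supplied via the MODERN_TREASURY_ORGANIZATION_ID and MODERN_TREASURY_API_KEY environment variables.

from langchain_community.document_loaders import ModernTreasuryLoader

loader = ModernTreasuryLoader(
    "payment_orders",  # any key from MODERN_TREASURY_ENDPOINTS
    organization_id="my-org-id",
    api_key="my-api-key",
)
docs = loader.load()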
[]
2024-01-10
mth93/langchain
libs~community~tests~unit_tests~chat_models~test_azureml_endpoint.py
"""Test AzureML chat endpoint.""" import os import pytest from libs.core.langchain_core.pydantic_v1 import SecretStr from pytest import CaptureFixture, FixtureRequest from langchain_community.chat_models.azureml_endpoint import AzureMLChatOnlineEndpoint @pytest.fixture(scope="class") def api_passed_via_environment_fixture() -> AzureMLChatOnlineEndpoint: """Fixture to create an AzureMLChatOnlineEndpoint instance with API key passed from environment""" os.environ["AZUREML_ENDPOINT_API_KEY"] = "my-api-key" azure_chat = AzureMLChatOnlineEndpoint( endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score" ) del os.environ["AZUREML_ENDPOINT_API_KEY"] return azure_chat @pytest.fixture(scope="class") def api_passed_via_constructor_fixture() -> AzureMLChatOnlineEndpoint: """Fixture to create an AzureMLChatOnlineEndpoint instance with API key passed from constructor""" azure_chat = AzureMLChatOnlineEndpoint( endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score", endpoint_api_key="my-api-key", ) return azure_chat @pytest.mark.parametrize( "fixture_name", ["api_passed_via_constructor_fixture", "api_passed_via_environment_fixture"], ) class TestAzureMLChatOnlineEndpoint: def test_api_key_is_secret_string( self, fixture_name: str, request: FixtureRequest ) -> None: """Test that the API key is a SecretStr instance""" azure_chat = request.getfixturevalue(fixture_name) assert isinstance(azure_chat.endpoint_api_key, SecretStr) def test_api_key_masked( self, fixture_name: str, request: FixtureRequest, capsys: CaptureFixture ) -> None: """Test that the API key is masked""" azure_chat = request.getfixturevalue(fixture_name) print(azure_chat.endpoint_api_key, end="") captured = capsys.readouterr() assert ( (str(azure_chat.endpoint_api_key) == "**********") and (repr(azure_chat.endpoint_api_key) == "SecretStr('**********')") and (captured.out == "**********") ) def test_api_key_is_readable( self, fixture_name: str, request: FixtureRequest ) -> None: """Test that the real secret value of the API key can be read""" azure_chat = request.getfixturevalue(fixture_name) assert azure_chat.endpoint_api_key.get_secret_value() == "my-api-key"
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~document_transformers~doctran_text_translate.py
from typing import Any, Optional, Sequence from libs.core.langchain_core.documents import BaseDocumentTransformer, Document from libs.core.langchain_core.utils import get_from_env class DoctranTextTranslator(BaseDocumentTransformer): """Translate text documents using doctran. Arguments: openai_api_key: OpenAI API key. Can also be specified via environment variable ``OPENAI_API_KEY``. language: The language to translate *to*. Example: .. code-block:: python from langchain_community.document_transformers import DoctranTextTranslator # Pass in openai_api_key or set env var OPENAI_API_KEY qa_translator = DoctranTextTranslator(language="spanish") translated_document = await qa_translator.atransform_documents(documents) """ def __init__( self, openai_api_key: Optional[str] = None, language: str = "english", openai_api_model: Optional[str] = None, ) -> None: self.openai_api_key = openai_api_key or get_from_env( "openai_api_key", "OPENAI_API_KEY" ) self.openai_api_model = openai_api_model or get_from_env( "openai_api_model", "OPENAI_API_MODEL" ) self.language = language async def atransform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: raise NotImplementedError def transform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: """Translates text documents using doctran.""" try: from doctran import Doctran doctran = Doctran( openai_api_key=self.openai_api_key, openai_model=self.openai_api_model ) except ImportError: raise ImportError( "Install doctran to use this parser. (pip install doctran)" ) doctran_docs = [ doctran.parse(content=doc.page_content, metadata=doc.metadata) for doc in documents ] for i, doc in enumerate(doctran_docs): doctran_docs[i] = doc.translate(language=self.language).execute() return [ Document(page_content=doc.transformed_content, metadata=doc.metadata) for doc in doctran_docs ]
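A hedged usage sketch for the translator above; it assumes OPENAI_API_KEY is set, `doctran` is installed, and the model name passed here is only a placeholder.

from libs.core.langchain_core.documents import Document
from langchain_community.document_transformers import DoctranTextTranslator

translator = DoctranTextTranslator(language="spanish", openai_api_model="gpt-3.5-turbo")
translated = translator.transform_documents([Document(page_content="Hello, how are you?")])
print(translated[0].page_content)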
[]
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~llamacpp.py
from typing import Any, Dict, List, Optional from libs.core.langchain_core.embeddings import Embeddings from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator class LlamaCppEmbeddings(BaseModel, Embeddings): """llama.cpp embedding models. To use, you should have the llama-cpp-python library installed, and provide the path to the Llama model as a named parameter to the constructor. Check out: https://github.com/abetlen/llama-cpp-python Example: .. code-block:: python from langchain_community.embeddings import LlamaCppEmbeddings llama = LlamaCppEmbeddings(model_path="/path/to/model.bin") """ client: Any #: :meta private: model_path: str n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(-1, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(False, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """Number of tokens to process in parallel. Should be a number between 1 and n_ctx.""" n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") """Number of layers to be loaded into gpu memory. Default None.""" verbose: bool = Field(True, alias="verbose") """Print verbose output to stderr.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" model_path = values["model_path"] model_param_names = [ "n_ctx", "n_parts", "seed", "f16_kv", "logits_all", "vocab_only", "use_mlock", "n_threads", "n_batch", "verbose", ] model_params = {k: values[k] for k in model_param_names} # For backwards compatibility, only include if non-null. if values["n_gpu_layers"] is not None: model_params["n_gpu_layers"] = values["n_gpu_layers"] try: from llama_cpp import Llama values["client"] = Llama(model_path, embedding=True, **model_params) except ImportError: raise ModuleNotFoundError( "Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this embedding model: pip install llama-cpp-python" ) except Exception as e: raise ValueError( f"Could not load Llama model from path: {model_path}. " f"Received error {e}" ) return values def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed a list of documents using the Llama model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = [self.client.embed(text) for text in texts] return [list(map(float, e)) for e in embeddings] def embed_query(self, text: str) -> List[float]: """Embed a query using the Llama model. Args: text: The text to embed. Returns: Embeddings for the text. """ embedding = self.client.embed(text) return list(map(float, embedding))
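A minimal usage sketch for LlamaCppEmbeddings above; the model path is a placeholder and `llama-cpp-python` must be installed.

from langchain_community.embeddings import LlamaCppEmbeddings

embedder = LlamaCppEmbeddings(
    model_path="/path/to/model.gguf",  # placeholder local model file
    n_ctx=2048,
    n_threads=4,
)
doc_vectors = embedder.embed_documents(["first document", "second document"])
query_vector = embedder.embed_query("a question about the documents")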
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~evaluation~comparison~eval_chain.py
"""Base classes for comparing the output of two models.""" from __future__ import annotations import logging import re from typing import Any, Dict, List, Optional, Union from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.output_parsers import BaseOutputParser from libs.core.langchain_core.prompts.prompt import PromptTemplate from libs.core.langchain_core.pydantic_v1 import Extra, Field from langchain.callbacks.manager import Callbacks from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple from langchain.chains.llm import LLMChain from langchain.chat_models.azure_openai import AzureChatOpenAI from langchain.chat_models.openai import ChatOpenAI from langchain.evaluation.comparison.prompt import ( COMPARISON_TEMPLATE, COMPARISON_TEMPLATE_WITH_REFERENCE, CRITERIA_INSTRUCTIONS, ) from langchain.evaluation.criteria.eval_chain import ( CRITERIA_TYPE, Criteria, ) from langchain.evaluation.schema import LLMEvalChain, PairwiseStringEvaluator from langchain.schema import RUN_KEY logger = logging.getLogger(__name__) _FIND_DOUBLE_BRACKETS = re.compile(r"\[\[(.*?)\]\]") _SUPPORTED_CRITERIA = { Criteria.CONCISENESS: "Is the submission concise and to the point?", Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?", Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?", Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?", Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?", Criteria.MALICIOUSNESS: "Is the submission malicious in any way?", Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?", Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?", Criteria.MISOGYNY: "Is the submission misogynistic or sexist?", Criteria.CRIMINALITY: "Is the submission criminal in any way?", Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?", Criteria.DEPTH: "Does the submission demonstrate depth of thought?", Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?", Criteria.DETAIL: "Does the submission demonstrate attention to detail?", } def resolve_pairwise_criteria( criteria: Optional[Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]]], ) -> dict: """Resolve the criteria for the pairwise evaluator. Args: criteria (Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]], optional): The criteria to use. Returns: dict: The resolved criteria. """ if criteria is None: _default_criteria = [ Criteria.HELPFULNESS, Criteria.RELEVANCE, Criteria.CORRECTNESS, Criteria.DEPTH, ] return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria} elif isinstance(criteria, Criteria): criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} elif isinstance(criteria, str): if criteria in _SUPPORTED_CRITERIA: criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} else: criteria_ = {criteria: ""} elif isinstance(criteria, ConstitutionalPrinciple): criteria_ = {criteria.name: criteria.critique_request} elif isinstance(criteria, (list, tuple)): criteria_ = { k: v for criterion in criteria for k, v in resolve_pairwise_criteria(criterion).items() } else: if not criteria: raise ValueError( "Criteria cannot be empty. " "Please provide a criterion name or a mapping of the criterion name" " to its description." 
) criteria_ = dict(criteria) return criteria_ class PairwiseStringResultOutputParser(BaseOutputParser[dict]): """A parser for the output of the PairwiseStringEvalChain. Attributes: _type (str): The type of the output parser. """ @property def _type(self) -> str: """Return the type of the output parser. Returns: str: The type of the output parser. """ return "pairwise_string_result" def parse(self, text: str) -> Dict[str, Any]: """Parse the output text. Args: text (str): The output text to parse. Returns: Dict: The parsed output. Raises: ValueError: If the verdict is invalid. """ match = _FIND_DOUBLE_BRACKETS.search(text) if match: verdict = match.group(1) if not match or verdict not in {"A", "B", "C"}: raise ValueError( f"Invalid output: {text}. " "Output must contain a double bracketed string\ with the verdict 'A', 'B', or 'C'." ) # C means the models are tied. Return 'None' meaning no preference verdict_ = None if verdict == "C" else verdict score = { "A": 1, "B": 0, "C": 0.5, }[verdict] return { "reasoning": text, "value": verdict_, "score": score, } class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain): """A chain for comparing two outputs, such as the outputs of two models, prompts, or outputs of a single model on similar inputs. Attributes: output_parser (BaseOutputParser): The output parser for the chain. Example: >>> from langchain.chat_models import ChatOpenAI >>> from langchain.evaluation.comparison import PairwiseStringEvalChain >>> llm = ChatOpenAI(temperature=0, model_name="gpt-4", model_kwargs={"random_seed": 42}) >>> chain = PairwiseStringEvalChain.from_llm(llm=llm) >>> result = chain.evaluate_string_pairs( ... input = "What is the chemical formula for water?", ... prediction = "H2O", ... prediction_b = ( ... "The chemical formula for water is H2O, which means" ... " there are two hydrogen atoms and one oxygen atom." ... reference = "The chemical formula for water is H2O.", ... ) >>> print(result) # { # "value": "B", # "comment": "Both responses accurately state" # " that the chemical formula for water is H2O." # " However, Response B provides additional information" # . " by explaining what the formula means.\\n[[B]]" # } """ # noqa: E501 output_key: str = "results" #: :meta private: output_parser: BaseOutputParser = Field( default_factory=PairwiseStringResultOutputParser ) @classmethod def is_lc_serializable(cls) -> bool: return False class Config: """Configuration for the PairwiseStringEvalChain.""" extra = Extra.ignore @property def requires_reference(self) -> bool: """Return whether the chain requires a reference. Returns: bool: True if the chain requires a reference, False otherwise. """ return False @property def requires_input(self) -> bool: """Return whether the chain requires an input. Returns: bool: True if the chain requires an input, False otherwise. """ return True @property def _skip_reference_warning(self) -> str: """Return the warning to show when reference is ignored. Returns: str: The warning to show when reference is ignored. """ return ( f"Ignoring reference in {self.__class__.__name__}, as it is not expected." "\nTo use a reference, use the LabeledPairwiseStringEvalChain" " (EvaluatorType.LABELED_PAIRWISE_STRING) instead." ) @classmethod def from_llm( cls, llm: BaseLanguageModel, *, prompt: Optional[PromptTemplate] = None, criteria: Optional[Union[CRITERIA_TYPE, str]] = None, **kwargs: Any, ) -> PairwiseStringEvalChain: """Initialize the PairwiseStringEvalChain from an LLM. 
Args: llm (BaseChatModel): The LLM to use (GPT-4 recommended). prompt (PromptTemplate, optional): The prompt to use. **kwargs (Any): Additional keyword arguments. Returns: PairwiseStringEvalChain: The initialized PairwiseStringEvalChain. Raises: ValueError: If the input variables are not as expected. """ if not ( isinstance(llm, (ChatOpenAI, AzureChatOpenAI)) and llm.model_name.startswith("gpt-4") ): logger.warning( "This chain was only tested with GPT-4. \ Performance may be significantly worse with other models." ) expected_input_vars = {"prediction", "prediction_b", "input", "criteria"} prompt_ = prompt or COMPARISON_TEMPLATE.partial(reference="") if expected_input_vars != set(prompt_.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt_.input_variables}" ) criteria_ = resolve_pairwise_criteria(criteria) criteria_str = "\n".join(f"{k}: {v}" if v else k for k, v in criteria_.items()) criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else "" return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs) def _prepare_input( self, prediction: str, prediction_b: str, input: Optional[str], reference: Optional[str], ) -> dict: """Prepare the input for the chain. Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. input (str, optional): The input or task string. reference (str, optional): The reference string, if any. Returns: dict: The prepared input for the chain. """ input_ = { "prediction": prediction, "prediction_b": prediction_b, "input": input, } if self.requires_reference: input_["reference"] = reference return input_ def _prepare_output(self, result: dict) -> dict: """Prepare the output.""" parsed = result[self.output_key] if RUN_KEY in result: parsed[RUN_KEY] = result[RUN_KEY] return parsed def _evaluate_string_pairs( self, *, prediction: str, prediction_b: str, input: Optional[str] = None, reference: Optional[str] = None, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Evaluate whether output A is preferred to output B. Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. input (str, optional): The input or task string. callbacks (Callbacks, optional): The callbacks to use. reference (str, optional): The reference string, if any. **kwargs (Any): Additional keyword arguments. Returns: dict: A dictionary containing: - reasoning: The reasoning for the preference. - value: The preference value, which is either 'A', 'B', or None for no preference. - score: The preference score, which is 1 for 'A', 0 for 'B', and 0.5 for None. """ input_ = self._prepare_input(prediction, prediction_b, input, reference) result = self( inputs=input_, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info, ) return self._prepare_output(result) async def _aevaluate_string_pairs( self, *, prediction: str, prediction_b: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Asynchronously evaluate whether output A is preferred to output B. Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. 
input (str, optional): The input or task string. callbacks (Callbacks, optional): The callbacks to use. reference (str, optional): The reference string, if any. **kwargs (Any): Additional keyword arguments. Returns: dict: A dictionary containing: - reasoning: The reasoning for the preference. - value: The preference value, which is either 'A', 'B', or None for no preference. - score: The preference score, which is 1 for 'A', 0 for 'B', and 0.5 for None. """ input_ = self._prepare_input(prediction, prediction_b, input, reference) result = await self.acall( inputs=input_, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info, ) return self._prepare_output(result) class LabeledPairwiseStringEvalChain(PairwiseStringEvalChain): """A chain for comparing two outputs, such as the outputs of two models, prompts, or outputs of a single model on similar inputs, with labeled preferences. Attributes: output_parser (BaseOutputParser): The output parser for the chain. """ @property def requires_reference(self) -> bool: """Return whether the chain requires a reference. Returns: bool: True if the chain requires a reference, False otherwise. """ return True @classmethod def from_llm( cls, llm: BaseLanguageModel, *, prompt: Optional[PromptTemplate] = None, criteria: Optional[Union[CRITERIA_TYPE, str]] = None, **kwargs: Any, ) -> PairwiseStringEvalChain: """Initialize the LabeledPairwiseStringEvalChain from an LLM. Args: llm (BaseLanguageModel): The LLM to use. prompt (PromptTemplate, optional): The prompt to use. criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. **kwargs (Any): Additional keyword arguments. Returns: LabeledPairwiseStringEvalChain: The initialized LabeledPairwiseStringEvalChain. Raises: ValueError: If the input variables are not as expected. """ # noqa: E501 expected_input_vars = { "prediction", "prediction_b", "input", "reference", "criteria", } prompt_ = prompt or COMPARISON_TEMPLATE_WITH_REFERENCE if expected_input_vars != set(prompt_.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt_.input_variables}" ) criteria_ = resolve_pairwise_criteria(criteria) criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items()) criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else "" return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs)
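A hedged usage sketch of the labeled variant defined above; it assumes OPENAI_API_KEY is set and that GPT-4 (the model this chain was tested with) is available.

from langchain.chat_models import ChatOpenAI
from langchain.evaluation.comparison import LabeledPairwiseStringEvalChain

chain = LabeledPairwiseStringEvalChain.from_llm(llm=ChatOpenAI(temperature=0, model_name="gpt-4"))
result = chain.evaluate_string_pairs(
    input="What is the chemical formula for water?",
    prediction="H2O",
    prediction_b="Water is H2O: two hydrogen atoms bonded to one oxygen atom.",
    reference="The chemical formula for water is H2O.",
)
print(result["value"], result["score"])  # preferred response and its score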
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~llms~test_anthropic.py
"""Test Anthropic API wrapper.""" from typing import Generator import pytest from libs.core.langchain_core.callbacks import CallbackManager from libs.core.langchain_core.outputs import LLMResult from langchain_community.llms.anthropic import Anthropic from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler @pytest.mark.requires("anthropic") def test_anthropic_model_name_param() -> None: llm = Anthropic(model_name="foo") assert llm.model == "foo" @pytest.mark.requires("anthropic") def test_anthropic_model_param() -> None: llm = Anthropic(model="foo") assert llm.model == "foo" def test_anthropic_call() -> None: """Test valid call to anthropic.""" llm = Anthropic(model="claude-instant-1") output = llm("Say foo:") assert isinstance(output, str) def test_anthropic_streaming() -> None: """Test streaming tokens from anthropic.""" llm = Anthropic(model="claude-instant-1") generator = llm.stream("I'm Pickle Rick") assert isinstance(generator, Generator) for token in generator: assert isinstance(token, str) def test_anthropic_streaming_callback() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) llm = Anthropic( streaming=True, callback_manager=callback_manager, verbose=True, ) llm("Write me a sentence with 100 words.") assert callback_handler.llm_streams > 1 async def test_anthropic_async_generate() -> None: """Test async generate.""" llm = Anthropic() output = await llm.agenerate(["How many toes do dogs have?"]) assert isinstance(output, LLMResult) async def test_anthropic_async_streaming_callback() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) llm = Anthropic( streaming=True, callback_manager=callback_manager, verbose=True, ) result = await llm.agenerate(["How many toes do dogs have?"]) assert callback_handler.llm_streams > 1 assert isinstance(result, LLMResult)
[]
2024-01-10
mth93/langchain
libs~community~tests~integration_tests~vectorstores~test_vearch.py
# flake8: noqa from libs.core.langchain_core.documents import Document from langchain_community.vectorstores.vearch import Vearch from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings def test_vearch() -> None: """ Test end to end create vearch ,store vector into it and search """ texts = [ "Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用", "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", "vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装", ] metadatas = [ { "source": ( "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt" ) }, { "source": ( "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt" ) }, { "source": ( "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt" ) }, ] vearch_db = Vearch.from_texts( texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas, table_name="test_vearch", metadata_path="./", ) result = vearch_db.similarity_search( "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", 1 ) assert result == [ Document( page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", metadata={ "source": ( "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/" "three_body.txt" ) }, ) ] def test_vearch_add_texts() -> None: """Test end to end adding of texts.""" texts = [ ( "Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量," "可用于基于个人知识库的大模型应用" ), "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", "vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装", ] metadatas = [ { "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/" "three_body.txt" }, { "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/" "three_body.txt" }, { "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/" "three_body.txt" }, ] vearch_db = Vearch.from_texts( texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas, table_name="test_vearch", metadata_path="./", ) vearch_db.add_texts( texts=["Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库"], metadatas=[ { "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt" }, ], ) result = vearch_db.similarity_search( "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", 2 ) assert result == [ Document( page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", metadata={ "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt" }, ), Document( page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", metadata={ "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt" }, ), ]
[]
2024-01-10
mth93/langchain
libs~langchain~langchain~chains~sequential.py
"""Chain pipeline where the outputs of one step feed directly into next.""" from typing import Any, Dict, List, Optional from libs.core.langchain_core.pydantic_v1 import Extra, root_validator from libs.core.langchain_core.utils.input import get_color_mapping from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain class SequentialChain(Chain): """Chain where the outputs of one chain feed directly into next.""" chains: List[Chain] input_variables: List[str] output_variables: List[str] #: :meta private: return_all: bool = False class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Return expected input keys to the chain. :meta private: """ return self.input_variables @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return self.output_variables @root_validator(pre=True) def validate_chains(cls, values: Dict) -> Dict: """Validate that the correct inputs exist for all chains.""" chains = values["chains"] input_variables = values["input_variables"] memory_keys = list() if "memory" in values and values["memory"] is not None: """Validate that prompt input variables are consistent.""" memory_keys = values["memory"].memory_variables if set(input_variables).intersection(set(memory_keys)): overlapping_keys = set(input_variables) & set(memory_keys) raise ValueError( f"The input key(s) {''.join(overlapping_keys)} are found " f"in the Memory keys ({memory_keys}) - please use input and " f"memory keys that don't overlap." ) known_variables = set(input_variables + memory_keys) for chain in chains: missing_vars = set(chain.input_keys).difference(known_variables) if chain.memory: missing_vars = missing_vars.difference(chain.memory.memory_variables) if missing_vars: raise ValueError( f"Missing required input keys: {missing_vars}, " f"only had {known_variables}" ) overlapping_keys = known_variables.intersection(chain.output_keys) if overlapping_keys: raise ValueError( f"Chain returned keys that already exist: {overlapping_keys}" ) known_variables |= set(chain.output_keys) if "output_variables" not in values: if values.get("return_all", False): output_keys = known_variables.difference(input_variables) else: output_keys = chains[-1].output_keys values["output_variables"] = output_keys else: missing_vars = set(values["output_variables"]).difference(known_variables) if missing_vars: raise ValueError( f"Expected output variables that were not found: {missing_vars}." 
) return values def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: known_values = inputs.copy() _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() for i, chain in enumerate(self.chains): callbacks = _run_manager.get_child() outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks) known_values.update(outputs) return {k: known_values[k] for k in self.output_variables} async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: known_values = inputs.copy() _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() for i, chain in enumerate(self.chains): outputs = await chain.acall( known_values, return_only_outputs=True, callbacks=callbacks ) known_values.update(outputs) return {k: known_values[k] for k in self.output_variables} class SimpleSequentialChain(Chain): """Simple chain where the outputs of one step feed directly into next.""" chains: List[Chain] strip_outputs: bool = False input_key: str = "input" #: :meta private: output_key: str = "output" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] @root_validator() def validate_chains(cls, values: Dict) -> Dict: """Validate that chains are all single input/output.""" for chain in values["chains"]: if len(chain.input_keys) != 1: raise ValueError( "Chains used in SimplePipeline should all have one input, got " f"{chain} with {len(chain.input_keys)} inputs." ) if len(chain.output_keys) != 1: raise ValueError( "Chains used in SimplePipeline should all have one output, got " f"{chain} with {len(chain.output_keys)} outputs." ) return values def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _input = inputs[self.input_key] color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))]) for i, chain in enumerate(self.chains): _input = chain.run(_input, callbacks=_run_manager.get_child(f"step_{i+1}")) if self.strip_outputs: _input = _input.strip() _run_manager.on_text( _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose ) return {self.output_key: _input} async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() _input = inputs[self.input_key] color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))]) for i, chain in enumerate(self.chains): _input = await chain.arun( _input, callbacks=_run_manager.get_child(f"step_{i+1}") ) if self.strip_outputs: _input = _input.strip() await _run_manager.on_text( _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose ) return {self.output_key: _input}
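A small usage sketch for SimpleSequentialChain above; FakeListLLM (used by the test suite elsewhere in this dump) keeps it self-contained, and the prompts are illustrative.

from libs.core.langchain_core.prompts import PromptTemplate
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.llms.fake import FakeListLLM

llm = FakeListLLM(responses=["a short synopsis", "a glowing review"])
synopsis_chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template("Write a synopsis for: {title}"))
review_chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template("Review this synopsis: {synopsis}"))
overall = SimpleSequentialChain(chains=[synopsis_chain, review_chain], strip_outputs=True)
print(overall.run("Tragedy at sunset on the beach"))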
[]
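A minimal usage sketch for the two chain classes above. The prompt texts and the FakeListLLM stand-in are illustrative assumptions rather than part of the source file; any LLM accepted by LLMChain could be substituted.

# Hedged sketch: composing two LLMChains with SequentialChain and
# SimpleSequentialChain. FakeListLLM and the prompts are placeholders.
from libs.core.langchain_core.prompts import PromptTemplate

from langchain.chains import LLMChain, SequentialChain, SimpleSequentialChain
from langchain.llms.fake import FakeListLLM

llm = FakeListLLM(responses=["a short synopsis", "a short review"])

synopsis_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template("Write a synopsis for: {title}"),
    output_key="synopsis",
)
review_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template("Review this synopsis: {synopsis}"),
    output_key="review",
)

# SequentialChain threads named variables from one step into the next.
overall = SequentialChain(
    chains=[synopsis_chain, review_chain],
    input_variables=["title"],
    output_variables=["synopsis", "review"],
)
print(overall({"title": "Tragedy at sunset on the beach"}))

# SimpleSequentialChain pipes a single string output into the next step.
simple = SimpleSequentialChain(chains=[synopsis_chain, review_chain])
print(simple.run("Tragedy at sunset on the beach"))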
2024-01-10
mth93/langchain
libs~community~langchain_community~embeddings~javelin_ai_gateway.py
from __future__ import annotations

from typing import Any, Iterator, List, Optional

from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel


def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
    for i in range(0, len(texts), size):
        yield texts[i : i + size]


class JavelinAIGatewayEmbeddings(Embeddings, BaseModel):
    """
    Wrapper around embeddings LLMs in the Javelin AI Gateway.

    To use, you should have the ``javelin_sdk`` python package installed.
    For more information, see https://docs.getjavelin.io

    Example:
        .. code-block:: python

            from langchain_community.embeddings import JavelinAIGatewayEmbeddings

            embeddings = JavelinAIGatewayEmbeddings(
                gateway_uri="<javelin-ai-gateway-uri>",
                route="<your-javelin-gateway-embeddings-route>"
            )
    """

    client: Any
    """javelin client."""

    route: str
    """The route to use for the Javelin AI Gateway API."""

    gateway_uri: Optional[str] = None
    """The URI for the Javelin AI Gateway API."""

    javelin_api_key: Optional[str] = None
    """The API key for the Javelin AI Gateway API."""

    def __init__(self, **kwargs: Any):
        try:
            from javelin_sdk import (
                JavelinClient,
                UnauthorizedError,
            )
        except ImportError:
            raise ImportError(
                "Could not import javelin_sdk python package. "
                "Please install it with `pip install javelin_sdk`."
            )

        super().__init__(**kwargs)
        if self.gateway_uri:
            try:
                self.client = JavelinClient(
                    base_url=self.gateway_uri, api_key=self.javelin_api_key
                )
            except UnauthorizedError as e:
                raise ValueError("Javelin: Incorrect API Key.") from e

    def _query(self, texts: List[str]) -> List[List[float]]:
        embeddings = []
        for txt in _chunk(texts, 20):
            try:
                resp = self.client.query_route(self.route, query_body={"input": txt})
                resp_dict = resp.dict()

                embeddings_chunk = resp_dict.get("llm_response", {}).get("data", [])
                for item in embeddings_chunk:
                    if "embedding" in item:
                        embeddings.append(item["embedding"])
            except ValueError as e:
                print("Failed to query route: " + str(e))

        return embeddings

    async def _aquery(self, texts: List[str]) -> List[List[float]]:
        embeddings = []
        for txt in _chunk(texts, 20):
            try:
                resp = await self.client.aquery_route(
                    self.route, query_body={"input": txt}
                )
                resp_dict = resp.dict()

                embeddings_chunk = resp_dict.get("llm_response", {}).get("data", [])
                for item in embeddings_chunk:
                    if "embedding" in item:
                        embeddings.append(item["embedding"])
            except ValueError as e:
                print("Failed to query route: " + str(e))

        return embeddings

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return self._query(texts)

    def embed_query(self, text: str) -> List[float]:
        return self._query([text])[0]

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        return await self._aquery(texts)

    async def aembed_query(self, text: str) -> List[float]:
        result = await self._aquery([text])
        return result[0]
[]
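A hedged usage sketch for the embeddings wrapper above. The gateway URI, route name, and API key are placeholders; a running Javelin AI Gateway and the javelin_sdk package are required for this to execute.

from langchain_community.embeddings import JavelinAIGatewayEmbeddings

# Placeholder connection details for an assumed local gateway deployment.
embeddings = JavelinAIGatewayEmbeddings(
    gateway_uri="http://localhost:8000",  # placeholder URI
    route="my-embeddings-route",          # placeholder route name
    javelin_api_key="my-api-key",         # placeholder key
)

# Documents are embedded in chunks of 20 texts per gateway call.
doc_vectors = embeddings.embed_documents(["first document", "second document"])
query_vector = embeddings.embed_query("a search query")
print(len(doc_vectors), len(query_vector))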
2024-01-10
mth93/langchain
libs~langchain~tests~unit_tests~chains~test_history_aware_retriever.py
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.prompts import PromptTemplate

from langchain.chains import create_history_aware_retriever
from langchain.llms.fake import FakeListLLM
from tests.unit_tests.retrievers.parrot_retriever import FakeParrotRetriever


def test_create() -> None:
    answer = "I know the answer!"
    llm = FakeListLLM(responses=[answer])
    retriever = FakeParrotRetriever()
    question_gen_prompt = PromptTemplate.from_template("hi! {input} {chat_history}")
    chain = create_history_aware_retriever(llm, retriever, question_gen_prompt)
    expected_output = [Document(page_content="What is the answer?")]
    output = chain.invoke({"input": "What is the answer?", "chat_history": []})
    assert output == expected_output

    output = chain.invoke({"input": "What is the answer?"})
    assert output == expected_output

    expected_output = [Document(page_content="I know the answer!")]
    output = chain.invoke(
        {"input": "What is the answer?", "chat_history": ["hi", "hi"]}
    )
    assert output == expected_output
[ "hi! {input} {chat_history}" ]
2024-01-10
mth93/langchain
libs~langchain~langchain~retrievers~document_compressors~chain_extract.py
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents.""" from __future__ import annotations import asyncio from typing import Any, Callable, Dict, Optional, Sequence from libs.core.langchain_core.documents import Document from libs.core.langchain_core.language_models import BaseLanguageModel from libs.core.langchain_core.output_parsers import BaseOutputParser from libs.core.langchain_core.prompts import PromptTemplate from langchain.callbacks.manager import Callbacks from langchain.chains.llm import LLMChain from langchain.retrievers.document_compressors.base import BaseDocumentCompressor from langchain.retrievers.document_compressors.chain_extract_prompt import ( prompt_template, ) def default_get_input(query: str, doc: Document) -> Dict[str, Any]: """Return the compression chain input.""" return {"question": query, "context": doc.page_content} class NoOutputParser(BaseOutputParser[str]): """Parse outputs that could return a null string of some sort.""" no_output_str: str = "NO_OUTPUT" def parse(self, text: str) -> str: cleaned_text = text.strip() if cleaned_text == self.no_output_str: return "" return cleaned_text def _get_default_chain_prompt() -> PromptTemplate: output_parser = NoOutputParser() template = prompt_template.format(no_output_str=output_parser.no_output_str) return PromptTemplate( template=template, input_variables=["question", "context"], output_parser=output_parser, ) class LLMChainExtractor(BaseDocumentCompressor): """Document compressor that uses an LLM chain to extract the relevant parts of documents.""" llm_chain: LLMChain """LLM wrapper to use for compressing documents.""" get_input: Callable[[str, Document], dict] = default_get_input """Callable for constructing the chain input from the query and a Document.""" def compress_documents( self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None, ) -> Sequence[Document]: """Compress page content of raw documents.""" compressed_docs = [] for doc in documents: _input = self.get_input(query, doc) output = self.llm_chain.predict_and_parse(**_input, callbacks=callbacks) if len(output) == 0: continue compressed_docs.append(Document(page_content=output, metadata=doc.metadata)) return compressed_docs async def acompress_documents( self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None, ) -> Sequence[Document]: """Compress page content of raw documents asynchronously.""" outputs = await asyncio.gather( *[ self.llm_chain.apredict_and_parse( **self.get_input(query, doc), callbacks=callbacks ) for doc in documents ] ) compressed_docs = [] for i, doc in enumerate(documents): if len(outputs[i]) == 0: continue compressed_docs.append( Document(page_content=outputs[i], metadata=doc.metadata) ) return compressed_docs @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, get_input: Optional[Callable[[str, Document], str]] = None, llm_chain_kwargs: Optional[dict] = None, ) -> LLMChainExtractor: """Initialize from LLM.""" _prompt = prompt if prompt is not None else _get_default_chain_prompt() _get_input = get_input if get_input is not None else default_get_input llm_chain = LLMChain(llm=llm, prompt=_prompt, **(llm_chain_kwargs or {})) return cls(llm_chain=llm_chain, get_input=_get_input)
[]
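A hedged sketch of the compressor above driven by a FakeListLLM so it runs offline; the canned responses and documents are illustrative. In practice the compressor is usually wrapped in a ContextualCompressionRetriever around a real retriever.

from libs.core.langchain_core.documents import Document

from langchain.llms.fake import FakeListLLM
from langchain.retrievers.document_compressors import LLMChainExtractor

# Canned responses: the first document yields an extract, the second is judged
# irrelevant (NO_OUTPUT) and is therefore dropped by compress_documents.
llm = FakeListLLM(responses=["the relevant sentence", "NO_OUTPUT"])
compressor = LLMChainExtractor.from_llm(llm)

compressed = compressor.compress_documents(
    documents=[
        Document(page_content="the relevant sentence, plus unrelated filler"),
        Document(page_content="entirely unrelated text"),
    ],
    query="Which sentence is relevant?",
)
print([doc.page_content for doc in compressed])  # ["the relevant sentence"]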
2024-01-10
mth93/langchain
libs~langchain~tests~integration_tests~memory~test_firestore.py
import json

from libs.core.langchain_core.messages import message_to_dict

from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import FirestoreChatMessageHistory


def test_memory_with_message_store() -> None:
    """Test the memory with a message store."""
    message_history = FirestoreChatMessageHistory(
        collection_name="chat_history",
        session_id="my-test-session",
        user_id="my-test-user",
    )
    memory = ConversationBufferMemory(
        memory_key="baz", chat_memory=message_history, return_messages=True
    )

    # add some messages
    memory.chat_memory.add_ai_message("This is me, the AI")
    memory.chat_memory.add_user_message("This is me, the human")

    # get the message history from the memory store
    # and check if the messages are there as expected
    message_history = FirestoreChatMessageHistory(
        collection_name="chat_history",
        session_id="my-test-session",
        user_id="my-test-user",
    )
    memory = ConversationBufferMemory(
        memory_key="baz", chat_memory=message_history, return_messages=True
    )

    messages = memory.chat_memory.messages
    messages_json = json.dumps([message_to_dict(msg) for msg in messages])

    assert "This is me, the AI" in messages_json
    assert "This is me, the human" in messages_json

    # remove the record from Firestore, so the next test run won't pick it up
    memory.chat_memory.clear()

    assert memory.chat_memory.messages == []
[]
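A hedged sketch of the same pattern outside the test. It assumes Google Cloud credentials and the firebase-admin package are available; the collection, session, and user identifiers are placeholders.

from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import FirestoreChatMessageHistory

# Placeholder identifiers; messages are persisted per (collection, session, user).
history = FirestoreChatMessageHistory(
    collection_name="chat_history",
    session_id="demo-session",
    user_id="demo-user",
)
memory = ConversationBufferMemory(
    memory_key="history", chat_memory=history, return_messages=True
)

memory.chat_memory.add_user_message("Hello!")
memory.chat_memory.add_ai_message("Hi, how can I help?")

# Messages survive process restarts because they live in Firestore.
print(memory.chat_memory.messages)

memory.chat_memory.clear()  # remove the stored conversation when done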
2024-01-10
mth93/langchain
libs~community~langchain_community~callbacks~mlflow_callback.py
import os import random import string import tempfile import traceback from copy import deepcopy from pathlib import Path from typing import Any, Dict, List, Optional, Union from libs.core.langchain_core.agents import AgentAction, AgentFinish from libs.core.langchain_core.callbacks import BaseCallbackHandler from libs.core.langchain_core.outputs import LLMResult from libs.core.langchain_core.utils import get_from_dict_or_env from langchain_community.callbacks.utils import ( BaseMetadataCallbackHandler, flatten_dict, hash_string, import_pandas, import_spacy, import_textstat, ) def import_mlflow() -> Any: """Import the mlflow python package and raise an error if it is not installed.""" try: import mlflow except ImportError: raise ImportError( "To use the mlflow callback manager you need to have the `mlflow` python " "package installed. Please install it with `pip install mlflow>=2.3.0`" ) return mlflow def analyze_text( text: str, nlp: Any = None, ) -> dict: """Analyze text using textstat and spacy. Parameters: text (str): The text to analyze. nlp (spacy.lang): The spacy language model to use for visualization. Returns: (dict): A dictionary containing the complexity metrics and visualization files serialized to HTML string. """ resp: Dict[str, Any] = {} textstat = import_textstat() spacy = import_spacy() text_complexity_metrics = { "flesch_reading_ease": textstat.flesch_reading_ease(text), "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text), "smog_index": textstat.smog_index(text), "coleman_liau_index": textstat.coleman_liau_index(text), "automated_readability_index": textstat.automated_readability_index(text), "dale_chall_readability_score": textstat.dale_chall_readability_score(text), "difficult_words": textstat.difficult_words(text), "linsear_write_formula": textstat.linsear_write_formula(text), "gunning_fog": textstat.gunning_fog(text), # "text_standard": textstat.text_standard(text), "fernandez_huerta": textstat.fernandez_huerta(text), "szigriszt_pazos": textstat.szigriszt_pazos(text), "gutierrez_polini": textstat.gutierrez_polini(text), "crawford": textstat.crawford(text), "gulpease_index": textstat.gulpease_index(text), "osman": textstat.osman(text), } resp.update({"text_complexity_metrics": text_complexity_metrics}) resp.update(text_complexity_metrics) if nlp is not None: doc = nlp(text) dep_out = spacy.displacy.render( # type: ignore doc, style="dep", jupyter=False, page=True ) ent_out = spacy.displacy.render( # type: ignore doc, style="ent", jupyter=False, page=True ) text_visualizations = { "dependency_tree": dep_out, "entities": ent_out, } resp.update(text_visualizations) return resp def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any: """Construct an html element from a prompt and a generation. Parameters: prompt (str): The prompt. generation (str): The generation. Returns: (str): The html string.""" formatted_prompt = prompt.replace("\n", "<br>") formatted_generation = generation.replace("\n", "<br>") return f""" <p style="color:black;">{formatted_prompt}:</p> <blockquote> <p style="color:green;"> {formatted_generation} </p> </blockquote> """ class MlflowLogger: """Callback Handler that logs metrics and artifacts to mlflow server. Parameters: name (str): Name of the run. experiment (str): Name of the experiment. tags (dict): Tags to be attached for the run. tracking_uri (str): MLflow tracking server uri. This handler implements the helper functions to initialize, log metrics and artifacts to the mlflow server. 
""" def __init__(self, **kwargs: Any): self.mlflow = import_mlflow() if "DATABRICKS_RUNTIME_VERSION" in os.environ: self.mlflow.set_tracking_uri("databricks") self.mlf_expid = self.mlflow.tracking.fluent._get_experiment_id() self.mlf_exp = self.mlflow.get_experiment(self.mlf_expid) else: tracking_uri = get_from_dict_or_env( kwargs, "tracking_uri", "MLFLOW_TRACKING_URI", "" ) self.mlflow.set_tracking_uri(tracking_uri) # User can set other env variables described here # > https://www.mlflow.org/docs/latest/tracking.html#logging-to-a-tracking-server experiment_name = get_from_dict_or_env( kwargs, "experiment_name", "MLFLOW_EXPERIMENT_NAME" ) self.mlf_exp = self.mlflow.get_experiment_by_name(experiment_name) if self.mlf_exp is not None: self.mlf_expid = self.mlf_exp.experiment_id else: self.mlf_expid = self.mlflow.create_experiment(experiment_name) self.start_run(kwargs["run_name"], kwargs["run_tags"]) def start_run(self, name: str, tags: Dict[str, str]) -> None: """To start a new run, auto generates the random suffix for name""" if name.endswith("-%"): rname = "".join(random.choices(string.ascii_uppercase + string.digits, k=7)) name = name.replace("%", rname) self.run = self.mlflow.MlflowClient().create_run( self.mlf_expid, run_name=name, tags=tags ) def finish_run(self) -> None: """To finish the run.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.end_run() def metric(self, key: str, value: float) -> None: """To log metric to mlflow server.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_metric(key, value) def metrics( self, data: Union[Dict[str, float], Dict[str, int]], step: Optional[int] = 0 ) -> None: """To log all metrics in the input dict.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_metrics(data) def jsonf(self, data: Dict[str, Any], filename: str) -> None: """To log the input data as json file artifact.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_dict(data, f"{filename}.json") def table(self, name: str, dataframe) -> None: # type: ignore """To log the input pandas dataframe as a html table""" self.html(dataframe.to_html(), f"table_{name}") def html(self, html: str, filename: str) -> None: """To log the input html string as html file artifact.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_text(html, f"{filename}.html") def text(self, text: str, filename: str) -> None: """To log the input text as text file artifact.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_text(text, f"{filename}.txt") def artifact(self, path: str) -> None: """To upload the file from given path as artifact.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_artifact(path) def langchain_artifact(self, chain: Any) -> None: with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.langchain.log_model(chain, "langchain-model") class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): """Callback Handler that logs metrics and artifacts to mlflow server. Parameters: name (str): Name of the run. experiment (str): Name of the experiment. tags (dict): Tags to be attached for the run. tracking_uri (str): MLflow tracking server uri. 
This handler will utilize the associated callback method called and formats the input of each callback function with metadata regarding the state of LLM run, and adds the response to the list of records for both the {method}_records and action. It then logs the response to mlflow server. """ def __init__( self, name: Optional[str] = "langchainrun-%", experiment: Optional[str] = "langchain", tags: Optional[Dict] = None, tracking_uri: Optional[str] = None, ) -> None: """Initialize callback handler.""" import_pandas() import_textstat() import_mlflow() spacy = import_spacy() super().__init__() self.name = name self.experiment = experiment self.tags = tags or {} self.tracking_uri = tracking_uri self.temp_dir = tempfile.TemporaryDirectory() self.mlflg = MlflowLogger( tracking_uri=self.tracking_uri, experiment_name=self.experiment, run_name=self.name, run_tags=self.tags, ) self.action_records: list = [] self.nlp = spacy.load("en_core_web_sm") self.metrics = { "step": 0, "starts": 0, "ends": 0, "errors": 0, "text_ctr": 0, "chain_starts": 0, "chain_ends": 0, "llm_starts": 0, "llm_ends": 0, "llm_streams": 0, "tool_starts": 0, "tool_ends": 0, "agent_ends": 0, } self.records: Dict[str, Any] = { "on_llm_start_records": [], "on_llm_token_records": [], "on_llm_end_records": [], "on_chain_start_records": [], "on_chain_end_records": [], "on_tool_start_records": [], "on_tool_end_records": [], "on_text_records": [], "on_agent_finish_records": [], "on_agent_action_records": [], "action_records": [], } def _reset(self) -> None: for k, v in self.metrics.items(): self.metrics[k] = 0 for k, v in self.records.items(): self.records[k] = [] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when LLM starts.""" self.metrics["step"] += 1 self.metrics["llm_starts"] += 1 self.metrics["starts"] += 1 llm_starts = self.metrics["llm_starts"] resp: Dict[str, Any] = {} resp.update({"action": "on_llm_start"}) resp.update(flatten_dict(serialized)) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) for idx, prompt in enumerate(prompts): prompt_resp = deepcopy(resp) prompt_resp["prompt"] = prompt self.records["on_llm_start_records"].append(prompt_resp) self.records["action_records"].append(prompt_resp) self.mlflg.jsonf(prompt_resp, f"llm_start_{llm_starts}_prompt_{idx}") def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Run when LLM generates a new token.""" self.metrics["step"] += 1 self.metrics["llm_streams"] += 1 llm_streams = self.metrics["llm_streams"] resp: Dict[str, Any] = {} resp.update({"action": "on_llm_new_token", "token": token}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_llm_token_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"llm_new_tokens_{llm_streams}") def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" self.metrics["step"] += 1 self.metrics["llm_ends"] += 1 self.metrics["ends"] += 1 llm_ends = self.metrics["llm_ends"] resp: Dict[str, Any] = {} resp.update({"action": "on_llm_end"}) resp.update(flatten_dict(response.llm_output or {})) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) for generations in response.generations: for idx, generation in enumerate(generations): generation_resp = deepcopy(resp) generation_resp.update(flatten_dict(generation.dict())) generation_resp.update( analyze_text( generation.text, nlp=self.nlp, ) 
) complexity_metrics: Dict[str, float] = generation_resp.pop( "text_complexity_metrics" ) # type: ignore # noqa: E501 self.mlflg.metrics( complexity_metrics, step=self.metrics["step"], ) self.records["on_llm_end_records"].append(generation_resp) self.records["action_records"].append(generation_resp) self.mlflg.jsonf(resp, f"llm_end_{llm_ends}_generation_{idx}") dependency_tree = generation_resp["dependency_tree"] entities = generation_resp["entities"] self.mlflg.html(dependency_tree, "dep-" + hash_string(generation.text)) self.mlflg.html(entities, "ent-" + hash_string(generation.text)) def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: """Run when LLM errors.""" self.metrics["step"] += 1 self.metrics["errors"] += 1 def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Run when chain starts running.""" self.metrics["step"] += 1 self.metrics["chain_starts"] += 1 self.metrics["starts"] += 1 chain_starts = self.metrics["chain_starts"] resp: Dict[str, Any] = {} resp.update({"action": "on_chain_start"}) resp.update(flatten_dict(serialized)) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()]) input_resp = deepcopy(resp) input_resp["inputs"] = chain_input self.records["on_chain_start_records"].append(input_resp) self.records["action_records"].append(input_resp) self.mlflg.jsonf(input_resp, f"chain_start_{chain_starts}") def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" self.metrics["step"] += 1 self.metrics["chain_ends"] += 1 self.metrics["ends"] += 1 chain_ends = self.metrics["chain_ends"] resp: Dict[str, Any] = {} chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()]) resp.update({"action": "on_chain_end", "outputs": chain_output}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_chain_end_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"chain_end_{chain_ends}") def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: """Run when chain errors.""" self.metrics["step"] += 1 self.metrics["errors"] += 1 def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: """Run when tool starts running.""" self.metrics["step"] += 1 self.metrics["tool_starts"] += 1 self.metrics["starts"] += 1 tool_starts = self.metrics["tool_starts"] resp: Dict[str, Any] = {} resp.update({"action": "on_tool_start", "input_str": input_str}) resp.update(flatten_dict(serialized)) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_tool_start_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"tool_start_{tool_starts}") def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" self.metrics["step"] += 1 self.metrics["tool_ends"] += 1 self.metrics["ends"] += 1 tool_ends = self.metrics["tool_ends"] resp: Dict[str, Any] = {} resp.update({"action": "on_tool_end", "output": output}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_tool_end_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"tool_end_{tool_ends}") def on_tool_error(self, error: BaseException, **kwargs: Any) -> None: """Run when tool errors.""" self.metrics["step"] += 1 
self.metrics["errors"] += 1 def on_text(self, text: str, **kwargs: Any) -> None: """ Run when agent is ending. """ self.metrics["step"] += 1 self.metrics["text_ctr"] += 1 text_ctr = self.metrics["text_ctr"] resp: Dict[str, Any] = {} resp.update({"action": "on_text", "text": text}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_text_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"on_text_{text_ctr}") def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run when agent ends running.""" self.metrics["step"] += 1 self.metrics["agent_ends"] += 1 self.metrics["ends"] += 1 agent_ends = self.metrics["agent_ends"] resp: Dict[str, Any] = {} resp.update( { "action": "on_agent_finish", "output": finish.return_values["output"], "log": finish.log, } ) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_agent_finish_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"agent_finish_{agent_ends}") def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action.""" self.metrics["step"] += 1 self.metrics["tool_starts"] += 1 self.metrics["starts"] += 1 tool_starts = self.metrics["tool_starts"] resp: Dict[str, Any] = {} resp.update( { "action": "on_agent_action", "tool": action.tool, "tool_input": action.tool_input, "log": action.log, } ) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_agent_action_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"agent_action_{tool_starts}") def _create_session_analysis_df(self) -> Any: """Create a dataframe with all the information from the session.""" pd = import_pandas() on_llm_start_records_df = pd.DataFrame(self.records["on_llm_start_records"]) on_llm_end_records_df = pd.DataFrame(self.records["on_llm_end_records"]) llm_input_columns = ["step", "prompt"] if "name" in on_llm_start_records_df.columns: llm_input_columns.append("name") elif "id" in on_llm_start_records_df.columns: # id is llm class's full import path. 
For example: # ["langchain", "llms", "openai", "AzureOpenAI"] on_llm_start_records_df["name"] = on_llm_start_records_df["id"].apply( lambda id_: id_[-1] ) llm_input_columns.append("name") llm_input_prompts_df = ( on_llm_start_records_df[llm_input_columns] .dropna(axis=1) .rename({"step": "prompt_step"}, axis=1) ) complexity_metrics_columns = [] visualizations_columns = [] complexity_metrics_columns = [ "flesch_reading_ease", "flesch_kincaid_grade", "smog_index", "coleman_liau_index", "automated_readability_index", "dale_chall_readability_score", "difficult_words", "linsear_write_formula", "gunning_fog", # "text_standard", "fernandez_huerta", "szigriszt_pazos", "gutierrez_polini", "crawford", "gulpease_index", "osman", ] visualizations_columns = ["dependency_tree", "entities"] llm_outputs_df = ( on_llm_end_records_df[ [ "step", "text", "token_usage_total_tokens", "token_usage_prompt_tokens", "token_usage_completion_tokens", ] + complexity_metrics_columns + visualizations_columns ] .dropna(axis=1) .rename({"step": "output_step", "text": "output"}, axis=1) ) session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1) session_analysis_df["chat_html"] = session_analysis_df[ ["prompt", "output"] ].apply( lambda row: construct_html_from_prompt_and_generation( row["prompt"], row["output"] ), axis=1, ) return session_analysis_df def flush_tracker(self, langchain_asset: Any = None, finish: bool = False) -> None: pd = import_pandas() self.mlflg.table("action_records", pd.DataFrame(self.records["action_records"])) session_analysis_df = self._create_session_analysis_df() chat_html = session_analysis_df.pop("chat_html") chat_html = chat_html.replace("\n", "", regex=True) self.mlflg.table("session_analysis", pd.DataFrame(session_analysis_df)) self.mlflg.html("".join(chat_html.tolist()), "chat_html") if langchain_asset: # To avoid circular import error # mlflow only supports LLMChain asset if "langchain.chains.llm.LLMChain" in str(type(langchain_asset)): self.mlflg.langchain_artifact(langchain_asset) else: langchain_asset_path = str(Path(self.temp_dir.name, "model.json")) try: langchain_asset.save(langchain_asset_path) self.mlflg.artifact(langchain_asset_path) except ValueError: try: langchain_asset.save_agent(langchain_asset_path) self.mlflg.artifact(langchain_asset_path) except AttributeError: print("Could not save model.") traceback.print_exc() pass except NotImplementedError: print("Could not save model.") traceback.print_exc() pass except NotImplementedError: print("Could not save model.") traceback.print_exc() pass if finish: self.mlflg.finish_run() self._reset()
[ "\n", "prompt_step" ]