Tigris (class in langchain.vectorstores)
time (langchain.utilities.DuckDuckGoSearchAPIWrapper attribute)
to_typescript() (langchain.tools.APIOperation method)
token (langchain.llms.PredictionGuard attribute)
(langchain.utilities.PowerBIDataset attribute)
token_path (langchain.document_loaders.GoogleApiClient attribute)
(langchain.document_loaders.GoogleDriveLoader attribute)
Tokenizer (class in langchain.text_splitter)
tokenizer (langchain.llms.Petals attribute)
tokens (langchain.llms.AlephAlpha attribute)
tokens_path (langchain.llms.RWKV attribute)
tokens_per_chunk (langchain.text_splitter.Tokenizer attribute)
TokenTextSplitter (class in langchain.text_splitter)
ToMarkdownLoader (class in langchain.document_loaders)
TomlLoader (class in langchain.document_loaders)
tool() (in module langchain.agents)
(in module langchain.tools)
tool_run_logging_kwargs() (langchain.agents.Agent method)
(langchain.agents.BaseMultiActionAgent method)
(langchain.agents.BaseSingleActionAgent method)
(langchain.agents.LLMSingleActionAgent method)
tools (langchain.agents.agent_toolkits.JiraToolkit attribute)
(langchain.agents.agent_toolkits.ZapierToolkit attribute)
(langchain.agents.AgentExecutor attribute)
top_k (langchain.chains.GraphCypherQAChain attribute)
(langchain.chains.SQLDatabaseChain attribute)
(langchain.chat_models.ChatGooglePalm attribute)
(langchain.llms.AlephAlpha attribute)
(langchain.llms.Anthropic attribute)
(langchain.llms.ForefrontAI attribute)
(langchain.llms.GooglePalm attribute)
(langchain.llms.GPT4All attribute)
(langchain.llms.LlamaCpp attribute)
(langchain.llms.NLPCloud attribute)
(langchain.llms.Petals attribute)
(langchain.llms.VertexAI attribute)
(langchain.retrievers.ChatGPTPluginRetriever attribute)
(langchain.retrievers.DataberryRetriever attribute)
(langchain.retrievers.PineconeHybridSearchRetriever attribute)
top_k_docs_for_context (langchain.chains.ChatVectorDBChain attribute)
top_k_results (langchain.utilities.ArxivAPIWrapper attribute)
(langchain.utilities.GooglePlacesAPIWrapper attribute)
(langchain.utilities.PubMedAPIWrapper attribute)
(langchain.utilities.WikipediaAPIWrapper attribute)
top_n (langchain.retrievers.document_compressors.CohereRerank attribute)
top_p (langchain.chat_models.ChatGooglePalm attribute)
(langchain.llms.AlephAlpha attribute)
(langchain.llms.Anthropic attribute)
(langchain.llms.AzureOpenAI attribute)
(langchain.llms.ForefrontAI attribute)
(langchain.llms.GooglePalm attribute)
(langchain.llms.GooseAI attribute)
(langchain.llms.GPT4All attribute)
(langchain.llms.LlamaCpp attribute)
(langchain.llms.NLPCloud attribute)
(langchain.llms.OpenAI attribute)
(langchain.llms.OpenLM attribute)
(langchain.llms.Petals attribute)
(langchain.llms.RWKV attribute)
(langchain.llms.VertexAI attribute)
(langchain.llms.Writer attribute)
topP (langchain.llms.AI21 attribute)
traits (langchain.experimental.GenerativeAgent attribute)
transform (langchain.chains.TransformChain attribute)
transform_documents() (langchain.document_transformers.EmbeddingsRedundantFilter method)
(langchain.text_splitter.TextSplitter method)
transform_input_fn (langchain.llms.Databricks attribute)
transform_output_fn (langchain.llms.Databricks attribute)
transformers (langchain.retrievers.document_compressors.DocumentCompressorPipeline attribute)
TrelloLoader (class in langchain.document_loaders)
truncate (langchain.embeddings.CohereEmbeddings attribute)
(langchain.llms.Cohere attribute)
ts_type_from_python() (langchain.tools.APIOperation static method)
ttl (langchain.memory.RedisEntityStore attribute)
tuned_model_name (langchain.llms.VertexAI attribute)
TwitterTweetLoader (class in langchain.document_loaders)
type (langchain.output_parsers.ResponseSchema attribute)
(langchain.utilities.GoogleSerperAPIWrapper attribute)
Typesense (class in langchain.vectorstores)
U
unsecure (langchain.utilities.searx_search.SearxSearchWrapper attribute)
(langchain.utilities.SearxSearchWrapper attribute)
UnstructuredAPIFileIOLoader (class in langchain.document_loaders)
UnstructuredAPIFileLoader (class in langchain.document_loaders)
UnstructuredCSVLoader (class in langchain.document_loaders)
UnstructuredEmailLoader (class in langchain.document_loaders)
UnstructuredEPubLoader (class in langchain.document_loaders)
UnstructuredExcelLoader (class in langchain.document_loaders)
UnstructuredFileIOLoader (class in langchain.document_loaders)
UnstructuredFileLoader (class in langchain.document_loaders)
UnstructuredHTMLLoader (class in langchain.document_loaders)
UnstructuredImageLoader (class in langchain.document_loaders)
UnstructuredMarkdownLoader (class in langchain.document_loaders)
UnstructuredODTLoader (class in langchain.document_loaders)
UnstructuredPDFLoader (class in langchain.document_loaders)
UnstructuredPowerPointLoader (class in langchain.document_loaders)
UnstructuredRTFLoader (class in langchain.document_loaders)
UnstructuredURLLoader (class in langchain.document_loaders)
UnstructuredWordDocumentLoader (class in langchain.document_loaders)
UnstructuredXMLLoader (class in langchain.document_loaders)
update_document() (langchain.vectorstores.Chroma method)
update_forward_refs() (langchain.llms.AI21 class method)
(langchain.llms.AlephAlpha class method)
(langchain.llms.Anthropic class method)
(langchain.llms.Anyscale class method)
(langchain.llms.Aviary class method)
(langchain.llms.AzureOpenAI class method)
(langchain.llms.Banana class method)
(langchain.llms.Baseten class method)
(langchain.llms.Beam class method)
(langchain.llms.Bedrock class method)
(langchain.llms.CerebriumAI class method)
(langchain.llms.Cohere class method)
(langchain.llms.CTransformers class method)
(langchain.llms.Databricks class method)
(langchain.llms.DeepInfra class method)
(langchain.llms.FakeListLLM class method)
(langchain.llms.ForefrontAI class method)
(langchain.llms.GooglePalm class method)
(langchain.llms.GooseAI class method)
(langchain.llms.GPT4All class method)
(langchain.llms.HuggingFaceEndpoint class method)
(langchain.llms.HuggingFaceHub class method)
(langchain.llms.HuggingFacePipeline class method)
(langchain.llms.HuggingFaceTextGenInference class method)
(langchain.llms.HumanInputLLM class method)
(langchain.llms.LlamaCpp class method)
(langchain.llms.Modal class method)
(langchain.llms.MosaicML class method)
(langchain.llms.NLPCloud class method)
(langchain.llms.OpenAI class method)
(langchain.llms.OpenAIChat class method)
(langchain.llms.OpenLM class method)
(langchain.llms.Petals class method)
(langchain.llms.PipelineAI class method)
(langchain.llms.PredictionGuard class method)
(langchain.llms.PromptLayerOpenAI class method)
(langchain.llms.PromptLayerOpenAIChat class method)
(langchain.llms.Replicate class method)
(langchain.llms.RWKV class method)
(langchain.llms.SagemakerEndpoint class method)
(langchain.llms.SelfHostedHuggingFaceLLM class method)
(langchain.llms.SelfHostedPipeline class method)
(langchain.llms.StochasticAI class method)
(langchain.llms.VertexAI class method)
(langchain.llms.Writer class method)
upsert_messages() (langchain.memory.CosmosDBChatMessageHistory method)
url (langchain.document_loaders.GitHubIssuesLoader property)
(langchain.document_loaders.MathpixPDFLoader property)
(langchain.llms.Beam attribute)
(langchain.retrievers.ChatGPTPluginRetriever attribute)
(langchain.retrievers.RemoteLangChainRetriever attribute)
(langchain.tools.IFTTTWebhook attribute)
urls (langchain.document_loaders.PlaywrightURLLoader attribute)
(langchain.document_loaders.SeleniumURLLoader attribute)
use_mlock (langchain.embeddings.LlamaCppEmbeddings attribute)
(langchain.llms.GPT4All attribute)
(langchain.llms.LlamaCpp attribute)
use_mmap (langchain.llms.LlamaCpp attribute)
use_multiplicative_presence_penalty (langchain.llms.AlephAlpha attribute)
use_query_checker (langchain.chains.SQLDatabaseChain attribute)
username (langchain.vectorstores.ClickhouseSettings attribute)
(langchain.vectorstores.MyScaleSettings attribute)
V
validate_channel_or_videoIds_is_set() (langchain.document_loaders.GoogleApiClient class method)
(langchain.document_loaders.GoogleApiYoutubeLoader class method)
validate_init_args() (langchain.document_loaders.ConfluenceLoader static method)
validate_template (langchain.prompts.FewShotPromptTemplate attribute)
(langchain.prompts.FewShotPromptWithTemplates attribute)
(langchain.prompts.PromptTemplate attribute)
Vectara (class in langchain.vectorstores)
vector_field (langchain.vectorstores.SingleStoreDB attribute)
vectorizer (langchain.retrievers.TFIDFRetriever attribute)
VectorStore (class in langchain.vectorstores)
vectorstore (langchain.agents.agent_toolkits.VectorStoreInfo attribute)
(langchain.chains.ChatVectorDBChain attribute)
(langchain.chains.VectorDBQA attribute)
(langchain.chains.VectorDBQAWithSourcesChain attribute)
(langchain.prompts.example_selector.SemanticSimilarityExampleSelector attribute)
(langchain.retrievers.SelfQueryRetriever attribute)
(langchain.retrievers.TimeWeightedVectorStoreRetriever attribute)
vectorstore_info (langchain.agents.agent_toolkits.VectorStoreToolkit attribute)
vectorstores (langchain.agents.agent_toolkits.VectorStoreRouterToolkit attribute)
verbose (langchain.llms.AI21 attribute)
(langchain.llms.AlephAlpha attribute)
(langchain.llms.Anthropic attribute)
(langchain.llms.Anyscale attribute)
(langchain.llms.Aviary attribute)
(langchain.llms.AzureOpenAI attribute)
(langchain.llms.Banana attribute)
(langchain.llms.Baseten attribute)
(langchain.llms.Beam attribute)
(langchain.llms.Bedrock attribute)
(langchain.llms.CerebriumAI attribute)
(langchain.llms.Cohere attribute)
(langchain.llms.CTransformers attribute)
(langchain.llms.Databricks attribute)
(langchain.llms.DeepInfra attribute)
(langchain.llms.FakeListLLM attribute)
(langchain.llms.ForefrontAI attribute)
(langchain.llms.GooglePalm attribute)
(langchain.llms.GooseAI attribute)
(langchain.llms.GPT4All attribute)
(langchain.llms.HuggingFaceEndpoint attribute)
(langchain.llms.HuggingFaceHub attribute)
(langchain.llms.HuggingFacePipeline attribute)
(langchain.llms.HuggingFaceTextGenInference attribute)
(langchain.llms.HumanInputLLM attribute)
(langchain.llms.LlamaCpp attribute)
(langchain.llms.Modal attribute)
(langchain.llms.MosaicML attribute)
(langchain.llms.NLPCloud attribute)
(langchain.llms.OpenAI attribute)
(langchain.llms.OpenAIChat attribute)
(langchain.llms.OpenLM attribute)
(langchain.llms.Petals attribute)
(langchain.llms.PipelineAI attribute)
(langchain.llms.PredictionGuard attribute)
(langchain.llms.Replicate attribute)
(langchain.llms.RWKV attribute)
(langchain.llms.SagemakerEndpoint attribute)
(langchain.llms.SelfHostedHuggingFaceLLM attribute)
(langchain.llms.SelfHostedPipeline attribute)
(langchain.llms.StochasticAI attribute)
(langchain.llms.VertexAI attribute)
(langchain.llms.Writer attribute)
(langchain.retrievers.SelfQueryRetriever attribute)
(langchain.tools.BaseTool attribute)
(langchain.tools.Tool attribute)
VespaRetriever (class in langchain.retrievers)
video_ids (langchain.document_loaders.GoogleApiYoutubeLoader attribute)
visible_only (langchain.tools.ClickTool attribute)
vocab_only (langchain.embeddings.LlamaCppEmbeddings attribute)
(langchain.llms.GPT4All attribute)
(langchain.llms.LlamaCpp attribute)
W
wait_for_processing() (langchain.document_loaders.MathpixPDFLoader method)
WeatherDataLoader (class in langchain.document_loaders)
Weaviate (class in langchain.vectorstores)
WeaviateHybridSearchRetriever (class in langchain.retrievers)
WeaviateHybridSearchRetriever.Config (class in langchain.retrievers)
web_path (langchain.document_loaders.WebBaseLoader property)
web_paths (langchain.document_loaders.WebBaseLoader attribute)
WebBaseLoader (class in langchain.document_loaders)
WhatsAppChatLoader (class in langchain.document_loaders)
Wikipedia (class in langchain.docstore)
WikipediaLoader (class in langchain.document_loaders)
wolfram_alpha_appid (langchain.utilities.WolframAlphaAPIWrapper attribute)
writer_api_key (langchain.llms.Writer attribute)
writer_org_id (langchain.llms.Writer attribute)
Y
YoutubeLoader (class in langchain.document_loaders)
Z
zapier_description (langchain.tools.ZapierNLARunAction attribute)
ZepRetriever (class in langchain.retrievers)
ZERO_SHOT_REACT_DESCRIPTION (langchain.agents.AgentType attribute)
Zilliz (class in langchain.vectorstores)
Welcome to LangChain#
LangChain is a framework for developing applications powered by language models. We believe that the most powerful and differentiated applications will not only call out to a language model, but will also be:
Data-aware: connect a language model to other sources of data
Agentic: allow a language model to interact with its environment
The LangChain framework is designed around these principles.
This is the Python specific portion of the documentation. For a purely conceptual guide to LangChain, see here. For the JavaScript documentation, see here.
Getting Started#
How to get started using LangChain to create a Language Model application.
Quickstart Guide
Concepts and terminology.
Concepts and terminology
Tutorials created by community experts and presented on YouTube.
Tutorials
Modules#
These modules are the core abstractions which we view as the building blocks of any LLM-powered application.
For each module LangChain provides standard, extendable interfaces. LangChain also provides external integrations and even end-to-end implementations for off-the-shelf use.
The docs for each module contain quickstart examples, how-to guides, reference docs, and conceptual guides.
The modules are (from least to most complex):
Models: Supported model types and integrations.
Prompts: Prompt management, optimization, and serialization.
Memory: Memory refers to state that is persisted between calls of a chain/agent.
Indexes: Language models become much more powerful when combined with application-specific data - this module contains interfaces and integrations for loading, querying and updating external data.
Chains: Chains are structured sequences of calls (to an LLM or to a different utility).
Agents: An agent is a Chain in which an LLM, given a high-level directive and a set of tools, repeatedly decides an action, executes the action and observes the outcome until the high-level directive is complete.
Callbacks: Callbacks let you log and stream the intermediate steps of any chain, making it easy to observe, debug, and evaluate the internals of an application.
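To make the composition concrete, here is a minimal sketch of the simplest chain, an LLMChain that combines a prompt template with a model (an editorial example rather than part of this page; it assumes the OpenAI integration is installed and an OPENAI_API_KEY environment variable is set):
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
# A model wrapper and a prompt with one input variable.
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
# The chain formats the prompt and calls the LLM in one step.
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run("colorful socks"))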
Use Cases#
Best practices and built-in implementations for common LangChain use cases:
Autonomous Agents: Autonomous agents are long-running agents that take many steps in an attempt to accomplish an objective. Examples include AutoGPT and BabyAGI.
Agent Simulations: Putting agents in a sandbox and observing how they interact with each other and react to events can be an effective way to evaluate their long-range reasoning and planning abilities.
Personal Assistants: One of the primary LangChain use cases. Personal assistants need to take actions, remember interactions, and have knowledge about your data.
Question Answering: Another common LangChain use case. Answering questions over specific documents, only utilizing the information in those documents to construct an answer.
Chatbots: Language models love to chat, making this a very natural use of them.
Querying Tabular Data: Recommended reading if you want to use language models to query structured data (CSVs, SQL, dataframes, etc).
Code Understanding: Recommended reading if you want to use language models to analyze code.
Interacting with APIs: Enabling language models to interact with APIs is extremely powerful. It gives them access to up-to-date information and allows them to take actions.
Extraction: Extract structured information from text.
Summarization: Compressing longer documents. A type of Data-Augmented Generation.
Evaluation: Generative models are hard to evaluate with traditional metrics. One promising approach is to use language models themselves to do the evaluation.
Reference Docs#
Full documentation on all methods, classes, installation methods, and integration setups for LangChain.
LangChain Installation
Reference Documentation
Ecosystem#
LangChain integrates with many different LLMs, systems, and products.
In turn, many systems and products depend on LangChain.
Together, this creates a vibrant and thriving ecosystem.
Integrations: Guides for how other products can be used with LangChain.
Dependents: List of repositories that use LangChain.
Deployments: A collection of instructions, code snippets, and template repositories for deploying LangChain apps.
Additional Resources#
Additional resources we think may be useful as you develop your application!
LangChainHub: The LangChainHub is a place to share and explore other prompts, chains, and agents.
Gallery: A collection of great projects that use LangChain, compiled by the folks at Kyrolabs. Useful for finding inspiration and example implementations.
Deploying LLMs in Production: A collection of best practices and tutorials for deploying LLMs in production.
Tracing: A guide on using tracing in LangChain to visualize the execution of chains and agents.
Model Laboratory: Experimenting with different prompts, models, and chains is a big part of developing the best possible application. The ModelLaboratory makes it easy to do so.
Discord: Join us on our Discord to discuss all things LangChain!
YouTube: A collection of the LangChain tutorials and videos.
Production Support: As you move your LangChains into production, we’d love to offer more comprehensive support. Please fill out this form and we’ll set up a dedicated support Slack channel.
Integrations#
LangChain integrates with many LLMs, systems, and products.
Integrations by Module#
Integrations grouped by the core LangChain module they map to:
LLM Providers
Chat Model Providers
Text Embedding Model Providers
Document Loader Integrations
Text Splitter Integrations
Vectorstore Providers
Retriever Providers
Tool Providers
Toolkit Integrations
Dependencies#
LangChain depends on several hundred Python packages.
All Integrations#
A comprehensive list of LLMs, systems, and products integrated with LangChain:
Tracing Walkthrough
AI21 Labs
Aim
Airbyte
Aleph Alpha
Amazon Bedrock
AnalyticDB
Annoy
Anthropic
Anyscale
Apify
Argilla
Arxiv
AtlasDB
AwaDB
AWS S3 Directory
AZLyrics
Azure Blob Storage
Azure Cognitive Search
Azure OpenAI
Banana
Beam
BiliBili
Blackboard
Cassandra
CerebriumAI
Chroma
ClearML
ClickHouse
Cohere
College Confidential
Comet
Confluence
C Transformers
Databerry
Databricks
DeepInfra
Deep Lake
Diffbot
Discord
Docugami
DuckDB
Elasticsearch
EverNote
Facebook Chat
Figma
ForefrontAI
Git
GitBook
Google BigQuery
Google Cloud Storage
Google Drive
Google Search
Google Serper
Google Vertex AI
GooseAI
GPT4All
Graphsignal
Gutenberg
Hacker News
Hazy Research
Helicone
Hugging Face
iFixit
IMSDb
Jina
LanceDB
Llama.cpp
MediaWikiDump
Metal
Microsoft OneDrive
Microsoft PowerPoint
Microsoft Word
Milvus
MLflow
Modal
Modern Treasury
Momento
MyScale
NLPCloud
Notion DB
Obsidian
OpenAI
OpenSearch
OpenWeatherMap
Petals
PGVector
Pinecone
PipelineAI
Prediction Guard
PromptLayer
Psychic
Qdrant
Ray Serve
Rebuff
Reddit
Redis
Replicate
Roam
Runhouse
RWKV-4
SageMaker Endpoint
SearxNG Search API
SerpAPI
Shale Protocol
scikit-learn
Slack
spaCy
Spreedly
StochasticAI
Stripe
Tair
Telegram
Tensorflow Hub
2Markdown
Trello
Twitter
Unstructured
Vectara
Vespa
Weights & Biases
Weather
Weaviate
WhatsApp
WhyLabs
Wikipedia
Wolfram Alpha
Writer
Yeager.ai
YouTube
Zep
Zilliz
Dependents#
Dependents stats for hwchase17/langchain
[update: 2023-06-05; only dependent repositories with Stars > 100]
Repository
Stars
openai/openai-cookbook
38024
LAION-AI/Open-Assistant
33609
microsoft/TaskMatrix
33136
hpcaitech/ColossalAI
30032
imartinez/privateGPT
28094
reworkd/AgentGPT
23430
openai/chatgpt-retrieval-plugin
17942
jerryjliu/llama_index
16697
mindsdb/mindsdb
16410
mlflow/mlflow
14517
GaiZhenbiao/ChuanhuChatGPT
10793
databrickslabs/dolly
10155
openai/evals
10076
AIGC-Audio/AudioGPT
8619
logspace-ai/langflow
8211
imClumsyPanda/langchain-ChatGLM
8154
PromtEngineer/localGPT
6853
StanGirard/quivr
6830
PipedreamHQ/pipedream
6520
go-skynet/LocalAI
6018
arc53/DocsGPT
5643
e2b-dev/e2b
5075
langgenius/dify
4281
nsarrazin/serge
4228
zauberzeug/nicegui
4084
madawei2699/myGPTReader
4039
wenda-LLM/wenda
3871
GreyDGL/PentestGPT
3837
zilliztech/GPTCache
3625
csunny/DB-GPT
3545
gkamradt/langchain-tutorials
3404
mmabrouk/chatgpt-wrapper
3303
postgresml/postgresml
3052
marqo-ai/marqo
3014
MineDojo/Voyager
2945
PrefectHQ/marvin
2761
project-baize/baize-chatbot
2673
hwchase17/chat-langchain
2589
whitead/paper-qa
2572
Azure-Samples/azure-search-openai-demo
2366
GerevAI/gerev
2330
OpenGVLab/InternGPT
2289
ParisNeo/gpt4all-ui
2159
OpenBMB/BMTools
2158
guangzhengli/ChatFiles
2005
h2oai/h2ogpt
1939
Farama-Foundation/PettingZoo
1845
OpenGVLab/Ask-Anything
1749
IntelligenzaArtificiale/Free-Auto-GPT
1740
Unstructured-IO/unstructured
1628
hwchase17/notion-qa
1607
NVIDIA/NeMo-Guardrails
1544
SamurAIGPT/privateGPT
1543
paulpierre/RasaGPT
1526
yanqiangmiffy/Chinese-LangChain
1485
Kav-K/GPTDiscord
1402
vocodedev/vocode-python
1387
Chainlit/chainlit
1336
lunasec-io/lunasec
1323
psychic-api/psychic
1248
agiresearch/OpenAGI
1208
jina-ai/thinkgpt
1193
thomas-yanxin/LangChain-ChatGLM-Webui
1182
ttengwang/Caption-Anything
1137
jina-ai/dev-gpt
1135
greshake/llm-security
1086
keephq/keep
1063
juncongmoo/chatllama
1037
richardyc/Chrome-GPT
1035
visual-openllm/visual-openllm
997
mmz-001/knowledge_gpt
995
jina-ai/langchain-serve
949
irgolic/AutoPR
936
microsoft/X-Decoder
908
poe-platform/api-bot-tutorial
902
peterw/Chat-with-Github-Repo
875
cirediatpl/FigmaChain
822
homanp/superagent
806
seanpixel/Teenage-AGI
800
chatarena/chatarena
796
hashintel/hash
795
SamurAIGPT/Camel-AutoGPT
786
rlancemartin/auto-evaluator
770
corca-ai/EVAL
769
101dotxyz/GPTeam
755
noahshinn024/reflexion
706
eyurtsev/kor
695
cheshire-cat-ai/core
681
e-johnstonn/BriefGPT
656
run-llama/llama-lab
635
griptape-ai/griptape
583
namuan/dr-doc-search
555
getmetal/motorhead
550
kreneskyp/ix
543
hwchase17/chat-your-data
510
Anil-matcha/ChatPDF
501
whyiyhw/chatgpt-wechat
497
SamurAIGPT/ChatGPT-Developer-Plugins
496
microsoft/PodcastCopilot
492
debanjum/khoj
485
akshata29/chatpdf
485
langchain-ai/langchain-aiplugin
462
jina-ai/agentchain
460
alexanderatallah/window.ai
457
yeagerai/yeagerai-agent
451
mckaywrigley/repo-chat
446
michaelthwan/searchGPT
446
mpaepper/content-chatbot
441
freddyaboulton/gradio-tools
439
ruoccofabrizio/azure-open-ai-embeddings-qna
429
StevenGrove/GPT4Tools
422
jonra1993/fastapi-alembic-sqlmodel-async
407
msoedov/langcorn
405
amosjyng/langchain-visualizer
395
ajndkr/lanarky
384
mtenenholtz/chat-twitter
376
steamship-core/steamship-langchain
371
langchain-ai/auto-evaluator
365
xuwenhao/geektime-ai-course
358
continuum-llms/chatgpt-memory
357
opentensor/bittensor
347
showlab/VLog
345
daodao97/chatdoc
345
logan-markewich/llama_index_starter_pack
332
poe-platform/poe-protocol
320
explosion/spacy-llm
312
andylokandy/gpt-4-search
311
alejandro-ao/langchain-ask-pdf
310
jupyterlab/jupyter-ai
294
BlackHC/llm-strategy
283
itamargol/openai
281
momegas/megabots
279
personoids/personoids-lite
277
yvann-hub/Robby-chatbot
267
Anil-matcha/Website-to-Chatbot
266
Cheems-Seminar/grounded-segment-any-parts
260
sullivan-sean/chat-langchainjs
248
bborn/howdoi.ai
245
daveebbelaar/langchain-experiments
240
MagnivOrg/prompt-layer-library
237
ur-whitelab/exmol
234
conceptofmind/toolformer
234
recalign/RecAlign
226
OpenBMB/AgentVerse
220
alvarosevilla95/autolang
219
JohnSnowLabs/nlptest
216
kaleido-lab/dolphin
215
truera/trulens
208
NimbleBoxAI/ChainFury
208
airobotlab/KoChatGPT
207
monarch-initiative/ontogpt
200
paolorechia/learn-langchain
195
shaman-ai/agent-actors
185
Haste171/langchain-chatbot
184
plchld/InsightFlow
182
su77ungr/CASALIOY
180
jbrukh/gpt-jargon
177
benthecoder/ClassGPT
174
billxbf/ReWOO
170
filip-michalsky/SalesGPT
168
hwchase17/langchain-streamlit-template
168
radi-cho/datasetGPT
164
hardbyte/qabot
164
gia-guar/JARVIS-ChatGPT
158
plastic-labs/tutor-gpt
154
yasyf/compress-gpt
154
fengyuli-dev/multimedia-gpt
154
ethanyanjiali/minChatGPT
153
hwchase17/chroma-langchain
153
edreisMD/plugnplai
148
chakkaradeep/pyCodeAGI
145
ccurme/yolopandas
145
shamspias/customizable-gpt-chatbot
144
realminchoi/babyagi-ui
143
PradipNichite/Youtube-Tutorials
140
gustavz/DataChad
140
Klingefjord/chatgpt-telegram
140
Jaseci-Labs/jaseci
139
handrew/browserpilot
137
jmpaz/promptlib
137
SamPink/dev-gpt
135
menloparklab/langchain-cohere-qdrant-doc-retrieval
135
hirokidaichi/wanna
135
steamship-core/vercel-examples
134
pablomarin/GPT-Azure-Search-Engine
133
ibiscp/LLM-IMDB
133
shauryr/S2QA
133
jerlendds/osintbuddy
132
yuanjie-ai/ChatLLM
132
yasyf/summ
132
WongSaang/chatgpt-ui-server
130
peterw/StoryStorm
127
Teahouse-Studios/akari-bot
126
vaibkumr/prompt-optimizer
125
preset-io/promptimize
124
homanp/vercel-langchain
124
petehunt/langchain-github-bot
123
eunomia-bpf/GPTtrace
118
nicknochnack/LangchainDocuments
116
jiran214/GPT-vup
112
rsaryev/talk-codebase
112
zenml-io/zenml-projects
112
microsoft/azure-openai-in-a-day-workshop
112
davila7/file-gpt
112
prof-frink-lab/slangchain
111
aurelio-labs/arxiv-bot
110
fixie-ai/fixie-examples
108
miaoshouai/miaoshouai-assistant
105
flurb18/AgentOoba
103
solana-labs/chatgpt-plugin
102
Significant-Gravitas/Auto-GPT-Benchmarks
102
kaarthik108/snowChat
100
Generated by github-dependents-info
github-dependents-info --repo hwchase17/langchain --markdownfile dependents.md --minstars 100 --sort stars
Source code for langchain.requests
"""Lightweight wrapper around requests library, with async support."""
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Dict, Optional
import aiohttp
import requests
from pydantic import BaseModel, Extra
class Requests(BaseModel):
"""Wrapper around requests to handle auth and async.
The main purpose of this wrapper is to handle authentication (by saving
headers) and enable easy async methods on the same base object.
"""
headers: Optional[Dict[str, str]] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def get(self, url: str, **kwargs: Any) -> requests.Response:
"""GET the URL and return the text."""
return requests.get(url, headers=self.headers, **kwargs)
def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
"""POST to the URL and return the text."""
return requests.post(url, json=data, headers=self.headers, **kwargs)
def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
"""PATCH the URL and return the text."""
return requests.patch(url, json=data, headers=self.headers, **kwargs)
def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
"""PUT the URL and return the text."""
return requests.put(url, json=data, headers=self.headers, **kwargs)
def delete(self, url: str, **kwargs: Any) -> requests.Response:
"""DELETE the URL and return the text."""
return requests.delete(url, headers=self.headers, **kwargs)
@asynccontextmanager
async def _arequest(
self, method: str, url: str, **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""Make an async request."""
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.request(
method, url, headers=self.headers, **kwargs
) as response:
yield response
else:
async with self.aiosession.request(
method, url, headers=self.headers, **kwargs
) as response:
yield response
@asynccontextmanager
async def aget(
self, url: str, **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""GET the URL and return the text asynchronously."""
async with self._arequest("GET", url, **kwargs) as response:
yield response
@asynccontextmanager
async def apost(
self, url: str, data: Dict[str, Any], **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""POST to the URL and return the text asynchronously."""
async with self._arequest("POST", url, json=data, **kwargs) as response:
yield response
@asynccontextmanager
async def apatch(
self, url: str, data: Dict[str, Any], **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""PATCH the URL and return the text asynchronously.""" | https://python.langchain.com/en/latest/_modules/langchain/requests.html |
46379634f13f-2 | """PATCH the URL and return the text asynchronously."""
async with self._arequest("PATCH", url, json=data, **kwargs) as response:
yield response
@asynccontextmanager
async def aput(
self, url: str, data: Dict[str, Any], **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""PUT the URL and return the text asynchronously."""
async with self._arequest("PUT", url, json=data, **kwargs) as response:
yield response
@asynccontextmanager
async def adelete(
self, url: str, **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""DELETE the URL and return the text asynchronously."""
async with self._arequest("DELETE", url, **kwargs) as response:
yield response
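# Editorial usage sketch (not part of the module source; the URL and header
# are illustrative): the async helpers are context managers, so the response
# must be consumed inside the block:
#
#   async with Requests(headers={"X-Api-Key": "<key>"}).aget(
#       "https://example.com"
#   ) as response:
#       body = await response.text()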
[docs]class TextRequestsWrapper(BaseModel):
"""Lightweight wrapper around requests library.
The main purpose of this wrapper is to always return a text output.
"""
headers: Optional[Dict[str, str]] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def requests(self) -> Requests:
return Requests(headers=self.headers, aiosession=self.aiosession)
[docs] def get(self, url: str, **kwargs: Any) -> str:
"""GET the URL and return the text."""
return self.requests.get(url, **kwargs).text
[docs] def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""POST to the URL and return the text."""
return self.requests.post(url, data, **kwargs).text
[docs] def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PATCH the URL and return the text."""
return self.requests.patch(url, data, **kwargs).text
[docs] def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PUT the URL and return the text."""
return self.requests.put(url, data, **kwargs).text
[docs] def delete(self, url: str, **kwargs: Any) -> str:
"""DELETE the URL and return the text."""
return self.requests.delete(url, **kwargs).text
[docs] async def aget(self, url: str, **kwargs: Any) -> str:
"""GET the URL and return the text asynchronously."""
async with self.requests.aget(url, **kwargs) as response:
return await response.text()
[docs] async def apost(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""POST to the URL and return the text asynchronously."""
async with self.requests.apost(url, data, **kwargs) as response:
return await response.text()
[docs] async def apatch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PATCH the URL and return the text asynchronously."""
async with self.requests.apatch(url, data, **kwargs) as response:
return await response.text()
[docs] async def aput(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PUT the URL and return the text asynchronously."""
async with self.requests.aput(url, data, **kwargs) as response:
return await response.text()
[docs] async def adelete(self, url: str, **kwargs: Any) -> str:
"""DELETE the URL and return the text asynchronously."""
async with self.requests.adelete(url, **kwargs) as response:
return await response.text()
# For backwards compatibility
RequestsWrapper = TextRequestsWrapper
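# Editorial usage sketch (not part of the module source; the URL and token
# are illustrative). Unlike Requests, this wrapper returns response bodies
# as plain strings, which is the shape most LangChain tools expect:
#
#   wrapper = TextRequestsWrapper(headers={"Authorization": "Bearer <token>"})
#   body = wrapper.get("https://example.com")  # -> str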
Source code for langchain.text_splitter
"""Functionality for splitting text."""
from __future__ import annotations
import copy
import logging
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Iterable,
List,
Literal,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from langchain.docstore.document import Document
from langchain.schema import BaseDocumentTransformer
logger = logging.getLogger(__name__)
TS = TypeVar("TS", bound="TextSplitter")
def _split_text_with_regex(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = [_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)]
if len(_splits) % 2 == 0:
splits += _splits[-1:]
splits = [_splits[0]] + splits
else:
splits = text.split(separator)
else:
splits = list(text)
return [s for s in splits if s != ""]
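# Worked example (editorial note): with separator "\n\n" and
# keep_separator=True, re.split("(\n\n)", "a\n\nb\n\nc") yields
# ["a", "\n\n", "b", "\n\n", "c"]; the comprehension then glues each
# delimiter onto the following piece, giving ["a", "\n\nb", "\n\nc"],
# so every chunk after the first keeps its leading separator.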
[docs]class TextSplitter(BaseDocumentTransformer, ABC):
"""Interface for splitting text into chunks."""
def __init__(
self,
chunk_size: int = 4000,
chunk_overlap: int = 200,
length_function: Callable[[str], int] = len,
keep_separator: bool = False,
add_start_index: bool = False,
) -> None:
"""Create a new TextSplitter.
Args:
chunk_size: Maximum size of chunks to return
chunk_overlap: Overlap in characters between chunks
length_function: Function that measures the length of given chunks
keep_separator: Whether or not to keep the separator in the chunks
add_start_index: If `True`, includes chunk's start index in metadata
"""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self._length_function = length_function
self._keep_separator = keep_separator
self._add_start_index = add_start_index
[docs] @abstractmethod
def split_text(self, text: str) -> List[str]:
"""Split text into multiple components."""
[docs] def create_documents(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> List[Document]:
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
index = -1
for chunk in self.split_text(text):
metadata = copy.deepcopy(_metadatas[i])
if self._add_start_index:
index = text.find(chunk, index + 1)
metadata["start_index"] = index
new_doc = Document(page_content=chunk, metadata=metadata)
documents.append(new_doc)
return documents
[docs] def split_documents(self, documents: Iterable[Document]) -> List[Document]:
"""Split documents."""
texts, metadatas = [], []
for doc in documents:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
return self.create_documents(texts, metadatas=metadatas)
def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:
text = separator.join(docs)
text = text.strip()
if text == "":
return None
else:
return text
def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
separator_len = self._length_function(separator)
docs = []
current_doc: List[str] = []
total = 0
for d in splits:
_len = self._length_function(d)
if (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
):
if total > self._chunk_size:
logger.warning(
f"Created a chunk of size {total}, "
f"which is longer than the specified {self._chunk_size}"
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
# Keep on popping if:
# - we have a larger chunk than in the chunk overlap
# - or if we still have any chunks and the length is long
while total > self._chunk_overlap or (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
and total > 0
):
total -= self._length_function(current_doc[0]) + (
separator_len if len(current_doc) > 1 else 0
)
current_doc = current_doc[1:]
current_doc.append(d)
total += _len + (separator_len if len(current_doc) > 1 else 0)
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
return docs
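# Worked example (editorial note): with chunk_size=10, chunk_overlap=5,
# separator=" " and splits ["aaaa", "bbbb", "cccc"], adding "cccc" would
# push the running total past 10, so ["aaaa", "bbbb"] is flushed as
# "aaaa bbbb"; "bbbb" fits within the 5-character overlap budget and is
# retained, so the next chunk is "bbbb cccc".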
[docs] @classmethod
def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
"""Text splitter that uses HuggingFace tokenizer to count length."""
try:
from transformers import PreTrainedTokenizerBase
if not isinstance(tokenizer, PreTrainedTokenizerBase):
raise ValueError(
"Tokenizer received was not an instance of PreTrainedTokenizerBase"
)
def _huggingface_tokenizer_length(text: str) -> int:
return len(tokenizer.encode(text))
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
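# Editorial usage sketch (requires `pip install transformers`; the model
# name is illustrative):
#
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2")
#   splitter = CharacterTextSplitter.from_huggingface_tokenizer(
#       tok, chunk_size=100, chunk_overlap=0
#   )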
[docs] @classmethod
def from_tiktoken_encoder(
cls: Type[TS],
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> TS:
"""Text splitter that uses tiktoken encoder to count length."""
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate max_tokens_for_prompt. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str) -> int:
return len(
enc.encode(
text,
allowed_special=allowed_special,
disallowed_special=disallowed_special,
)
)
if issubclass(cls, TokenTextSplitter):
extra_kwargs = {
"encoding_name": encoding_name,
"model_name": model_name,
"allowed_special": allowed_special,
"disallowed_special": disallowed_special,
}
kwargs = {**kwargs, **extra_kwargs}
return cls(length_function=_tiktoken_encoder, **kwargs)
[docs] def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform sequence of documents by splitting them."""
return self.split_documents(list(documents))
[docs] async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a sequence of documents by splitting them."""
raise NotImplementedError
[docs]class CharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters."""
def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separator = separator
[docs] def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = _split_text_with_regex(text, self._separator, self._keep_separator)
_separator = "" if self._keep_separator else self._separator
return self._merge_splits(splits, _separator)
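# Editorial usage sketch (the text and metadata are illustrative):
#
#   splitter = CharacterTextSplitter(separator="\n\n", chunk_size=1000, chunk_overlap=200)
#   chunks = splitter.split_text(long_text)
#   docs = splitter.create_documents([long_text], metadatas=[{"source": "a.txt"}])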
# should be in newer Python versions (3.10+)
# @dataclass(frozen=True, kw_only=True, slots=True)
[docs]@dataclass(frozen=True)
class Tokenizer:
chunk_overlap: int
tokens_per_chunk: int
decode: Callable[[list[int]], str]
encode: Callable[[str], List[int]]
[docs]def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> List[str]:
"""Split incoming text and return chunks."""
splits: List[str] = []
input_ids = tokenizer.encode(text)
start_idx = 0
cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
while start_idx < len(input_ids):
splits.append(tokenizer.decode(chunk_ids))
start_idx += tokenizer.tokens_per_chunk - tokenizer.chunk_overlap
cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
return splits
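# Worked example (editorial note): with tokens_per_chunk=4 and
# chunk_overlap=1, token ids [1..10] are windowed as [1,2,3,4], [4,5,6,7],
# [7,8,9,10], then [10]; the window start advances by
# tokens_per_chunk - chunk_overlap = 3 ids per step until it passes the
# end of the input.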
[docs]class TokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(
self,
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to for TokenTextSplitter. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
self._tokenizer = enc
self._allowed_special = allowed_special
self._disallowed_special = disallowed_special
[docs] def split_text(self, text: str) -> List[str]:
def _encode(_text: str) -> List[int]:
return self._tokenizer.encode(
_text,
allowed_special=self._allowed_special,
disallowed_special=self._disallowed_special,
)
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap,
tokens_per_chunk=self._chunk_size,
decode=self._tokenizer.decode,
encode=_encode,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
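# Editorial usage sketch (requires `pip install tiktoken`; the text is
# illustrative):
#
#   splitter = TokenTextSplitter(encoding_name="gpt2", chunk_size=100, chunk_overlap=10)
#   chunks = splitter.split_text(long_text)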
[docs]class SentenceTransformersTokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(
self,
chunk_overlap: int = 50,
model_name: str = "sentence-transformers/all-mpnet-base-v2",
tokens_per_chunk: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs, chunk_overlap=chunk_overlap)
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ImportError(
"Could not import sentence_transformer python package. "
"This is needed in order to for SentenceTransformersTokenTextSplitter. "
"Please install it with `pip install sentence-transformers`."
)
self.model_name = model_name
self._model = SentenceTransformer(self.model_name)
self.tokenizer = self._model.tokenizer
self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk)
def _initialize_chunk_configuration(
self, *, tokens_per_chunk: Optional[int]
) -> None:
self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length)
if tokens_per_chunk is None:
self.tokens_per_chunk = self.maximum_tokens_per_chunk
else:
self.tokens_per_chunk = tokens_per_chunk
if self.tokens_per_chunk > self.maximum_tokens_per_chunk:
raise ValueError(
f"The token limit of the models '{self.model_name}'"
f" is: {self.maximum_tokens_per_chunk}."
f" Argument tokens_per_chunk={self.tokens_per_chunk}"
f" > maximum token limit."
)
[docs] def split_text(self, text: str) -> List[str]:
def encode_strip_start_and_stop_token_ids(text: str) -> List[int]:
return self._encode(text)[1:-1]
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap,
tokens_per_chunk=self.tokens_per_chunk,
decode=self.tokenizer.decode,
encode=encode_strip_start_and_stop_token_ids,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
[docs] def count_tokens(self, *, text: str) -> int:
return len(self._encode(text))
_max_length_equal_32_bit_integer = 2**32
def _encode(self, text: str) -> List[int]:
token_ids_with_start_and_end_token_ids = self.tokenizer.encode(
text,
max_length=self._max_length_equal_32_bit_integer,
truncation="do_not_truncate",
)
return token_ids_with_start_and_end_token_ids
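# Editorial usage sketch (requires `pip install sentence-transformers`;
# the default model is downloaded on first use):
#
#   splitter = SentenceTransformersTokenTextSplitter(chunk_overlap=0)
#   n_tokens = splitter.count_tokens(text="some text")
#   chunks = splitter.split_text("some text")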
[docs]class Language(str, Enum):
CPP = "cpp"
GO = "go"
JAVA = "java"
JS = "js"
PHP = "php"
PROTO = "proto"
PYTHON = "python"
RST = "rst"
RUBY = "ruby"
RUST = "rust"
SCALA = "scala"
SWIFT = "swift"
MARKDOWN = "markdown"
LATEX = "latex"
HTML = "html"
[docs]class RecursiveCharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters.
Recursively tries to split by different characters to find one
that works.
"""
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or ["\n\n", "\n", " ", ""]
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
if _s == "":
separator = _s
break
if re.search(_s, text):
separator = _s
new_separators = separators[i + 1 :]
break
splits = _split_text_with_regex(text, separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return final_chunks
[docs] def split_text(self, text: str) -> List[str]:
return self._split_text(text, self._separators)
[docs] @classmethod
def from_language(
cls, language: Language, **kwargs: Any
) -> RecursiveCharacterTextSplitter:
separators = cls.get_separators_for_language(language)
return cls(separators=separators, **kwargs)
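# Editorial usage sketch: build a language-aware splitter from the presets
# returned by get_separators_for_language below (the source string is
# illustrative):
#
#   py_splitter = RecursiveCharacterTextSplitter.from_language(
#       Language.PYTHON, chunk_size=500, chunk_overlap=0
#   )
#   docs = py_splitter.create_documents([python_source_string])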
[docs] @staticmethod
def get_separators_for_language(language: Language) -> List[str]:
if language == Language.CPP:
return [
# Split along class definitions
"\nclass ",
# Split along function definitions
"\nvoid ",
"\nint ",
"\nfloat ",
"\ndouble ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.GO:
return [
# Split along function definitions
"\nfunc ",
"\nvar ",
"\nconst ",
"\ntype ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.JAVA:
return [
# Split along class definitions
"\nclass ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\nstatic ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.JS:
return [
# Split along function definitions
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
"\ndefault ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PHP:
return [
# Split along function definitions
"\nfunction ",
# Split along class definitions
"\nclass ", | https://python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
e990308dc849-12 | "\nfunction ",
# Split along class definitions
"\nclass ",
# Split along control flow statements
"\nif ",
"\nforeach ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PROTO:
return [
# Split along message definitions
"\nmessage ",
# Split along service definitions
"\nservice ",
# Split along enum definitions
"\nenum ",
# Split along option definitions
"\noption ",
# Split along import statements
"\nimport ",
# Split along syntax declarations
"\nsyntax ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PYTHON:
return [
# First, try to split along class definitions
"\nclass ",
"\ndef ",
"\n\tdef ",
# Now split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RST:
return [
# Split along section titles
"\n=+\n",
"\n-+\n",
"\n\*+\n",
# Split along directive markers
"\n\n.. *\n\n",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RUBY:
return [
# Split along method definitions
"\ndef ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nunless ",
"\nwhile ",
"\nfor ",
"\ndo ",
"\nbegin ",
"\nrescue ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RUST:
return [
# Split along function definitions
"\nfn ",
"\nconst ",
"\nlet ",
# Split along control flow statements
"\nif ",
"\nwhile ",
"\nfor ",
"\nloop ",
"\nmatch ",
"\nconst ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SCALA:
return [
# Split along class definitions
"\nclass ",
"\nobject ",
# Split along method definitions
"\ndef ",
"\nval ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nmatch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SWIFT:
return [
# Split along function definitions
"\nfunc ",
# Split along class definitions
"\nclass ", | https://python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
e990308dc849-14 | "\nfunc ",
# Split along class definitions
"\nclass ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.MARKDOWN:
return [
# First, try to split along Markdown headings (starting with level 2)
"\n#{1,6} ",
# Note the alternative syntax for headings (below) is not handled here
# Heading level 2
# ---------------
# End of code block
"```\n",
# Horizontal lines
"\n\*\*\*+\n",
"\n---+\n",
"\n___+\n",
# Note that this splitter doesn't handle horizontal lines defined
# by *three or more* of ***, ---, or ___
"\n\n",
"\n",
" ",
"",
]
elif language == Language.LATEX:
return [
# First, try to split along Latex sections
"\n\\\chapter{",
"\n\\\section{",
"\n\\\subsection{",
"\n\\\subsubsection{",
# Now split by environments
"\n\\\begin{enumerate}",
"\n\\\begin{itemize}",
"\n\\\begin{description}",
"\n\\\begin{list}",
"\n\\\begin{quote}",
"\n\\\begin{quotation}", | https://python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
e990308dc849-15 | "\n\\\begin{quote}",
"\n\\\begin{quotation}",
"\n\\\begin{verse}",
"\n\\\begin{verbatim}",
## Now split by math environments
"\n\\\begin{align}",
"$$",
"$",
# Now split by the normal type of lines
" ",
"",
]
elif language == Language.HTML:
return [
# First, try to split along HTML tags
"<body",
"<div",
"<p",
"<br",
"<li",
"<h1",
"<h2",
"<h3",
"<h4",
"<h5",
"<h6",
"<span",
"<table",
"<tr",
"<td",
"<th",
"<ul",
"<ol",
"<header",
"<footer",
"<nav",
# Head
"<head",
"<style",
"<script",
"<meta",
"<title",
"",
]
else:
raise ValueError(
f"Language {language} is not supported! "
f"Please choose from {list(Language)}"
)
[docs]class NLTKTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using NLTK."""
def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
try:
from nltk.tokenize import sent_tokenize
self._tokenizer = sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
self._separator = separator
[docs] def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = self._tokenizer(text)
return self._merge_splits(splits, self._separator)
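A minimal usage sketch (illustrative only; the sample text and chunk size are invented, and `nltk` plus its "punkt" sentence-tokenizer data must be available):
import nltk
from langchain.text_splitter import NLTKTextSplitter

nltk.download("punkt")  # one-time download of the sentence tokenizer data
splitter = NLTKTextSplitter(chunk_size=100, chunk_overlap=0)
chunks = splitter.split_text("First sentence. Second sentence. Third sentence.")
print(chunks)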
[docs]class SpacyTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using Spacy."""
def __init__(
self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
) -> None:
"""Initialize the spacy text splitter."""
super().__init__(**kwargs)
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
self._tokenizer = spacy.load(pipeline)
self._separator = separator
[docs] def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = (str(s) for s in self._tokenizer(text).sents)
return self._merge_splits(splits, self._separator)
# For backwards compatibility
[docs]class PythonCodeTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Python syntax."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a PythonCodeTextSplitter.""" | https://python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
e990308dc849-17 | """Initialize a PythonCodeTextSplitter."""
separators = self.get_separators_for_language(Language.PYTHON)
super().__init__(separators=separators, **kwargs)
[docs]class MarkdownTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Markdown-formatted headings."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a MarkdownTextSplitter."""
separators = self.get_separators_for_language(Language.MARKDOWN)
super().__init__(separators=separators, **kwargs)
[docs]class LatexTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Latex-formatted layout elements."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a LatexTextSplitter."""
separators = self.get_separators_for_language(Language.LATEX)
super().__init__(separators=separators, **kwargs)
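For illustration, a minimal sketch of one of these convenience wrappers (the sample text and chunk size are invented example values):
from langchain.text_splitter import MarkdownTextSplitter

md = "# Title\n\nIntro paragraph.\n\n## Section\n\nBody text."
splitter = MarkdownTextSplitter(chunk_size=40, chunk_overlap=0)
print(splitter.split_text(md))  # chunks produced with the Markdown separator set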
Source code for langchain.document_transformers
"""Transform documents"""
from typing import Any, Callable, List, Sequence
import numpy as np
from pydantic import BaseModel, Field
from langchain.embeddings.base import Embeddings
from langchain.math_utils import cosine_similarity
from langchain.schema import BaseDocumentTransformer, Document
class _DocumentWithState(Document):
"""Wrapper for a document that includes arbitrary state."""
state: dict = Field(default_factory=dict)
"""State associated with the document."""
def to_document(self) -> Document:
"""Convert the DocumentWithState to a Document."""
return Document(page_content=self.page_content, metadata=self.metadata)
@classmethod
def from_document(cls, doc: Document) -> "_DocumentWithState":
"""Create a DocumentWithState from a Document."""
if isinstance(doc, cls):
return doc
return cls(page_content=doc.page_content, metadata=doc.metadata)
[docs]def get_stateful_documents(
documents: Sequence[Document],
) -> Sequence[_DocumentWithState]:
return [_DocumentWithState.from_document(doc) for doc in documents]
def _filter_similar_embeddings(
embedded_documents: List[List[float]], similarity_fn: Callable, threshold: float
) -> List[int]:
"""Filter redundant documents based on the similarity of their embeddings."""
similarity = np.tril(similarity_fn(embedded_documents, embedded_documents), k=-1)
redundant = np.where(similarity > threshold)
redundant_stacked = np.column_stack(redundant)
redundant_sorted = np.argsort(similarity[redundant])[::-1]
included_idxs = set(range(len(embedded_documents)))
for first_idx, second_idx in redundant_stacked[redundant_sorted]: | https://python.langchain.com/en/latest/_modules/langchain/document_transformers.html |
e98f17c4398f-1 | for first_idx, second_idx in redundant_stacked[redundant_sorted]:
if first_idx in included_idxs and second_idx in included_idxs:
# Default to dropping the second document of any highly similar pair.
included_idxs.remove(second_idx)
return list(sorted(included_idxs))
def _get_embeddings_from_stateful_docs(
embeddings: Embeddings, documents: Sequence[_DocumentWithState]
) -> List[List[float]]:
if len(documents) and "embedded_doc" in documents[0].state:
embedded_documents = [doc.state["embedded_doc"] for doc in documents]
else:
embedded_documents = embeddings.embed_documents(
[d.page_content for d in documents]
)
for doc, embedding in zip(documents, embedded_documents):
doc.state["embedded_doc"] = embedding
return embedded_documents
[docs]class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
"""Filter that drops redundant documents by comparing their embeddings."""
embeddings: Embeddings
"""Embeddings to use for embedding document contents."""
similarity_fn: Callable = cosine_similarity
"""Similarity function for comparing documents. Function expected to take as input
two matrices (List[List[float]]) and return a matrix of scores where higher values
indicate greater similarity."""
similarity_threshold: float = 0.95
"""Threshold for determining when two documents are similar enough
to be considered redundant."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Filter down documents."""
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents, self.similarity_fn, self.similarity_threshold
)
return [stateful_documents[i] for i in sorted(included_idxs)]
[docs] async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
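A self-contained usage sketch (illustrative only; it uses the FakeEmbeddings class shown later in this document, whose random vectors will rarely exceed the 0.95 threshold, so substitute a real Embeddings implementation to see near-duplicates actually dropped):
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import FakeEmbeddings
from langchain.schema import Document

docs = [
    Document(page_content="LangChain helps you build LLM apps."),
    Document(page_content="LangChain helps you build LLM apps."),
    Document(page_content="Something entirely different."),
]
redundant_filter = EmbeddingsRedundantFilter(embeddings=FakeEmbeddings(size=32))
filtered = redundant_filter.transform_documents(docs)
print(len(filtered))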
Source code for langchain.output_parsers.retry
from __future__ import annotations
from typing import TypeVar
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import (
BaseOutputParser,
OutputParserException,
PromptValue,
)
NAIVE_COMPLETION_RETRY = """Prompt:
{prompt}
Completion:
{completion}
Above, the Completion did not satisfy the constraints given in the Prompt.
Please try again:"""
NAIVE_COMPLETION_RETRY_WITH_ERROR = """Prompt:
{prompt}
Completion:
{completion}
Above, the Completion did not satisfy the constraints given in the Prompt.
Details: {error}
Please try again:"""
NAIVE_RETRY_PROMPT = PromptTemplate.from_template(NAIVE_COMPLETION_RETRY)
NAIVE_RETRY_WITH_ERROR_PROMPT = PromptTemplate.from_template(
NAIVE_COMPLETION_RETRY_WITH_ERROR
)
T = TypeVar("T")
[docs]class RetryOutputParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors.
Does this by passing the original prompt and the completion to another
LLM, and telling it the completion did not satisfy criteria in the prompt.
"""
parser: BaseOutputParser[T]
retry_chain: LLMChain
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_RETRY_PROMPT,
) -> RetryOutputParser[T]:
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain)
[docs] def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
try:
parsed_completion = self.parser.parse(completion)
except OutputParserException:
new_completion = self.retry_chain.run(
prompt=prompt_value.to_string(), completion=completion
)
parsed_completion = self.parser.parse(new_completion)
return parsed_completion
[docs] def parse(self, completion: str) -> T:
raise NotImplementedError(
"This OutputParser can only be called by the `parse_with_prompt` method."
)
[docs] def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "retry"
[docs]class RetryWithErrorOutputParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors.
Does this by passing the original prompt, the completion, AND the error
that was raised to another language model and telling it that the completion
did not work, and raised the given error. Differs from RetryOutputParser
in that this implementation provides the error that was raised back to the
LLM, which in theory should give it more information on how to fix it.
"""
parser: BaseOutputParser[T]
retry_chain: LLMChain
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_RETRY_WITH_ERROR_PROMPT,
) -> RetryWithErrorOutputParser[T]:
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain)
[docs] def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
try:
parsed_completion = self.parser.parse(completion)
except OutputParserException as e:
new_completion = self.retry_chain.run(
prompt=prompt_value.to_string(), completion=completion, error=repr(e)
)
parsed_completion = self.parser.parse(new_completion)
return parsed_completion
[docs] def parse(self, completion: str) -> T:
raise NotImplementedError(
"This OutputParser can only be called by the `parse_with_prompt` method."
)
[docs] def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "retry_with_error"
Source code for langchain.output_parsers.pydantic
import json
import re
from typing import Type, TypeVar
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import PYDANTIC_FORMAT_INSTRUCTIONS
from langchain.schema import BaseOutputParser, OutputParserException
T = TypeVar("T", bound=BaseModel)
[docs]class PydanticOutputParser(BaseOutputParser[T]):
pydantic_object: Type[T]
[docs] def parse(self, text: str) -> T:
try:
# Greedy search for 1st json candidate.
match = re.search(
r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL
)
json_str = ""
if match:
json_str = match.group()
json_object = json.loads(json_str, strict=False)
return self.pydantic_object.parse_obj(json_object)
except (json.JSONDecodeError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise OutputParserException(msg)
[docs] def get_format_instructions(self) -> str:
schema = self.pydantic_object.schema()
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "pydantic"
Source code for langchain.output_parsers.rail_parser
from __future__ import annotations
from typing import Any, Dict
from langchain.schema import BaseOutputParser
[docs]class GuardrailsOutputParser(BaseOutputParser):
guard: Any
@property
def _type(self) -> str:
return "guardrails"
[docs] @classmethod
def from_rail(cls, rail_file: str, num_reasks: int = 1) -> GuardrailsOutputParser:
try:
from guardrails import Guard
except ImportError:
raise ValueError(
"guardrails-ai package not installed. "
"Install it by running `pip install guardrails-ai`."
)
return cls(guard=Guard.from_rail(rail_file, num_reasks=num_reasks))
[docs] @classmethod
def from_rail_string(
cls, rail_str: str, num_reasks: int = 1
) -> GuardrailsOutputParser:
try:
from guardrails import Guard
except ImportError:
raise ValueError(
"guardrails-ai package not installed. "
"Install it by running `pip install guardrails-ai`."
)
return cls(guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks))
[docs] def get_format_instructions(self) -> str:
return self.guard.raw_prompt.format_instructions
[docs] def parse(self, text: str) -> Dict:
return self.guard.parse(text)
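A hypothetical sketch ("my_spec.rail" is a placeholder path to a RAIL spec file, and the completion string is invented):
from langchain.output_parsers import GuardrailsOutputParser

parser = GuardrailsOutputParser.from_rail("my_spec.rail", num_reasks=1)
print(parser.get_format_instructions())
result = parser.parse("raw LLM completion text to validate")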
Source code for langchain.output_parsers.regex
from __future__ import annotations
import re
from typing import Dict, List, Optional
from langchain.schema import BaseOutputParser
[docs]class RegexParser(BaseOutputParser):
"""Class to parse the output into a dictionary."""
regex: str
output_keys: List[str]
default_output_key: Optional[str] = None
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_parser"
[docs] def parse(self, text: str) -> Dict[str, str]:
"""Parse the output of an LLM call."""
match = re.search(self.regex, text)
if match:
return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)}
else:
if self.default_output_key is None:
raise ValueError(f"Could not parse output: {text}")
else:
return {
key: text if key == self.default_output_key else ""
for key in self.output_keys
}
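A minimal usage sketch (the pattern, keys, and text are invented example values):
from langchain.output_parsers import RegexParser

parser = RegexParser(
    regex=r"Score: (\d+)\nReason: (.*)",
    output_keys=["score", "reason"],
)
print(parser.parse("Score: 8\nReason: concise and accurate"))
# {'score': '8', 'reason': 'concise and accurate'}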
Source code for langchain.output_parsers.datetime
import random
from datetime import datetime, timedelta
from typing import List
from langchain.schema import BaseOutputParser, OutputParserException
from langchain.utils import comma_list
def _generate_random_datetime_strings(
pattern: str,
n: int = 3,
start_date: datetime = datetime(1, 1, 1),
end_date: datetime = datetime.now() + timedelta(days=3650),
) -> List[str]:
"""
Generates n random datetime strings conforming to the
given pattern within the specified date range.
Pattern should be a string containing the desired format codes.
start_date and end_date should be datetime objects representing
the start and end of the date range.
"""
examples = []
delta = end_date - start_date
for i in range(n):
random_delta = random.uniform(0, delta.total_seconds())
dt = start_date + timedelta(seconds=random_delta)
date_string = dt.strftime(pattern)
examples.append(date_string)
return examples
[docs]class DatetimeOutputParser(BaseOutputParser[datetime]):
format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
[docs] def get_format_instructions(self) -> str:
examples = comma_list(_generate_random_datetime_strings(self.format))
return f"""Write a datetime string that matches the
following pattern: "{self.format}". Examples: {examples}"""
[docs] def parse(self, response: str) -> datetime:
try:
return datetime.strptime(response.strip(), self.format)
except ValueError as e:
raise OutputParserException(
f"Could not parse datetime string: {response}"
) from e
@property
def _type(self) -> str:
return "datetime"
Source code for langchain.output_parsers.list
from __future__ import annotations
from abc import abstractmethod
from typing import List
from langchain.schema import BaseOutputParser
[docs]class ListOutputParser(BaseOutputParser):
"""Class to parse the output of an LLM call to a list."""
@property
def _type(self) -> str:
return "list"
[docs] @abstractmethod
def parse(self, text: str) -> List[str]:
"""Parse the output of an LLM call."""
[docs]class CommaSeparatedListOutputParser(ListOutputParser):
"""Parse out comma separated lists."""
[docs] def get_format_instructions(self) -> str:
return (
"Your response should be a list of comma separated values, "
"eg: `foo, bar, baz`"
)
[docs] def parse(self, text: str) -> List[str]:
"""Parse the output of an LLM call."""
return text.strip().split(", ")
Source code for langchain.output_parsers.structured
from __future__ import annotations
from typing import Any, List
from pydantic import BaseModel
from langchain.output_parsers.format_instructions import STRUCTURED_FORMAT_INSTRUCTIONS
from langchain.output_parsers.json import parse_and_check_json_markdown
from langchain.schema import BaseOutputParser
line_template = '\t"{name}": {type} // {description}'
[docs]class ResponseSchema(BaseModel):
name: str
description: str
type: str = "string"
def _get_sub_string(schema: ResponseSchema) -> str:
return line_template.format(
name=schema.name, description=schema.description, type=schema.type
)
[docs]class StructuredOutputParser(BaseOutputParser):
response_schemas: List[ResponseSchema]
[docs] @classmethod
def from_response_schemas(
cls, response_schemas: List[ResponseSchema]
) -> StructuredOutputParser:
return cls(response_schemas=response_schemas)
[docs] def get_format_instructions(self) -> str:
schema_str = "\n".join(
[_get_sub_string(schema) for schema in self.response_schemas]
)
return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)
[docs] def parse(self, text: str) -> Any:
expected_keys = [rs.name for rs in self.response_schemas]
return parse_and_check_json_markdown(text, expected_keys)
@property
def _type(self) -> str:
return "structured"
Source code for langchain.output_parsers.regex_dict
from __future__ import annotations
import re
from typing import Dict, Optional
from langchain.schema import BaseOutputParser
[docs]class RegexDictParser(BaseOutputParser):
"""Class to parse the output into a dictionary."""
regex_pattern: str = r"{}:\s?([^.'\n']*)\.?" # : :meta private:
output_key_to_format: Dict[str, str]
no_update_value: Optional[str] = None
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_dict_parser"
[docs] def parse(self, text: str) -> Dict[str, str]:
"""Parse the output of an LLM call."""
result = {}
for output_key, expected_format in self.output_key_to_format.items():
specific_regex = self.regex_pattern.format(re.escape(expected_format))
matches = re.findall(specific_regex, text)
if not matches:
raise ValueError(
f"No match found for output key: {output_key} with expected format \
{expected_format} on text {text}"
)
elif len(matches) > 1:
raise ValueError(
f"Multiple matches found for output key: {output_key} with \
expected format {expected_format} on text {text}"
)
elif (
self.no_update_value is not None and matches[0] == self.no_update_value
):
continue
else:
result[output_key] = matches[0]
return result
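An illustrative sketch (the key-to-format mapping and text are invented example values):
from langchain.output_parsers.regex_dict import RegexDictParser

parser = RegexDictParser(
    output_key_to_format={"action": "Action", "action_input": "Action Input"},
    no_update_value="N/A",
)
print(parser.parse("Action: search. Action Input: weather in SF."))
# {'action': 'search', 'action_input': 'weather in SF'}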
Source code for langchain.output_parsers.fix
from __future__ import annotations
from typing import TypeVar
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseOutputParser, OutputParserException
T = TypeVar("T")
[docs]class OutputFixingParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors."""
parser: BaseOutputParser[T]
retry_chain: LLMChain
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_FIX_PROMPT,
) -> OutputFixingParser[T]:
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain)
[docs] def parse(self, completion: str) -> T:
try:
parsed_completion = self.parser.parse(completion)
except OutputParserException as e:
new_completion = self.retry_chain.run(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
parsed_completion = self.parser.parse(new_completion)
return parsed_completion
[docs] def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "output_fixing"
Source code for langchain.embeddings.llamacpp
"""Wrapper around llama.cpp embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.embeddings.base import Embeddings
[docs]class LlamaCppEmbeddings(BaseModel, Embeddings):
"""Wrapper around llama.cpp embedding models.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain.embeddings import LlamaCppEmbeddings
llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
"""
client: Any #: :meta private:
model_path: str
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock") | https://python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html |
af0860406392-1 | use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use. If None, the number
of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
"""Number of layers to be loaded into gpu memory. Default None."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
model_param_names = [
"n_ctx",
"n_parts",
"seed",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"n_threads",
"n_batch",
]
model_params = {k: values[k] for k in model_param_names}
# For backwards compatibility, only include if non-null.
if values["n_gpu_layers"] is not None:
model_params["n_gpu_layers"] = values["n_gpu_layers"]
try:
from llama_cpp import Llama
values["client"] = Llama(model_path, embedding=True, **model_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import llama-cpp-python library. " | https://python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html |
af0860406392-2 | raise ModuleNotFoundError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception as e:
raise ValueError(
f"Could not load Llama model from path: {model_path}. "
f"Received error {e}"
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using the Llama model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = [self.client.embed(text) for text in texts]
return [list(map(float, e)) for e in embeddings]
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query using the Llama model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self.client.embed(text)
return list(map(float, embedding))
Source code for langchain.embeddings.tensorflow_hub
"""Wrapper around TensorflowHub embedding models."""
from typing import Any, List
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
[docs]class TensorflowHubEmbeddings(BaseModel, Embeddings):
"""Wrapper around tensorflow_hub embedding models.
To use, you should have the ``tensorflow_text`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import TensorflowHubEmbeddings
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
tf = TensorflowHubEmbeddings(model_url=url)
"""
embed: Any #: :meta private:
model_url: str = DEFAULT_MODEL_URL
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the tensorflow_hub and tensorflow_text."""
super().__init__(**kwargs)
try:
import tensorflow_hub
except ImportError:
raise ImportError(
"Could not import tensorflow-hub python package. "
"Please install it with `pip install tensorflow-hub``."
)
try:
import tensorflow_text # noqa
except ImportError:
raise ImportError(
"Could not import tensorflow_text python package. "
"Please install it with `pip install tensorflow_text``."
)
self.embed = tensorflow_hub.load(self.model_url)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a TensorflowHub embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.embed(texts).numpy()
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a TensorflowHub embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embedding = self.embed([text]).numpy()[0]
return embedding.tolist()
Source code for langchain.embeddings.elasticsearch
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain.utils import get_from_env
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
from langchain.embeddings.base import Embeddings
[docs]class ElasticsearchEmbeddings(Embeddings):
"""
Wrapper around Elasticsearch embedding models.
This class provides an interface to generate embeddings using a model deployed
in an Elasticsearch cluster. It requires an Elasticsearch connection object
and the model_id of the model deployed in the cluster.
In Elasticsearch you need to have an embedding model loaded and deployed.
- https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
- https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
""" # noqa: E501
def __init__(
self,
client: MlClient,
model_id: str,
*,
input_field: str = "text_field",
):
"""
Initialize the ElasticsearchEmbeddings instance.
Args:
client (MlClient): An Elasticsearch ML client object.
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
"""
self.client = client
self.model_id = model_id
self.input_field = input_field
[docs] @classmethod
def from_credentials(
cls,
model_id: str,
*,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""Instantiate embeddings from Elasticsearch credentials.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to.
es_user: (str, optional): Elasticsearch username.
es_password: (str, optional): Elasticsearch password.
Example:
.. code-block:: python
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Credentials can be passed in two ways. Either set the env vars
# ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
# pulled in, or pass them in directly as kwargs.
embeddings = ElasticsearchEmbeddings.from_credentials(
model_id,
input_field=input_field,
# es_cloud_id="foo",
# es_user="bar",
# es_password="baz",
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
"""
try:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
except ImportError:
raise ImportError(
"elasticsearch package not found, please install with 'pip install "
"elasticsearch'"
)
es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID")
es_user = es_user or get_from_env("es_user", "ES_USER")
es_password = es_password or get_from_env("es_password", "ES_PASSWORD")
# Connect to Elasticsearch
es_connection = Elasticsearch(
cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
)
client = MlClient(es_connection)
return cls(client, model_id, input_field=input_field)
[docs] @classmethod
def from_es_connection(
cls,
model_id: str,
es_connection: Elasticsearch,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""
Instantiate embeddings from an existing Elasticsearch connection.
This method provides a way to create an instance of the ElasticsearchEmbeddings
class using an existing Elasticsearch connection. The connection object is used
to create an MlClient, which is then used to initialize the
ElasticsearchEmbeddings instance.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch cluster.
es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch
connection object.
input_field (str, optional): The name of the key for the input text
field in the document. Defaults to 'text_field'.
Returns:
ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class.
Example:
.. code-block:: python
from elasticsearch import Elasticsearch
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
"""
# Importing MlClient from elasticsearch.client within the method to
# avoid unnecessary import if the method is not used
from elasticsearch.client import MlClient
# Create an MlClient from the given Elasticsearch connection
client = MlClient(es_connection)
# Return a new instance of the ElasticsearchEmbeddings class with
# the MlClient, model_id, and input_field
return cls(client, model_id, input_field=input_field)
def _embedding_func(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for the given texts using the Elasticsearch model.
Args:
texts (List[str]): A list of text strings to generate embeddings for.
Returns:
List[List[float]]: A list of embeddings, one for each text in the input
list.
"""
response = self.client.infer_trained_model(
model_id=self.model_id, docs=[{self.input_field: text} for text in texts]
)
embeddings = [doc["predicted_value"] for doc in response["inference_results"]]
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for a list of documents.
Args:
texts (List[str]): A list of document text strings to generate embeddings
for.
Returns:
List[List[float]]: A list of embeddings, one for each document in the input
list.
"""
return self._embedding_func(texts)
[docs] def embed_query(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
return self._embedding_func([text])[0]
Source code for langchain.embeddings.bedrock
import json
import os
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
[docs]class BedrockEmbeddings(BaseModel, Embeddings):
"""Embeddings provider to invoke Bedrock embedding models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from langchain.embeddings import BedrockEmbeddings
region_name = "us-east-1"
credentials_profile_name = "default"
model_id = "amazon.titan-e1t-medium"
be = BedrockEmbeddings(
credentials_profile_name=credentials_profile_name,
region_name=region_name,
model_id=model_id
)
"""
client: Any #: :meta private:
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str = "amazon.titan-e1t-medium"
"""Id of the model to call, e.g., amazon.titan-e1t-medium, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
values["client"] = session.client("bedrock", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
def _embedding_func(self, text: str) -> List[float]:
"""Call out to Bedrock embedding endpoint."""
# replace newlines, which can negatively affect performance.
text = text.replace(os.linesep, " ")
_model_kwargs = self.model_kwargs or {}
input_body = {**_model_kwargs}
input_body["inputText"] = text
body = json.dumps(input_body)
content_type = "application/json"
accepts = "application/json"
embeddings = []
try:
response = self.client.invoke_model(
body=body,
modelId=self.model_id,
accept=accepts,
contentType=content_type,
)
response_body = json.loads(response.get("body").read())
embeddings = response_body.get("embedding")
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
return embeddings
[docs] def embed_documents(
self, texts: List[str], chunk_size: int = 1
) -> List[List[float]]:
"""Compute doc embeddings using a Bedrock model.
Args:
texts: The list of texts to embed.
chunk_size: Bedrock currently only allows single string
inputs, so chunk size is always 1. This input is here
only for compatibility with the embeddings interface.
Returns:
List of embeddings, one for each text.
"""
results = []
for text in texts:
response = self._embedding_func(text)
results.append(response)
return results
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a Bedrock model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embedding_func(text)
Source code for langchain.embeddings.self_hosted_hugging_face
"""Wrapper around HuggingFace embedding models for self-hosted remote hardware."""
import importlib
import logging
from typing import Any, Callable, List, Optional
from langchain.embeddings.self_hosted import SelfHostedEmbeddings
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
logger = logging.getLogger(__name__)
def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return client.encode(*args, **kwargs)
def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any:
"""Load the embedding model."""
if not instruct:
import sentence_transformers
client = sentence_transformers.SentenceTransformer(model_id)
else:
from InstructorEmbedding import INSTRUCTOR
client = INSTRUCTOR(model_id)
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
client = client.to(device)
return client
[docs]class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
"""Runs sentence_transformers embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import SelfHostedHuggingFaceEmbeddings
import runhouse as rh
model_name = "sentence-transformers/all-mpnet-base-v2"
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu)
"""
client: Any #: :meta private:
model_id: str = DEFAULT_MODEL_NAME
"""Model name to use."""
model_reqs: List[str] = ["./", "sentence_transformers", "torch"]
"""Requirements to install on hardware to inference the model."""
hardware: Any
"""Remote hardware to send the inference function to."""
model_load_fn: Callable = load_embedding_model
"""Function to load the model remotely on the server."""
load_fn_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model load function."""
inference_fn: Callable = _embed_documents
"""Inference function to extract the embeddings."""
def __init__(self, **kwargs: Any):
"""Initialize the remote inference function."""
load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME)
load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False)
load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
[docs]class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings):
"""Runs InstructorEmbedding embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings
import runhouse as rh
model_name = "hkunlp/instructor-large"
gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
hf = SelfHostedHuggingFaceInstructEmbeddings(
model_name=model_name, hardware=gpu)
""" | https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html |
38c1e1d83a2e-3 | model_name=model_name, hardware=gpu)
"""
model_id: str = DEFAULT_INSTRUCT_MODEL
"""Model name to use."""
embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
"""Instruction to use for embedding documents."""
query_instruction: str = DEFAULT_QUERY_INSTRUCTION
"""Instruction to use for embedding query."""
model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"]
"""Requirements to install on hardware to inference the model."""
def __init__(self, **kwargs: Any):
"""Initialize the remote inference function."""
load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
load_fn_kwargs["model_id"] = load_fn_kwargs.get(
"model_id", DEFAULT_INSTRUCT_MODEL
)
load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True)
load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = []
for text in texts:
instruction_pairs.append([self.embed_instruction, text])
embeddings = self.client(self.pipeline_ref, instruction_pairs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
return embedding.tolist()
Source code for langchain.embeddings.huggingface_hub
"""Wrapper around HuggingFace Hub embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2"
VALID_TASKS = ("feature-extraction",)
[docs]class HuggingFaceHubEmbeddings(BaseModel, Embeddings):
"""Wrapper around HuggingFaceHub embedding models.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceHubEmbeddings
repo_id = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceHubEmbeddings(
repo_id=repo_id,
task="feature-extraction",
huggingfacehub_api_token="my-api-key",
)
"""
client: Any #: :meta private:
repo_id: str = DEFAULT_REPO_ID
"""Model name to use."""
task: Optional[str] = "feature-extraction"
"""Task to call the model with."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
if not repo_id.startswith("sentence-transformers"):
raise ValueError(
"Currently only 'sentence-transformers' embedding models "
f"are supported. Got invalid 'repo_id' {repo_id}."
)
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to HuggingFaceHub's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
# replace newlines, which can negatively affect performance.
texts = [text.replace("\n", " ") for text in texts]
_model_kwargs = self.model_kwargs or {}
responses = self.client(inputs=texts, params=_model_kwargs)
return responses
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to HuggingFaceHub's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
response = self.embed_documents([text])[0]
return response
Source code for langchain.embeddings.aleph_alpha
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
"""
Wrapper for Aleph Alpha's Asymmetric Embeddings
AA provides you with an endpoint to embed a document and a query.
The models were optimized to make the embeddings of documents and
the query for a document as similar as possible.
To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/
Example:
.. code-block:: python
from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding
embeddings = AlephAlphaAsymmetricSemanticEmbedding()
document = "This is a content of the document"
query = "What is the content of the document?"
doc_result = embeddings.embed_documents([document])
query_result = embeddings.embed_query(query)
"""
client: Any #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
hosting: Optional[str] = "https://api.aleph-alpha.com"
"""Optional parameter that specifies which datacenters may process the request."""
normalize: Optional[bool] = True
"""Should returned embeddings be normalized"""
compress_to_size: Optional[int] = 128
"""Should the returned embeddings come back as an original 5120-dim vector,
or should it be compressed to 128-dim."""
contextual_control_threshold: Optional[int] = None
"""Attention control parameters only apply to those tokens that have | https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
094599aa4c01-1 | """Attention control parameters only apply to those tokens that have
explicitly been set in the request."""
control_log_additive: Optional[bool] = True
"""Apply controls on prompt items by adding the log(control_factor)
to attention scores."""
aleph_alpha_api_key: Optional[str] = None
"""API key for Aleph Alpha API."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
aleph_alpha_api_key = get_from_dict_or_env(
values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
)
try:
from aleph_alpha_client import Client
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
values["client"] = Client(token=aleph_alpha_api_key)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Aleph Alpha's asymmetric Document endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
document_embeddings = []
for text in texts:
document_params = {
"prompt": Prompt.from_text(text), | https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
094599aa4c01-2 | document_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Document,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
document_request = SemanticEmbeddingRequest(**document_params)
document_response = self.client.semantic_embed(
request=document_request, model=self.model
)
document_embeddings.append(document_response.embedding)
return document_embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Aleph Alpha's asymmetric, query embedding endpoint
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
symmetric_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Query,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
symmetric_request = SemanticEmbeddingRequest(**symmetric_params)
symmetric_response = self.client.semantic_embed(
request=symmetric_request, model=self.model
)
return symmetric_response.embedding
[docs]class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding):
"""The symmetric version of the Aleph Alpha's semantic embeddings.
The main difference is that here, both the documents and
queries are embedded with a SemanticRepresentation.Symmetric
Example:
.. code-block:: python
from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding
embeddings = AlephAlphaSymmetricSemanticEmbedding()
text = "This is a test text"
doc_result = embeddings.embed_documents([text])
query_result = embeddings.embed_query(text)
"""
def _embed(self, text: str) -> List[float]:
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
query_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Symmetric,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
query_request = SemanticEmbeddingRequest(**query_params)
query_response = self.client.semantic_embed(
request=query_request, model=self.model
)
return query_response.embedding
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Aleph Alpha's Document endpoint. | https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
094599aa4c01-4 | """Call out to Aleph Alpha's Document endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
document_embeddings = []
for text in texts:
document_embeddings.append(self._embed(text))
return document_embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Aleph Alpha's asymmetric, query embedding endpoint
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embed(text)
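Because the symmetric class embeds documents and queries identically, their vectors can be compared directly. A sketch (not part of the original module), again assuming ALEPH_ALPHA_API_KEY is set:
import numpy as np

from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding

embeddings = AlephAlphaSymmetricSemanticEmbedding()
v1 = np.array(embeddings.embed_query("This is a test text"))
v2 = np.array(embeddings.embed_query("This is another test text"))
# Cosine similarity; with normalize=True the norms are already 1.
similarity = float(v1 @ v2 / (np.linalg.norm(v1) * np.linalg.norm(v2)))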
Source code for langchain.embeddings.fake
from typing import List
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
[docs]class FakeEmbeddings(Embeddings, BaseModel):
size: int
def _get_embedding(self) -> List[float]:
return list(np.random.normal(size=self.size))
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self._get_embedding() for _ in texts]
[docs] def embed_query(self, text: str) -> List[float]:
return self._get_embedding()
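FakeEmbeddings is handy in tests, where real embedding calls would be slow or require credentials. A small sketch (not part of the original module); the vectors are drawn from np.random, so seed NumPy first if reproducibility matters:
import numpy as np

from langchain.embeddings import FakeEmbeddings

np.random.seed(0)
fake = FakeEmbeddings(size=8)
vectors = fake.embed_documents(["first", "second"])
assert len(vectors) == 2 and len(vectors[0]) == 8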
Source code for langchain.embeddings.cohere
"""Wrapper around Cohere embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class CohereEmbeddings(BaseModel, Embeddings):
"""Wrapper around Cohere embedding models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(
model="embed-english-light-v2.0", cohere_api_key="my-api-key"
)
"""
client: Any #: :meta private:
model: str = "embed-english-v2.0"
"""Model name to use."""
truncate: Optional[str] = None
"""Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
cohere_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ValueError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = self.client.embed(
model=self.model, texts=texts, truncate=self.truncate
).embeddings
return [list(map(float, e)) for e in embeddings]
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self.client.embed(
model=self.model, texts=[text], truncate=self.truncate
).embeddings[0]
return list(map(float, embedding))
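A usage sketch (not part of the original module), assuming COHERE_API_KEY is set in the environment; truncate="END" asks the API to cut over-long inputs from the end instead of erroring:
from langchain.embeddings import CohereEmbeddings

cohere = CohereEmbeddings(model="embed-english-light-v2.0", truncate="END")
doc_vectors = cohere.embed_documents(["first text", "second text"])
query_vector = cohere.embed_query("a short question")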
Source code for langchain.embeddings.modelscope_hub
"""Wrapper around ModelScopeHub embedding models."""
from typing import Any, List
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
[docs]class ModelScopeEmbeddings(BaseModel, Embeddings):
"""Wrapper around modelscope_hub embedding models.
To use, you should have the ``modelscope`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import ModelScopeEmbeddings
model_id = "damo/nlp_corom_sentence-embedding_english-base"
embed = ModelScopeEmbeddings(model_id=model_id)
"""
embed: Any
model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the modelscope"""
super().__init__(**kwargs)
try:
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
self.embed = pipeline(Tasks.sentence_embedding, model=self.model_id)
except ImportError as e:
raise ImportError(
"Could not import some python packages."
"Please install it with `pip install modelscope`."
) from e
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a modelscope embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
inputs = {"source_sentence": texts}
embeddings = self.embed(input=inputs)["text_embedding"]
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
inputs = {"source_sentence": [text]}
embedding = self.embed(input=inputs)["text_embedding"][0]
return embedding.tolist()
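A usage sketch (not part of the original module); the model weights are downloaded from the ModelScope hub on first use, and newlines are replaced with spaces before embedding:
from langchain.embeddings import ModelScopeEmbeddings

embed = ModelScopeEmbeddings(model_id="damo/nlp_corom_sentence-embedding_english-base")
doc_vectors = embed.embed_documents(["line one\nline two", "another document"])
query_vector = embed.embed_query("a query")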
Source code for langchain.embeddings.huggingface
"""Wrapper around HuggingFace embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, Field
from langchain.embeddings.base import Embeddings
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
[docs]class HuggingFaceEmbeddings(BaseModel, Embeddings):
"""Wrapper around sentence_transformers embedding models.
To use, you should have the ``sentence_transformers`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceEmbeddings
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': False}
hf = HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
"""
client: Any #: :meta private:
model_name: str = DEFAULT_MODEL_NAME
"""Model name to use."""
cache_folder: Optional[str] = None
"""Path to store models.
Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass to the model."""
encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass when calling the `encode` method of the model.""" | https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html |
682a003d0ef2-1 | """Key word arguments to pass when calling the `encode` method of the model."""
def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
import sentence_transformers
except ImportError as exc:
raise ImportError(
"Could not import sentence_transformers python package. "
"Please install it with `pip install sentence_transformers`."
) from exc
self.client = sentence_transformers.SentenceTransformer(
self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.client.encode(texts, **self.encode_kwargs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embedding = self.client.encode(text, **self.encode_kwargs)
return embedding.tolist()
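A sketch (not part of the original module) showing the effect of encode_kwargs: with normalize_embeddings=True, sentence-transformers returns unit-length vectors, so a plain dot product equals cosine similarity:
import numpy as np

from langchain.embeddings import HuggingFaceEmbeddings

hf = HuggingFaceEmbeddings(encode_kwargs={"normalize_embeddings": True})
vec = np.array(hf.embed_query("hello world"))
print(np.linalg.norm(vec))  # approximately 1.0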
[docs]class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
"""Wrapper around sentence_transformers embedding models.
To use, you should have the ``sentence_transformers``
and ``InstructorEmbedding`` python packages installed.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceInstructEmbeddings
model_name = "hkunlp/instructor-large"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
hf = HuggingFaceInstructEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
"""
client: Any #: :meta private:
model_name: str = DEFAULT_INSTRUCT_MODEL
"""Model name to use."""
cache_folder: Optional[str] = None
"""Path to store models.
Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass to the model."""
encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass when calling the `encode` method of the model."""
embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
"""Instruction to use for embedding documents."""
query_instruction: str = DEFAULT_QUERY_INSTRUCTION
"""Instruction to use for embedding query."""
def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
from InstructorEmbedding import INSTRUCTOR
self.client = INSTRUCTOR(
self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
)
except ImportError as e:
raise ValueError("Dependencies for InstructorEmbedding not found.") from e
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [[self.embed_instruction, text] for text in texts]
embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedding = self.client.encode([instruction_pair], **self.encode_kwargs)[0]
return embedding.tolist()
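A usage sketch (not part of the original module) with custom instructions; each text is paired with its instruction before encoding, as embed_documents and embed_query do above:
from langchain.embeddings import HuggingFaceInstructEmbeddings

hf = HuggingFaceInstructEmbeddings(
    embed_instruction="Represent the Wikipedia document for retrieval: ",
    query_instruction="Represent the Wikipedia question for retrieving supporting documents: ",
)
doc_vectors = hf.embed_documents(["LangChain is a framework for building LLM applications."])
query_vector = hf.embed_query("What is LangChain?")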
Source code for langchain.embeddings.deepinfra
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
DEFAULT_MODEL_ID = "sentence-transformers/clip-ViT-B-32"
[docs]class DeepInfraEmbeddings(BaseModel, Embeddings):
"""Wrapper around Deep Infra's embedding inference service.
To use, you should have the
environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
There are multiple embeddings models available,
see https://deepinfra.com/models?type=embeddings.
Example:
.. code-block:: python
from langchain.embeddings import DeepInfraEmbeddings
deepinfra_emb = DeepInfraEmbeddings(
model_id="sentence-transformers/clip-ViT-B-32",
deepinfra_api_token="my-api-key"
)
r1 = deepinfra_emb.embed_documents(
[
"Alpha is the first letter of Greek alphabet",
"Beta is the second letter of Greek alphabet",
]
)
r2 = deepinfra_emb.embed_query(
"What is the second letter of Greek alphabet"
)
"""
model_id: str = DEFAULT_MODEL_ID
"""Embeddings model to use."""
normalize: bool = False
"""whether to normalize the computed embeddings"""
embed_instruction: str = "passage: "
"""Instruction used to embed documents."""
query_instruction: str = "query: "
"""Instruction used to embed the query."""
model_kwargs: Optional[dict] = None
"""Other model keyword args"""
deepinfra_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(
values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
)
values["deepinfra_api_token"] = deepinfra_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"model_id": self.model_id}
def _embed(self, input: List[str]) -> List[List[float]]:
_model_kwargs = self.model_kwargs or {}
# HTTP headers for authorization
headers = {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
# send request
try:
res = requests.post(
f"https://api.deepinfra.com/v1/inference/{self.model_id}",
headers=headers,
json={"inputs": input, "normalize": self.normalize, **_model_kwargs},
)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
if res.status_code != 200:
raise ValueError(
"Error raised by inference API HTTP code: %s, %s"
% (res.status_code, res.text)
)
try:
t = res.json()
embeddings = t["embeddings"] | https://python.langchain.com/en/latest/_modules/langchain/embeddings/deepinfra.html |
efe14e94b84b-2 | try:
t = res.json()
embeddings = t["embeddings"]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {res.text}"
)
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a Deep Infra deployed embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [f"{self.query_instruction}{text}" for text in texts]
embeddings = self._embed(instruction_pairs)
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query using a Deep Infra deployed embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = f"{self.query_instruction}{text}"
embedding = self._embed([instruction_pair])[0]
return embedding
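For reference, a sketch (not part of the original module) of the raw HTTP call that _embed performs, with a hypothetical token; the request and response shapes mirror the code above:
import requests

token = "my-api-token"  # hypothetical; normally read from DEEPINFRA_API_TOKEN
res = requests.post(
    "https://api.deepinfra.com/v1/inference/sentence-transformers/clip-ViT-B-32",
    headers={"Authorization": f"bearer {token}", "Content-Type": "application/json"},
    json={"inputs": ["query: What is the second letter of Greek alphabet"], "normalize": False},
)
res.raise_for_status()
vectors = res.json()["embeddings"]  # one embedding per input string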