status
stringclasses
1 value
repo_name
stringclasses
31 values
repo_url
stringclasses
31 values
issue_id
int64
1
104k
title
stringlengths
4
233
body
stringlengths
0
186k
issue_url
stringlengths
38
56
pull_url
stringlengths
37
54
before_fix_sha
stringlengths
40
40
after_fix_sha
stringlengths
40
40
report_datetime
unknown
language
stringclasses
5 values
commit_datetime
unknown
updated_file
stringlengths
7
188
chunk_content
stringlengths
1
1.03M
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
import warnings from typing import ( AbstractSet, Any, AsyncIterator, Callable, Collection, Dict, Iterator, List, Literal, Mapping, Optional, Set, Tuple, Union, ) from langchain_core.outputs import Generation, GenerationChunk, LLMResult from langchain_core.pydantic_v1 import Field, root_validator from langchain_core.utils import get_pydantic_field_names from langchain_core.utils.utils import build_extra_kwargs from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import BaseLLM, create_base_retry_decorator from langchain.utils import get_from_dict_or_env from langchain.utils.openai import is_openai_v1 logger = logging.getLogger(__name__) def update_token_usage(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any] ) -> None: """Update token usage.""" _keys_to_use = keys.intersection(response["usage"]) for _key in _keys_to_use: if _key not in token_usage: token_usage[_key] = response["usage"][_key] else: token_usage[_key] += response["usage"][_key] def _stream_response_to_generation_chunk( stream_response: Dict[str, Any], ) -> GenerationChunk: """Convert a stream response to a generation chunk.""" if not stream_response["choices"]: return GenerationChunk(text="") return GenerationChunk( text=stream_response["choices"][0]["text"], generation_info=dict( finish_reason=stream_response["choices"][0].get("finish_reason", None), logprobs=stream_response["choices"][0].get("logprobs", None), ), ) def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None: """Update response from the stream response.""" response["choices"][0]["text"] += stream_response["choices"][0]["text"] response["choices"][0]["finish_reason"] = stream_response["choices"][0].get( "finish_reason", None ) response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"] def _streaming_response_template() -> Dict[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
return { "choices": [ { "text": "", "finish_reason": None, "logprobs": None, } ] } def _create_retry_decorator( llm: Union[BaseOpenAI, OpenAIChat], run_manager: Optional[ Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] ] = None, ) -> Callable[[Any], Any]: import openai errors = [ openai.error.Timeout, openai.error.APIError, openai.error.APIConnectionError, openai.error.RateLimitError, openai.error.ServiceUnavailableError, ] return create_base_retry_decorator( error_types=errors, max_retries=llm.max_retries, run_manager=run_manager ) def completion_with_retry(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
llm: Union[BaseOpenAI, OpenAIChat], run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" if is_openai_v1(): return llm.client.create(**kwargs) retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.create(**kwargs) return _completion_with_retry(**kwargs) async def acompletion_with_retry( llm: Union[BaseOpenAI, OpenAIChat], run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the async completion call.""" if is_openai_v1(): return await llm.async_client.create(**kwargs) retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) class BaseOpenAI(BaseLLM):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Base OpenAI large language model class.""" @property def lc_secrets(self) -> Dict[str, str]: return {"openai_api_key": "OPENAI_API_KEY"} @property def lc_attributes(self) -> Dict[str, Any]: attributes: Dict[str, Any] = {} if self.openai_api_base: attributes["openai_api_base"] = self.openai_api_base if self.openai_organization: attributes["openai_organization"] = self.openai_organization if self.openai_proxy: attributes["openai_proxy"] = self.openai_proxy return attributes @classmethod def is_lc_serializable(cls) -> bool: return True client: Any = Field(default=None, exclude=True) async_client: Any = Field(default=None, exclude=True) model_name: str = Field(default="text-davinci-003", alias="model") """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion.
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
-1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" best_of: int = 1 """Generates best_of completions server-side and returns the "best".""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = Field(default=None, alias="api_key") """Automatically inferred from env var `OPENAI_API_KEY` if not provided.""" openai_api_base: Optional[str] = Field(default=None, alias="base_url") """Base URL path for API requests, leave blank if not using a proxy or service emulator.""" openai_organization: Optional[str] = Field(default=None, alias="organization") """Automatically inferred from env var `OPENAI_ORG_ID` if not provided.""" openai_proxy: Optional[str] = None batch_size: int = 20 """Batch size to use when passing multiple documents to generate.""" request_timeout: Union[float, Tuple[float, float], Any, None] = Field( default=None, alias="timeout"
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
) """Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or None.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" max_retries: int = 2 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" allowed_special: Union[Literal["all"], AbstractSet[str]] = set() """Set of special tokens that are allowed。""" disallowed_special: Union[Literal["all"], Collection[str]] = "all" """Set of special tokens that are not allowed。""" tiktoken_model_name: Optional[str] = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" default_headers: Union[Mapping[str, str], None] = None default_query: Union[Mapping[str, object], None] = None # Co # [h http_client: Union[Any, None] = None """Optional httpx.Client.""" def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # ty
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Initialize the OpenAI object.""" model_name = data.get("model_name", "") if ( model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4") ) and "-instruct" not in model_name: warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. Instead, please use: " "`from langchain.chat_models import ChatOpenAI`" ) return OpenAIChat(**data) return super().__new__(cls) class Config: """Configuration for this pydantic object.""" allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) values["model_kwargs"] = build_extra_kwargs( extra, values, all_required_field_names ) return values @root_validator() def validate_environment(cls, values: Dict) -> Dict:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Validate that api key and python package exists in environment.""" if values["n"] < 1: raise ValueError("n must be at least 1.") if values["streaming"] and values["n"] > 1: raise ValueError("Cannot stream results when n > 1.") if values["streaming"] and values["best_of"] > 1: raise ValueError("Cannot stream results when best_of > 1.") values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_api_base"] = values["openai_api_base"] or os.getenv( "OPENAI_API_BASE" ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) values["openai_organization"] = ( values["openai_organization"] or os.getenv("OPENAI_ORG_ID") or os.getenv("OPENAI_ORGANIZATION")
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
) try: import openai except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) if is_openai_v1(): client_params = { "api_key": values["openai_api_key"], "organization": values["openai_organization"], "base_url": values["openai_api_base"], "timeout": values["request_timeout"], "max_retries": values["max_retries"], "default_headers": values["default_headers"], "default_query": values["default_query"], "http_client": values["http_client"], } if not values.get("client"): values["client"] = openai.OpenAI(**client_params).completions if not values.get("async_client"): values["async_client"] = openai.AsyncOpenAI(**client_params).completions elif not values.get("client"): values["client"] = openai.Completion else: pass return values @property def _default_params(self) -> Dict[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Get the default parameters for calling OpenAI API.""" normal_params: Dict[str, Any] = { "temperature": self.temperature, "top_p": self.top_p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } if self.max_tokens is not None: normal_params["max_tokens"] = self.max_tokens if self.request_timeout is not None and not is_openai_v1(): normal_params["request_timeout"] = self.request_timeout # Az # do if self.best_of > 1: normal_params["best_of"] = self.best_of return {**normal_params, **self.model_kwargs} def _stream(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: params = {**self._invocation_params, **kwargs, "stream": True} self.get_sub_prompts(params, [prompt], stop) # th for stream_resp in completion_with_retry( self, prompt=prompt, run_manager=run_manager, **params ): if not isinstance(stream_resp, dict): stream_resp = stream_resp.dict() chunk = _stream_response_to_generation_chunk(stream_resp) yield chunk if run_manager: run_manager.on_llm_new_token( chunk.text, chunk=chunk, verbose=self.verbose, logprobs=chunk.generation_info["logprobs"] if chunk.generation_info else None, ) async def _astream(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: params = {**self._invocation_params, **kwargs, "stream": True} self.get_sub_prompts(params, [prompt], stop) # th async for stream_resp in await acompletion_with_retry( self, prompt=prompt, run_manager=run_manager, **params ): if not isinstance(stream_resp, dict): stream_resp = stream_resp.dict() chunk = _stream_response_to_generation_chunk(stream_resp) yield chunk if run_manager: await run_manager.on_llm_new_token( chunk.text, chunk=chunk, verbose=self.verbose, logprobs=chunk.generation_info["logprobs"] if chunk.generation_info else None, ) def _generate(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Call out to OpenAI's endpoint with k unique prompts. Args:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
prompts: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: The full LLM output. Example: .. code-block:: python response = openai.generate(["Tell me a joke."]) """ # TO params = self._invocation_params params = {**params, **kwargs} sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Ge # In _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} system_fingerprint: Optional[str] = None for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") generation: Optional[GenerationChunk] = None for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs): if generation is None: generation = chunk else: generation += chunk assert generation is not None choices.append(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
{ "text": generation.text, "finish_reason": generation.generation_info.get("finish_reason") if generation.generation_info else None, "logprobs": generation.generation_info.get("logprobs") if generation.generation_info else None, } ) else: response = completion_with_retry( self, prompt=_prompts, run_manager=run_manager, **params ) if not isinstance(response, dict): # V1 # di response = response.dict() choices.extend(response["choices"]) update_token_usage(_keys, response, token_usage) if not system_fingerprint: system_fingerprint = response.get("system_fingerprint") return self.create_llm_result( choices, prompts, params, token_usage, system_fingerprint=system_fingerprint, ) async def _agenerate(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Call out to OpenAI's endpoint async with k unique prompts.""" params = self._invocation_params params = {**params, **kwargs} sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Ge # In _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} system_fingerprint: Optional[str] = None for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") generation: Optional[GenerationChunk] = None async for chunk in self._astream( _prompts[0], stop, run_manager, **kwargs ): if generation is None: generation = chunk
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
else: generation += chunk assert generation is not None choices.append( { "text": generation.text, "finish_reason": generation.generation_info.get("finish_reason") if generation.generation_info else None, "logprobs": generation.generation_info.get("logprobs") if generation.generation_info else None, } ) else: response = await acompletion_with_retry( self, prompt=_prompts, run_manager=run_manager, **params ) if not isinstance(response, dict): response = response.dict() choices.extend(response["choices"]) update_token_usage(_keys, response, token_usage) return self.create_llm_result( choices, prompts, params, token_usage, system_fingerprint=system_fingerprint, ) def get_sub_prompts(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
self, params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]] = None, ) -> List[List[str]]: """Get the sub prompts for llm call.""" if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop if params["max_tokens"] == -1: if len(prompts) != 1: raise ValueError( "max_tokens set to -1 not supported for multiple inputs." ) params["max_tokens"] = self.max_tokens_for_prompt(prompts[0]) sub_prompts = [ prompts[i : i + self.batch_size] for i in range(0, len(prompts), self.batch_size) ] return sub_prompts def create_llm_result(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
self, choices: Any, prompts: List[str], params: Dict[str, Any], token_usage: Dict[str, int], *, system_fingerprint: Optional[str] = None, ) -> LLMResult: """Create the LLMResult from the choices and prompts.""" generations = [] n = params.get("n", self.n) for i, _ in enumerate(prompts): sub_choices = choices[i * n : (i + 1) * n] generations.append( [ Generation( text=choice["text"], generation_info=dict( finish_reason=choice.get("finish_reason"), logprobs=choice.get("logprobs"), ),
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
) for choice in sub_choices ] ) llm_output = {"token_usage": token_usage, "model_name": self.model_name} if system_fingerprint: llm_output["system_fingerprint"] = system_fingerprint return LLMResult(generations=generations, llm_output=llm_output) @property def _invocation_params(self) -> Dict[str, Any]: """Get the parameters used to invoke the model.""" openai_creds: Dict[str, Any] = {} if not is_openai_v1(): openai_creds.update( { "api_key": self.openai_api_key, "api_base": self.openai_api_base, "organization": self.openai_organization, } ) if self.openai_proxy: import openai openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # ty return {**openai_creds, **self._default_params} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Return type of llm.""" return "openai" def get_token_ids(self, text: str) -> List[int]: """Get the token IDs using the tiktoken package.""" # ti if sys.version_info[1] < 8: return super().get_num_tokens(text) try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." ) model_name = self.tiktoken_model_name or self.model_name try: enc = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" enc = tiktoken.get_encoding(model) return enc.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) @staticmethod def modelname_to_contextsize(modelname: str) -> int:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Calculate the maximum number of tokens possible to generate for a model. Args: modelname: The modelname we want to know the context size for. Returns: The maximum context size Example: .. code-block:: python max_tokens = openai.modelname_to_contextsize("text-davinci-003") """ model_token_mapping = { "gpt-4": 8192, "gpt-4-0314": 8192, "gpt-4-0613": 8192, "gpt-4-32k": 32768, "gpt-4-32k-0314": 32768, "gpt-4-32k-0613": 32768, "gpt-3.5-turbo": 4096, "gpt-3.5-turbo-0301": 4096,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"gpt-3.5-turbo-0613": 4096, "gpt-3.5-turbo-16k": 16385, "gpt-3.5-turbo-16k-0613": 16385, "gpt-3.5-turbo-instruct": 4096, "text-ada-001": 2049, "ada": 2049, "text-babbage-001": 2040, "babbage": 2049, "text-curie-001": 2049, "curie": 2049, "davinci": 2049, "text-davinci-003": 4097, "text-davinci-002": 4097, "code-davinci-002": 8001, "code-davinci-001": 8001, "code-cushman-002": 2048, "code-cushman-001": 2048, } # ha if "ft-" in modelname: modelname = modelname.split(":")[0] context_size = model_token_mapping.get(modelname, None) if context_size is None: raise ValueError( f"Unknown model: {modelname}. Please provide a valid OpenAI model name." "Known models are: " + ", ".join(model_token_mapping.keys()) ) return context_size @property def max_context_size(self) -> int:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Get max context size for this model.""" return self.modelname_to_contextsize(self.model_name) def max_tokens_for_prompt(self, prompt: str) -> int: """Calculate the maximum number of tokens possible to generate for a prompt. Args: prompt: The prompt to pass into the model. Returns: The maximum number of tokens to generate for a prompt. Example: .. code-block:: python max_tokens = openai.max_token_for_prompt("Tell me a joke.") """ num_tokens = self.get_num_tokens(prompt) return self.max_context_size - num_tokens class OpenAI(BaseOpenAI): """OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import OpenAI openai = OpenAI(model_name="text-davinci-003") """ @property def _invocation_params(self) -> Dict[str, Any]: return {**{"model": self.model_name}, **super()._invocation_params} class AzureOpenAI(BaseOpenAI):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Azure-specific OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import AzureOpenAI openai = AzureOpenAI(model_name="text-davinci-003") """ azure_endpoint: Union[str, None] = None """Your Azure endpoint, including the resource. Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided. Example: `https://example-resource.azure.openai.com/` """ deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment") """A model deployment. If given sets the base client URL to include `/deployments/{azure_deployment}`. Note: this means you won't be able to use non-deployment endpoints. """ openai_api_version: str = Field(default="", alias="api_version") """Automatically inferred from env var `OPENAI_API_VERSION` if not provided.""" openai_api_key: Union[str, None] = Field(default=None, alias="api_key") """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided.""" azure_ad_token: Union[str, None] = None """Your Azure Active Directory token. Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided. For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. """ # no
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
azure_ad_token_provider: Union[str, None] = None """A function that returns an Azure Active Directory token. Will be invoked on every request. """ openai_api_type: str = "" """Legacy, for openai<1.0.0 support.""" validate_base_url: bool = True """For backwards compatibility. If legacy val openai_api_base is passed in, try to infer if it is a base_url or azure_endpoint and update accordingly. """ @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" if values["n"] < 1: raise ValueError("n must be at least 1.") if values["streaming"] and values["n"] > 1: raise ValueError("Cannot stream results when n > 1.") if values["streaming"] and values["best_of"] > 1: raise ValueError("Cannot stream results when best_of > 1.") # Ch # TO # ot values["openai_api_key"] = ( values["openai_api_key"] or os.getenv("AZURE_OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY") ) values["azure_endpoint"] = values["azure_endpoint"] or os.getenv( "AZURE_OPENAI_ENDPOINT" )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
values["azure_ad_token"] = values["azure_ad_token"] or os.getenv( "AZURE_OPENAI_AD_TOKEN" ) values["openai_api_base"] = values["openai_api_base"] or os.getenv( "OPENAI_API_BASE" ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) values["openai_organization"] = ( values["openai_organization"] or os.getenv("OPENAI_ORG_ID") or os.getenv("OPENAI_ORGANIZATION") ) values["openai_api_version"] = values["openai_api_version"] or os.getenv( "OPENAI_API_VERSION" ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", default="azure" ) try: import openai except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
if is_openai_v1(): # Fo # be openai_api_base = values["openai_api_base"] if openai_api_base and values["validate_base_url"]: if "/openai" not in openai_api_base: values["openai_api_base"] = ( values["openai_api_base"].rstrip("/") + "/openai" ) warnings.warn( "As of openai>=1.0.0, Azure endpoints should be specified via " f"the `azure_endpoint` param not `openai_api_base` " f"(or alias `base_url`). Updating `openai_api_base` from " f"{openai_api_base} to {values['openai_api_base']}." ) if values["deployment_name"]: warnings.warn( "As of openai>=1.0.0, if `deployment_name` (or alias " "`azure_deployment`) is specified then " "`openai_api_base` (or alias `base_url`) should not be. " "Instead use `deployment_name` (or alias `azure_deployment`) " "and `azure_endpoint`." ) if values["deployment_name"] not in values["openai_api_base"]: warnings.warn( "As of openai>=1.0.0, if `openai_api_base` " "(or alias `base_url`) is specified it is expected to be " "of the form " "https://example-resource.azure.openai.com/openai/deployments/example-deployment. " # no f"Updating {openai_api_base} to "
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
f"{values['openai_api_base']}." ) values["openai_api_base"] += ( "/deployments/" + values["deployment_name"] ) values["deployment_name"] = None client_params = { "api_version": values["openai_api_version"], "azure_endpoint": values["azure_endpoint"], "azure_deployment": values["deployment_name"], "api_key": values["openai_api_key"], "azure_ad_token": values["azure_ad_token"], "azure_ad_token_provider": values["azure_ad_token_provider"], "organization": values["openai_organization"], "base_url": values["openai_api_base"], "timeout": values["request_timeout"], "max_retries": values["max_retries"], "default_headers": values["default_headers"], "default_query": values["default_query"], "http_client": values["http_client"], } values["client"] = openai.AzureOpenAI(**client_params).completions values["async_client"] = openai.AsyncAzureOpenAI( **client_params ).completions else: values["client"] = openai.Completion return values @property def _identifying_params(self) -> Mapping[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
return { **{"deployment_name": self.deployment_name}, **super()._identifying_params, } @property def _invocation_params(self) -> Dict[str, Any]: if is_openai_v1(): openai_params = {"model": self.deployment_name} else: openai_params = { "engine": self.deployment_name, "api_type": self.openai_api_type, "api_version": self.openai_api_version, } return {**openai_params, **super()._invocation_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "azure" @property def lc_attributes(self) -> Dict[str, Any]: return { "openai_api_type": self.openai_api_type, "openai_api_version": self.openai_api_version, } class OpenAIChat(BaseLLM):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""OpenAI Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import OpenAIChat openaichat = OpenAIChat(model_name="gpt-3.5-turbo") """ client: Any = Field(default=None, exclude=True) async_client: Any = Field(default=None, exclude=True) model_name: str = "gpt-3.5-turbo" """Model name to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = Field(default=None, alias="api_key")
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided.""" openai_api_base: Optional[str] = Field(default=None, alias="base_url") """Base URL path for API requests, leave blank if not using a proxy or service emulator.""" openai_proxy: Optional[str] = None max_retries: int = 6 """Maximum number of retries to make when generating.""" prefix_messages: List = Field(default_factory=list) """Series of messages for Chat input.""" streaming: bool = False """Whether to stream the results or not.""" allowed_special: Union[Literal["all"], AbstractSet[str]] = set() """Set of special tokens that are allowed。""" disallowed_special: Union[Literal["all"], Collection[str]] = "all" """Set of special tokens that are not allowed。""" @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_api_base = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) openai_proxy = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="" )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider KeyError
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. Any ideas on how to fix this? I'm actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token("https://cognitiveservices.azure.com/.default").token I get token expired after 60min which does not happen with a bearer token, so it is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
try: import openai openai.api_key = openai_api_key if openai_api_base: openai.api_base = openai_api_base if openai_organization: openai.organization = openai_organization if openai_proxy: openai.proxy = {"http": openai_proxy, "https": openai_proxy} # ty except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. Instead, please use: " "`from langchain.chat_models import ChatOpenAI`" ) return values @property def _default_params(self) -> Dict[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider KeyError
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. Any ideas on how to fix this? I'm actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token("https://cognitiveservices.azure.com/.default").token I get token expired after 60min which does not happen with a bearer token, so it is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
"""Get the default parameters for calling OpenAI API.""" return self.model_kwargs def _get_chat_params( self, prompts: List[str], stop: Optional[List[str]] = None ) -> Tuple: if len(prompts) > 1: raise ValueError( f"OpenAIChat currently only supports single prompt, got {prompts}" ) messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}] params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params} if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop if params.get("max_tokens") == -1: # for Ch del params["max_tokens"] return messages, params def _stream(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider KeyError
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. Any ideas on how to fix this? I'm actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token("https://cognitiveservices.azure.com/.default").token I get token expired after 60min which does not happen with a bearer token, so it is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: messages, params = self._get_chat_params([prompt], stop) params = {**params, **kwargs, "stream": True} for stream_resp in completion_with_retry( self, messages=messages, run_manager=run_manager, **params ): if not isinstance(stream_resp, dict): stream_resp = stream_resp.dict() token = stream_resp["choices"][0]["delta"].get("content", "") chunk = GenerationChunk(text=token) yield chunk if run_manager: run_manager.on_llm_new_token(token, chunk=chunk) async def _astream(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider KeyError
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. Any ideas on how to fix this? I'm actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token("https://cognitiveservices.azure.com/.default").token I get token expired after 60min which does not happen with a bearer token, so it is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: messages, params = self._get_chat_params([prompt], stop) params = {**params, **kwargs, "stream": True} async for stream_resp in await acompletion_with_retry( self, messages=messages, run_manager=run_manager, **params ): if not isinstance(stream_resp, dict): stream_resp = stream_resp.dict() token = stream_resp["choices"][0]["delta"].get("content", "") chunk = GenerationChunk(text=token) yield chunk if run_manager: await run_manager.on_llm_new_token(token, chunk=chunk) def _generate( self, prompts: List[str], stop: Optional[List[str]] = None,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider KeyError
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. Any ideas on how to fix this? I'm actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token("https://cognitiveservices.azure.com/.default").token I get token expired after 60min which does not happen with a bearer token, so it is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: if self.streaming: generation: Optional[GenerationChunk] = None for chunk in self._stream(prompts[0], stop, run_manager, **kwargs): if generation is None: generation = chunk else: generation += chunk assert generation is not None return LLMResult(generations=[[generation]]) messages, params = self._get_chat_params(prompts, stop) params = {**params, **kwargs} full_response = completion_with_retry( self, messages=messages, run_manager=run_manager, **params ) if not isinstance(full_response, dict): full_response = full_response.dict() llm_output = { "token_usage": full_response["usage"], "model_name": self.model_name, } return LLMResult( generations=[ [Generation(text=full_response["choices"][0]["message"]["content"])] ], llm_output=llm_output, ) async def _agenerate(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: if self.streaming: generation: Optional[GenerationChunk] = None async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs): if generation is None: generation = chunk else: generation += chunk assert generation is not None return LLMResult(generations=[[generation]]) messages, params = self._get_chat_params(prompts, stop) params = {**params, **kwargs} full_response = await acompletion_with_retry( self, messages=messages, run_manager=run_manager, **params ) if not isinstance(full_response, dict): full_response = full_response.dict() llm_output = { "token_usage": full_response["usage"], "model_name": self.model_name, } return LLMResult( generations=[ [Generation(text=full_response["choices"][0]["message"]["content"])] ],
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,069
AzureOpenAI azure_ad_token_provider Keyerror
### System Info When I use below snippet of code ``` import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ``` I get error : ```--------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[36], line 21 18 # api_version = "2023-05-15" 19 endpoint = "https://xxxx.openai.azure.com" ---> 21 client = AzureOpenAI( 22 azure_endpoint=endpoint, 23 api_version="2023-05-15", 24 azure_deployment="example-gpt-4", 25 azure_ad_token_provider=token_provider, 26 ) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs) 96 def __init__(self, **kwargs: Any) -> None: ---> 97 super().__init__(**kwargs) 98 self._lc_kwargs = kwargs File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data) 333 """ 334 Create a new model by parsing and validating input data from keyword arguments. 335 336 Raises ValidationError if the input data cannot be parsed to form a valid model. 
337 """ 338 # Uses something other than `self` the first arg to allow "self" as a settable attribute --> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error: 341 raise validation_error File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls) 1100 continue 1101 try: -> 1102 values = validator(cls_, values) 1103 except (ValueError, TypeError, AssertionError) as exc: 1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values) 877 values["openai_api_base"] += ( 878 "/deployments/" + values["deployment_name"] 879 ) 880 values["deployment_name"] = None 881 client_params = { 882 "api_version": values["openai_api_version"], 883 "azure_endpoint": values["azure_endpoint"], 884 "azure_deployment": values["deployment_name"], 885 "api_key": values["openai_api_key"], 886 "azure_ad_token": values["azure_ad_token"], --> 887 "azure_ad_token_provider": values["azure_ad_token_provider"], 888 "organization": values["openai_organization"], 889 "base_url": values["openai_api_base"], 890 "timeout": values["request_timeout"], 891 "max_retries": values["max_retries"], 892 "default_headers": values["default_headers"], 893 "default_query": values["default_query"], 894 "http_client": values["http_client"], 895 } 896 values["client"] = openai.AzureOpenAI(**client_params).completions 897 values["async_client"] = openai.AsyncAzureOpenAI( 898 **client_params 899 ).completions KeyError: 'azure_ad_token_provider' ``` Ive also tried AzureChatOpenAI , and I get the same error back. The error is not reproduced when I use openai library AzureOpenAI . 
Also on openai the azure_ad_token_provider has type azure_ad_token_provider: 'AzureADTokenProvider | None' = None while in langchain it has type azure_ad_token_provider: Optional[str] = None which also makes me wonder if it would take as input a different type than string to work with. any ideas on how to fix this? Im actually using Azure Service principal authentication, and if I use as alternative field azure_ad_token = credential.get_token(“https://cognitiveservices.azure.com/.default”).token I get token expired after 60min which does not happen with a bearer token, so It is important to me to make the token_provider work. libraries : pydantic 1.10.12 pydantic_core 2.10.1 openai 1.2.0 langchain 0.0.342 langchain-core 0.0.7 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os from azure.identity import DefaultAzureCredential from azure.identity import get_bearer_token_provider from langchain.llms import AzureOpenAI from langchain.chat_models import AzureChatOpenAI credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id, interactive_browser_client_id=client_id, client_secret=client_secret) token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default") endpoint = "https://xxxx.openai.azure.com" client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", azure_ad_token_provider=token_provider) ### Expected behavior client = AzureOpenAI( azure_endpoint=endpoint, api_version="2023-05-15", azure_deployment="example-gpt-4", 
azure_ad_token_provider=token_provider) should return a Runnable instance which I can use for LLMChain
https://github.com/langchain-ai/langchain/issues/14069
https://github.com/langchain-ai/langchain/pull/14166
9938086df07d69d24f9770209ea9087d3b906155
62505043be20cf8af491e30785a6ca0eeb1d276e
"2023-11-30T13:39:55Z"
python
"2023-12-03T16:55:25Z"
libs/langchain/langchain/llms/openai.py
llm_output=llm_output, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "openai-chat" def get_token_ids(self, text: str) -> List[int]: """Get the token IDs using the tiktoken package.""" # ti if sys.version_info[1] < 8: return super().get_token_ids(text) try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." ) enc = tiktoken.encoding_for_model(self.model_name) return enc.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/embeddings/clarifai.py
import logging from typing import Any, Dict, List, Optional from langchain_core.embeddings import Embeddings from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class ClarifaiEmbeddings(BaseModel, Embeddings):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/embeddings/clarifai.py
"""Clarifai embedding models. To use, you should have the ``clarifai`` python package installed, and the environment variable ``CLARIFAI_PAT`` set with your personal access token or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import ClarifaiEmbeddings clarifai = ClarifaiEmbeddings( model="embed-english-light-v3.0", clarifai_api_key="my-api-key" ) """ stub: Any """Clarifai stub.""" userDataObject: Any """Clarifai user data object.""" model_id: Optional[str] = None """Model id to use.""" model_version_id: Optional[str] = None """Model version id to use.""" app_id: Optional[str] = None """Clarifai application id to use.""" user_id: Optional[str] = None """Clarifai user id to use.""" pat: Optional[str] = None """Clarifai personal access token to use.""" api_base: str = "https://api.clarifai.com" class Config:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/embeddings/clarifai.py
"""Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT") user_id = values.get("user_id") app_id = values.get("app_id") model_id = values.get("model_id") if values["pat"] is None: raise ValueError("Please provide a pat.") if user_id is None: raise ValueError("Please provide a user_id.") if app_id is None: raise ValueError("Please provide a app_id.") if model_id is None: raise ValueError("Please provide a model_id.") try: from clarifai.auth.helper import ClarifaiAuthHelper from clarifai.client import create_stub except ImportError: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) auth = ClarifaiAuthHelper( user_id=user_id, app_id=app_id, pat=values["pat"], base=values["api_base"],
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/embeddings/clarifai.py
) values["userDataObject"] = auth.get_user_app_id_proto() values["stub"] = create_stub(auth) return values def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to Clarifai's embedding models. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ try: from clarifai_grpc.grpc.api import ( resources_pb2, service_pb2, ) from clarifai_grpc.grpc.api.status import status_code_pb2 except ImportError: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) batch_size = 32 embeddings = [] for i in range(0, len(texts), batch_size): batch = texts[i : i + batch_size] post_model_outputs_request = service_pb2.PostModelOutputsRequest( user_app_id=self.userDataObject, model_id=self.model_id, version_id=self.model_version_id,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/embeddings/clarifai.py
inputs=[ resources_pb2.Input( data=resources_pb2.Data(text=resources_pb2.Text(raw=t)) ) for t in batch ], ) post_model_outputs_response = self.stub.PostModelOutputs( post_model_outputs_request ) if post_model_outputs_response.status.code != status_code_pb2.SUCCESS: logger.error(post_model_outputs_response.status) first_output_failure = ( post_model_outputs_response.outputs[0].status if len(post_model_outputs_response.outputs) else None ) raise Exception( f"Post model outputs failed, status: " f"{post_model_outputs_response.status}, first output failure: " f"{first_output_failure}" ) embeddings.extend( [ list(o.data.embeddings[0].vector) for o in post_model_outputs_response.outputs ] ) return embeddings def embed_query(self, text: str) -> List[float]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/embeddings/clarifai.py
"""Call out to Clarifai's embedding models. Args: text: The text to embed. Returns: Embeddings for the text. """ try: from clarifai_grpc.grpc.api import ( resources_pb2, service_pb2, ) from clarifai_grpc.grpc.api.status import status_code_pb2 except ImportError: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) post_model_outputs_request = service_pb2.PostModelOutputsRequest(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/embeddings/clarifai.py
user_app_id=self.userDataObject, model_id=self.model_id, version_id=self.model_version_id, inputs=[ resources_pb2.Input( data=resources_pb2.Data(text=resources_pb2.Text(raw=text)) ) ], ) post_model_outputs_response = self.stub.PostModelOutputs( post_model_outputs_request ) if post_model_outputs_response.status.code != status_code_pb2.SUCCESS: logger.error(post_model_outputs_response.status) first_output_failure = ( post_model_outputs_response.outputs[0].status if len(post_model_outputs_response.outputs[0]) else None ) raise Exception( f"Post model outputs failed, status: " f"{post_model_outputs_response.status}, first output failure: " f"{first_output_failure}" ) embeddings = [ list(o.data.embeddings[0].vector) for o in post_model_outputs_response.outputs ] return embeddings[0]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/llms/clarifai.py
import logging from typing import Any, Dict, List, Optional from langchain_core.outputs import Generation, LLMResult from langchain_core.pydantic_v1 import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class Clarifai(LLM):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/llms/clarifai.py
"""Clarifai large language models. To use, you should have an account on the Clarifai platform, the ``clarifai`` python package installed, and the environment variable ``CLARIFAI_PAT`` set with your PAT key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Clarifai clarifai_llm = Clarifai(pat=CLARIFAI_PAT, \ user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID) """ stub: Any userDataObject: Any model_id: Optional[str] = None """Model id to use.""" model_version_id: Optional[str] = None """Model version id to use.""" app_id: Optional[str] = None """Clarifai application id to use.""" user_id: Optional[str] = None """Clarifai user id to use.""" pat: Optional[str] = None api_base: str = "https://api.clarifai.com" class Config:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/llms/clarifai.py
"""Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that we have all required info to access Clarifai platform and python package exists in environment.""" values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT") user_id = values.get("user_id") app_id = values.get("app_id") model_id = values.get("model_id") if values["pat"] is None: raise ValueError("Please provide a pat.") if user_id is None: raise ValueError("Please provide a user_id.") if app_id is None: raise ValueError("Please provide a app_id.") if model_id is None: raise ValueError("Please provide a model_id.") try: from clarifai.auth.helper import ClarifaiAuthHelper from clarifai.client import create_stub except ImportError: raise ImportError( "Could not import clarifai python package. "
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/llms/clarifai.py
"Please install it with `pip install clarifai`." ) auth = ClarifaiAuthHelper( user_id=user_id, app_id=app_id, pat=values["pat"], base=values["api_base"], ) values["userDataObject"] = auth.get_user_app_id_proto() values["stub"] = create_stub(auth) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Clarifai API.""" return {} @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { **{ "user_id": self.user_id, "app_id": self.app_id, "model_id": self.model_id, } } @property def _llm_type(self) -> str: """Return type of llm.""" return "clarifai" def _call(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/llms/clarifai.py
self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Clarfai's PostModelOutputs endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = clarifai_llm("Tell me a joke.") """ try: from clarifai_grpc.grpc.api import ( resources_pb2, service_pb2, ) from clarifai_grpc.grpc.api.status import status_code_pb2 except ImportError: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/llms/clarifai.py
post_model_outputs_request = service_pb2.PostModelOutputsRequest( user_app_id=self.userDataObject, model_id=self.model_id, version_id=self.model_version_id, inputs=[ resources_pb2.Input( data=resources_pb2.Data(text=resources_pb2.Text(raw=prompt)) ) ], ) post_model_outputs_response = self.stub.PostModelOutputs( post_model_outputs_request ) if post_model_outputs_response.status.code != status_code_pb2.SUCCESS: logger.error(post_model_outputs_response.status) first_model_failure = ( post_model_outputs_response.outputs[0].status if len(post_model_outputs_response.outputs) else None ) raise Exception( f"Post model outputs failed, status: " f"{post_model_outputs_response.status}, first output failure: " f"{first_model_failure}" ) text = post_model_outputs_response.outputs[0].data.text.raw if stop is not None:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/llms/clarifai.py
text = enforce_stop_tokens(text, stop) return text def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" try: from clarifai_grpc.grpc.api import ( resources_pb2, service_pb2, ) from clarifai_grpc.grpc.api.status import status_code_pb2 except ImportError: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) generations = [] batch_size = 32 for i in range(0, len(prompts), batch_size): batch = prompts[i : i + batch_size] post_model_outputs_request = service_pb2.PostModelOutputsRequest( user_app_id=self.userDataObject, model_id=self.model_id, version_id=self.model_version_id,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,175
ModuleNotFoundError: No module named 'clarifai.auth'
### System Info platform: Vagrant - Ubuntu 2204 python: 3.9.18 langchain version: 0.0.344 langchain core: 0.0.8 clarifai: 9.10.4 ### Who can help? @hwchase17 @agola11 ### Information - [X] The official example notebooks/scripts - [x] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Install the latest version of clarifai (9.10.4) 2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai ``` bash Could not import clarifai python package. Please install it with `pip install clarifai`. File 'clarifai.py', line 77, in validate_environment: raise ImportError( Traceback (most recent call last): File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment from clarifai.auth.helper import ClarifaiAuthHelper ModuleNotFoundError: No module named 'clarifai.auth' ``` ### Expected behavior I expect **ClarifaiAuthHelper** to import correctly. In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way: ``` python from clarifai.client.auth.helper import ClarifaiAuthHelper ```
https://github.com/langchain-ai/langchain/issues/14175
https://github.com/langchain-ai/langchain/pull/14215
ca8a022cd937ba398bb5544f4428f6ceafe56b84
8504ec56e4fc25308ba5baa4beaca944d9ff3371
"2023-12-02T15:28:09Z"
python
"2023-12-04T19:53:34Z"
libs/langchain/langchain/llms/clarifai.py
inputs=[ resources_pb2.Input( data=resources_pb2.Data(text=resources_pb2.Text(raw=prompt)) ) for prompt in batch ], ) post_model_outputs_response = self.stub.PostModelOutputs( post_model_outputs_request ) if post_model_outputs_response.status.code != status_code_pb2.SUCCESS: logger.error(post_model_outputs_response.status) first_model_failure = ( post_model_outputs_response.outputs[0].status if len(post_model_outputs_response.outputs) else None ) raise Exception( f"Post model outputs failed, status: " f"{post_model_outputs_response.status}, first output failure: " f"{first_model_failure}" ) for output in post_model_outputs_response.outputs: if stop is not None: text = enforce_stop_tokens(output.data.text.raw, stop) else: text = output.data.text.raw generations.append([Generation(text=text)]) return LLMResult(generations=generations)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
"""Agent for working with pandas objects.""" from typing import Any, Dict, List, Optional, Sequence, Tuple from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent from langchain.agents.types import AgentType from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import SystemMessage from langchain.tools import BaseTool from langchain_experimental.agents.agent_toolkits.pandas.prompt import ( FUNCTIONS_WITH_DF, FUNCTIONS_WITH_MULTI_DF, MULTI_DF_PREFIX, MULTI_DF_PREFIX_FUNCTIONS, PREFIX, PREFIX_FUNCTIONS, SUFFIX_NO_DF, SUFFIX_WITH_DF, SUFFIX_WITH_MULTI_DF, ) from langchain_experimental.tools.python.tool import PythonAstREPLTool def _get_multi_prompt(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
dfs: List[Any], prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: num_dfs = len(dfs) if suffix is not None: suffix_to_use = suffix include_dfs_head = True elif include_df_in_prompt: suffix_to_use = SUFFIX_WITH_MULTI_DF include_dfs_head = True else: suffix_to_use = SUFFIX_NO_DF include_dfs_head = False if input_variables is None: input_variables = ["input", "agent_scratchpad", "num_dfs"] if include_dfs_head: input_variables += ["dfs_head"]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
if prefix is None: prefix = MULTI_DF_PREFIX df_locals = {} for i, dataframe in enumerate(dfs): df_locals[f"df{i + 1}"] = dataframe tools = [PythonAstREPLTool(locals=df_locals)] prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix_to_use, input_variables=input_variables ) partial_prompt = prompt.partial() if "dfs_head" in input_variables: dfs_head = "\n\n".join([d.head(number_of_head_rows).to_markdown() for d in dfs]) partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs), dfs_head=dfs_head) if "num_dfs" in input_variables: partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs)) return partial_prompt, tools def _get_single_prompt( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: if suffix is not None: suffix_to_use = suffix include_df_head = True elif include_df_in_prompt: suffix_to_use = SUFFIX_WITH_DF include_df_head = True
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
else: suffix_to_use = SUFFIX_NO_DF include_df_head = False if input_variables is None: input_variables = ["input", "agent_scratchpad"] if include_df_head: input_variables += ["df_head"] if prefix is None: prefix = PREFIX tools = [PythonAstREPLTool(locals={"df": df})] prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix_to_use, input_variables=input_variables ) partial_prompt = prompt.partial() if "df_head" in input_variables: partial_prompt = partial_prompt.partial( df_head=str(df.head(number_of_head_rows).to_markdown()) ) return partial_prompt, tools def _get_prompt_and_tools( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: try: import pandas as pd pd.set_option("display.max_columns", None)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
except ImportError: raise ImportError( "pandas package not found, please install with `pip install pandas`" ) if include_df_in_prompt is not None and suffix is not None: raise ValueError("If suffix is specified, include_df_in_prompt should not be.") if isinstance(df, list): for item in df: if not isinstance(item, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_multi_prompt( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) else: if not isinstance(df, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_single_prompt( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) def _get_functions_single_prompt(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: if suffix is not None: suffix_to_use = suffix if include_df_in_prompt: suffix_to_use = suffix_to_use.format( df_head=str(df.head(number_of_head_rows).to_markdown()) ) elif include_df_in_prompt: suffix_to_use = FUNCTIONS_WITH_DF.format( df_head=str(df.head(number_of_head_rows).to_markdown()) ) else: suffix_to_use = "" if prefix is None: prefix = PREFIX_FUNCTIONS tools = [PythonAstREPLTool(locals={"df": df})] system_message = SystemMessage(content=prefix + suffix_to_use) prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) return prompt, tools def _get_functions_multi_prompt(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
dfs: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: if suffix is not None: suffix_to_use = suffix if include_df_in_prompt: dfs_head = "\n\n".join( [d.head(number_of_head_rows).to_markdown() for d in dfs] ) suffix_to_use = suffix_to_use.format( dfs_head=dfs_head, ) elif include_df_in_prompt: dfs_head = "\n\n".join([d.head(number_of_head_rows).to_markdown() for d in dfs]) suffix_to_use = FUNCTIONS_WITH_MULTI_DF.format( dfs_head=dfs_head, ) else: suffix_to_use = "" if prefix is None: prefix = MULTI_DF_PREFIX_FUNCTIONS prefix = prefix.format(num_dfs=str(len(dfs))) df_locals = {}
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
for i, dataframe in enumerate(dfs): df_locals[f"df{i + 1}"] = dataframe tools = [PythonAstREPLTool(locals=df_locals)] system_message = SystemMessage(content=prefix + suffix_to_use) prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) return prompt, tools def _get_functions_prompt_and_tools( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: try: import pandas as pd pd.set_option("display.max_columns", None) except ImportError: raise ImportError( "pandas package not found, please install with `pip install pandas`" ) if input_variables is not None: raise ValueError("`input_variables` is not supported at the moment.") if include_df_in_prompt is not None and suffix is not None: raise ValueError("If suffix is specified, include_df_in_prompt should not be.") if isinstance(df, list): for item in df: if not isinstance(item, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_functions_multi_prompt(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
df, prefix=prefix, suffix=suffix, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) else: if not isinstance(df, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_functions_single_prompt( df, prefix=prefix, suffix=suffix, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) def create_pandas_dataframe_agent( llm: BaseLanguageModel, df: Any, agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION, callback_manager: Optional[BaseCallbackManager] = None, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = "force", agent_executor_kwargs: Optional[Dict[str, Any]] = None,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, extra_tools: Sequence[BaseTool] = (), **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a pandas agent from an LLM and dataframe.""" agent: BaseSingleActionAgent if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION: prompt, base_tools = _get_prompt_and_tools( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) tools = base_tools + list(extra_tools) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent( llm_chain=llm_chain, allowed_tools=tool_names, callback_manager=callback_manager, **kwargs, ) elif agent_type == AgentType.OPENAI_FUNCTIONS:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,737
`extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working
### System Info Platform: Windows Server 2022 Python: 3.11.6 Langchain version: 0.0.306 ### Who can help? @agola11 @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [X] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [X] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ### Creating a test tool ```py from langchain.agents import Tool tools = [ Tool( name="test_tool", func=print, description="This is a test tool" ) ] tools ``` ``` [Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Setting up the pandas_dataframe_agent ```py from langchain.agents import create_pandas_dataframe_agent from langchain.llms import HuggingFacePipeline import pandas as pd llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation", device=0 ) agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools) agent.tools ``` ``` [PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. 
When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame Columns: [] Index: []}, sanitize_input=True), Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)] ``` ### Executing agent with debugging enabled ```py import langchain langchain.debug = True agent.run('What is 2+2?') ``` ``` [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { "input": "What is 2+2?" } [chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input: { "input": "What is 2+2?", "agent_scratchpad": "", "stop": [ "\nObservation:", "\n\tObservation:" ] } [llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input: { "prompts": [ "You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... 
(this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?" ] } ``` ### The prompt from the above log ``` You are working with a pandas dataframe in Python. The name of the dataframe is `df`. You should use the tools below to answer the question posed of you: python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer. Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [python_repl_ast] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question This is the result of `print(df.head())`: Begin! Question: What is 2+2? ``` ### Expected behavior Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast?
https://github.com/langchain-ai/langchain/issues/11737
https://github.com/langchain-ai/langchain/pull/13203
77a15fa9888a3e81a014895a6ec3f1b34c016d06
f758c8adc43ebbbdb3a13caa5a022a2d043229cc
"2023-10-12T22:22:09Z"
python
"2023-12-05T04:54:08Z"
libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py
_prompt, base_tools = _get_functions_prompt_and_tools( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) tools = base_tools + list(extra_tools) agent = OpenAIFunctionsAgent( llm=llm, prompt=_prompt, tools=tools, callback_manager=callback_manager, **kwargs, ) else: raise ValueError(f"Agent type {agent_type} not supported at the moment.") return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, return_intermediate_steps=return_intermediate_steps, max_iterations=max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, **(agent_executor_kwargs or {}), )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/langchain/retrievers/multi_vector.py
from enum import Enum from typing import List, Optional from langchain_core.documents import Document from langchain_core.retrievers import BaseRetriever from langchain_core.stores import BaseStore, ByteStore from langchain_core.vectorstores import VectorStore from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.storage._lc_store import create_kv_docstore class SearchType(str, Enum): """Enumerator of the types of search to perform.""" similarity = "similarity" """Similarity search.""" mmr = "mmr" """Maximal Marginal Relevance reranking of similarity search.""" class MultiVectorRetriever(BaseRetriever): """Retrieve from a set of multiple embeddings for the same document.""" vectorstore: VectorStore """The underlying vectorstore to use to store small chunks and their embedding vectors""" docstore: BaseStore[str, Document] """The storage layer for the parent documents""" id_key: str search_kwargs: dict """Keyword arguments to pass to the search function.""" search_type: SearchType """Type of search to perform (similarity / mmr)""" def __init__(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/langchain/retrievers/multi_vector.py
self, *, vectorstore: VectorStore, docstore: Optional[BaseStore[str, Document]] = None, base_store: Optional[ByteStore] = None, id_key: str = "doc_id", search_kwargs: Optional[dict] = None, search_type: SearchType = SearchType.similarity, ): if base_store is not None: docstore = create_kv_docstore(base_store) elif docstore is None: raise Exception("You must pass a `base_store` parameter.") super().__init__( vectorstore=vectorstore, docstore=docstore, id_key=id_key, search_kwargs=search_kwargs if search_kwargs is not None else {}, search_type=search_type, ) def _get_relevant_documents(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/langchain/retrievers/multi_vector.py
self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """Get documents relevant to a query. Args: query: String to find relevant documents for run_manager: The callbacks handler to use Returns: List of relevant documents """ if self.search_type == SearchType.mmr: sub_docs = self.vectorstore.max_marginal_relevance_search( query, **self.search_kwargs ) else: sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs) ids = [] for d in sub_docs: if d.metadata[self.id_key] not in ids: ids.append(d.metadata[self.id_key]) docs = self.docstore.mget(ids) return [d for d in docs if d is not None]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
from datetime import datetime from typing import ( Any, AsyncIterator, Dict, Iterable, Iterator, List, Optional, Sequence, Type, ) from unittest.mock import patch import pytest import pytest_asyncio from langchain_core.documents import Document from langchain_core.vectorstores import VST, VectorStore import langchain.vectorstores from langchain.document_loaders.base import BaseLoader from langchain.embeddings.base import Embeddings from langchain.indexes import aindex, index from langchain.indexes._api import _abatch from langchain.indexes._sql_record_manager import SQLRecordManager class ToyLoader(BaseLoader):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
"""Toy loader that always returns the same documents.""" def __init__(self, documents: Sequence[Document]) -> None: """Initialize with the documents to return.""" self.documents = documents def lazy_load( self, ) -> Iterator[Document]: yield from self.documents def load(self) -> List[Document]: """Load the documents from the source.""" return list(self.lazy_load()) async def alazy_load( self, ) -> AsyncIterator[Document]: async def async_generator() -> AsyncIterator[Document]: for document in self.documents: yield document return async_generator() async def aload(self) -> List[Document]: """Load the documents from the source.""" return [doc async for doc in await self.alazy_load()] class InMemoryVectorStore(VectorStore): """In-memory implementation of VectorStore using a dictionary.""" def __init__(self) -> None: """Vector store interface for testing things in memory.""" self.store: Dict[str, Document] = {} def delete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
"""Delete the given documents from the store using their IDs.""" if ids: for _id in ids: self.store.pop(_id, None) async def adelete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None: """Delete the given documents from the store using their IDs.""" if ids: for _id in ids: self.store.pop(_id, None) def add_documents( self, documents: Sequence[Document], *, ids: Optional[Sequence[str]] = None, **kwargs: Any, ) -> None: """Add the given documents to the store (insert behavior).""" if ids and len(ids) != len(documents): raise ValueError( f"Expected {len(ids)} ids, got {len(documents)} documents." ) if not ids: raise NotImplementedError("This is not implemented yet.") for _id, document in zip(ids, documents): if _id in self.store: raise ValueError( f"Document with uid {_id} already exists in the store." ) self.store[_id] = document async def aadd_documents(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
self, documents: Sequence[Document], *, ids: Optional[Sequence[str]] = None, **kwargs: Any, ) -> List[str]: if ids and len(ids) != len(documents): raise ValueError( f"Expected {len(ids)} ids, got {len(documents)} documents." ) if not ids: raise NotImplementedError("This is not implemented yet.") for _id, document in zip(ids, documents): if _id in self.store: raise ValueError( f"Document with uid {_id} already exists in the store." ) self.store[_id] = document return list(ids) def add_texts(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]] = None, **kwargs: Any, ) -> List[str]: """Add the given texts to the store (insert behavior).""" raise NotImplementedError() @classmethod def from_texts( cls: Type[VST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, **kwargs: Any, ) -> VST: """Create a vector store from a list of texts.""" raise NotImplementedError() def similarity_search(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Find the most similar documents to the given query.""" raise NotImplementedError() @pytest.fixture def record_manager() -> SQLRecordManager: """Timestamped set fixture.""" record_manager = SQLRecordManager("kittens", db_url="sqlite:///:memory:") record_manager.create_schema() return record_manager @pytest_asyncio.fixture @pytest.mark.requires("aiosqlite") async def arecord_manager() -> SQLRecordManager: """Timestamped set fixture.""" record_manager = SQLRecordManager( "kittens", db_url="sqlite+aiosqlite:///:memory:", async_mode=True, ) await record_manager.acreate_schema() return record_manager @pytest.fixture def vector_store() -> InMemoryVectorStore: """Vector store fixture.""" return InMemoryVectorStore() def test_indexing_same_content(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore ) -> None: """Indexing some content to confirm it gets added only once.""" loader = ToyLoader( documents=[ Document( page_content="This is a test document.", ), Document( page_content="This is another document.", ), ] ) assert index(loader, record_manager, vector_store) == { "num_added": 2, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } assert len(list(vector_store.store)) == 2 for _ in range(2): assert index(loader, record_manager, vector_store) == { "num_added": 0, "num_deleted": 0, "num_skipped": 2, "num_updated": 0, } @pytest.mark.requires("aiosqlite") async def test_aindexing_same_content(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore ) -> None: """Indexing some content to confirm it gets added only once.""" loader = ToyLoader( documents=[ Document( page_content="This is a test document.", ), Document( page_content="This is another document.", ), ] ) assert await aindex(await loader.alazy_load(), arecord_manager, vector_store) == { "num_added": 2, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } assert len(list(vector_store.store)) == 2 for _ in range(2): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
) == { "num_added": 0, "num_deleted": 0, "num_skipped": 2, "num_updated": 0, } def test_index_simple_delete_full( record_manager: SQLRecordManager, vector_store: InMemoryVectorStore ) -> None: """Indexing some content to confirm it gets added only once.""" loader = ToyLoader( documents=[ Document( page_content="This is a test document.", ), Document( page_content="This is another document.", ), ] ) with patch.object( record_manager, "get_time", return_value=datetime(2021, 1, 1).timestamp() ): assert index(loader, record_manager, vector_store, cleanup="full") == { "num_added": 2, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } with patch.object(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
record_manager, "get_time", return_value=datetime(2021, 1, 1).timestamp() ): assert index(loader, record_manager, vector_store, cleanup="full") == { "num_added": 0, "num_deleted": 0, "num_skipped": 2, "num_updated": 0, } loader = ToyLoader( documents=[ Document( page_content="mutated document 1", ), Document( page_content="This is another document.", ), ] ) with patch.object( record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() ): assert index(loader, record_manager, vector_store, cleanup="full") == { "num_added": 1, "num_deleted": 1, "num_skipped": 1, "num_updated": 0, } doc_texts = set( vector_store.store.get(uid).page_content
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
for uid in vector_store.store ) assert doc_texts == {"mutated document 1", "This is another document."} with patch.object( record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() ): assert index(loader, record_manager, vector_store, cleanup="full") == { "num_added": 0, "num_deleted": 0, "num_skipped": 2, "num_updated": 0, } @pytest.mark.requires("aiosqlite") async def test_aindex_simple_delete_full( arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore ) -> None: """Indexing some content to confirm it gets added only once.""" loader = ToyLoader( documents=[ Document( page_content="This is a test document.", ), Document( page_content="This is another document.", ), ] ) with patch.object( arecord_manager, "aget_time", return_value=datetime(2021, 1, 1).timestamp()
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup="full" ) == { "num_added": 2, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } with patch.object( arecord_manager, "aget_time", return_value=datetime(2021, 1, 1).timestamp() ): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup="full" ) == { "num_added": 0, "num_deleted": 0, "num_skipped": 2, "num_updated": 0, } loader = ToyLoader( documents=[ Document( page_content="mutated document 1", ), Document( page_content="This is another document.", ), ] )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
with patch.object( arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp() ): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup="full" ) == { "num_added": 1, "num_deleted": 1, "num_skipped": 1, "num_updated": 0, } doc_texts = set( vector_store.store.get(uid).page_content for uid in vector_store.store ) assert doc_texts == {"mutated document 1", "This is another document."} with patch.object( arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp() ): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup="full" ) == { "num_added": 0, "num_deleted": 0, "num_skipped": 2, "num_updated": 0, } def test_incremental_fails_with_bad_source_ids(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore ) -> None: """Test indexing with incremental deletion strategy.""" loader = ToyLoader( documents=[ Document( page_content="This is a test document.", metadata={"source": "1"}, ), Document( page_content="This is another document.", metadata={"source": "2"}, ), Document( page_content="This is yet another document.", metadata={"source": None}, ), ] ) with pytest.raises(ValueError): index(loader, record_manager, vector_store, cleanup="incremental") with pytest.raises(ValueError): index( loader,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
record_manager, vector_store, cleanup="incremental", source_id_key="source", ) @pytest.mark.requires("aiosqlite") async def test_aincremental_fails_with_bad_source_ids( arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore ) -> None: """Test indexing with incremental deletion strategy.""" loader = ToyLoader( documents=[ Document( page_content="This is a test document.", metadata={"source": "1"}, ), Document( page_content="This is another document.", metadata={"source": "2"}, ), Document( page_content="This is yet another document.", metadata={"source": None}, ), ] ) with pytest.raises(ValueError): await aindex( await loader.alazy_load(),
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
arecord_manager, vector_store, cleanup="incremental", ) with pytest.raises(ValueError): await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup="incremental", source_id_key="source", ) def test_no_delete( record_manager: SQLRecordManager, vector_store: InMemoryVectorStore ) -> None: """Test indexing without a deletion strategy.""" loader = ToyLoader( documents=[ Document( page_content="This is a test document.", metadata={"source": "1"}, ), Document( page_content="This is another document.", metadata={"source": "2"}, ), ] ) with patch.object(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() ): assert index( loader, record_manager, vector_store, cleanup=None, source_id_key="source", ) == { "num_added": 2, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } with patch.object( record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() ): assert index( loader, record_manager, vector_store, cleanup=None, source_id_key="source", ) == { "num_added": 0, "num_deleted": 0, "num_skipped": 2, "num_updated": 0, }
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
loader = ToyLoader( documents=[ Document( page_content="mutated content", metadata={"source": "1"}, ), Document( page_content="This is another document.", metadata={"source": "2"}, ), ] ) with patch.object( record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() ): assert index( loader, record_manager, vector_store, cleanup=None, source_id_key="source", ) == { "num_added": 1, "num_deleted": 0, "num_skipped": 1, "num_updated": 0, } @pytest.mark.requires("aiosqlite") async def test_ano_delete(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore ) -> None: """Test indexing without a deletion strategy.""" loader = ToyLoader( documents=[ Document( page_content="This is a test document.", metadata={"source": "1"}, ), Document( page_content="This is another document.", metadata={"source": "2"}, ), ] ) with patch.object( arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp() ): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup=None, source_id_key="source", ) == { "num_added": 2, "num_deleted": 0, "num_skipped": 0,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
"num_updated": 0, } with patch.object( arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp() ): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup=None, source_id_key="source", ) == { "num_added": 0, "num_deleted": 0, "num_skipped": 2, "num_updated": 0, } loader = ToyLoader( documents=[ Document( page_content="mutated content", metadata={"source": "1"}, ), Document( page_content="This is another document.", metadata={"source": "2"}, ), ] )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
with patch.object( arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp() ): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup=None, source_id_key="source", ) == { "num_added": 1, "num_deleted": 0, "num_skipped": 1, "num_updated": 0, } def test_incremental_delete( record_manager: SQLRecordManager, vector_store: InMemoryVectorStore ) -> None: """Test indexing with incremental deletion strategy.""" loader = ToyLoader( documents=[ Document( page_content="This is a test document.", metadata={"source": "1"}, ), Document( page_content="This is another document.", metadata={"source": "2"}, ),
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
] ) with patch.object( record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() ): assert index( loader, record_manager, vector_store, cleanup="incremental", source_id_key="source", ) == { "num_added": 2, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } doc_texts = set( vector_store.store.get(uid).page_content for uid in vector_store.store ) assert doc_texts == {"This is another document.", "This is a test document."} with patch.object( record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() ): assert index( loader, record_manager,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
vector_store, cleanup="incremental", source_id_key="source", ) == { "num_added": 0, "num_deleted": 0, "num_skipped": 2, "num_updated": 0, } loader = ToyLoader( documents=[ Document( page_content="mutated document 1", metadata={"source": "1"}, ), Document( page_content="mutated document 2", metadata={"source": "1"}, ), Document( page_content="This is another document.", metadata={"source": "2"}, ), ] ) with patch.object( record_manager, "get_time", return_value=datetime(2021, 1, 3).timestamp() ):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
assert index( loader, record_manager, vector_store, cleanup="incremental", source_id_key="source", ) == { "num_added": 2, "num_deleted": 1, "num_skipped": 1, "num_updated": 0, } doc_texts = set( vector_store.store.get(uid).page_content for uid in vector_store.store ) assert doc_texts == { "mutated document 1", "mutated document 2", "This is another document.", } @pytest.mark.requires("aiosqlite") async def test_aincremental_delete( arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore ) -> None: """Test indexing with incremental deletion strategy.""" loader = ToyLoader( documents=[ Document(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
page_content="This is a test document.", metadata={"source": "1"}, ), Document( page_content="This is another document.", metadata={"source": "2"}, ), ] ) with patch.object( arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp() ): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup="incremental", source_id_key="source", ) == { "num_added": 2, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } doc_texts = set( vector_store.store.get(uid).page_content for uid in vector_store.store ) assert doc_texts == {"This is another document.", "This is a test document."}
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
with patch.object( arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp() ): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup="incremental", source_id_key="source", ) == { "num_added": 0, "num_deleted": 0, "num_skipped": 2, "num_updated": 0, } loader = ToyLoader( documents=[ Document( page_content="mutated document 1", metadata={"source": "1"}, ), Document( page_content="mutated document 2", metadata={"source": "1"}, ), Document( page_content="This is another document.", metadata={"source": "2"},
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
), ] ) with patch.object( arecord_manager, "aget_time", return_value=datetime(2021, 1, 3).timestamp() ): assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup="incremental", source_id_key="source", ) == { "num_added": 2, "num_deleted": 1, "num_skipped": 1, "num_updated": 0, } doc_texts = set( vector_store.store.get(uid).page_content for uid in vector_store.store ) assert doc_texts == { "mutated document 1", "mutated document 2", "This is another document.", } def test_indexing_with_no_docs(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
record_manager: SQLRecordManager, vector_store: VectorStore ) -> None: """Check edge case when loader returns no new docs.""" loader = ToyLoader(documents=[]) assert index(loader, record_manager, vector_store, cleanup="full") == { "num_added": 0, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } @pytest.mark.requires("aiosqlite") async def test_aindexing_with_no_docs( arecord_manager: SQLRecordManager, vector_store: VectorStore ) -> None: """Check edge case when loader returns no new docs.""" loader = ToyLoader(documents=[]) assert await aindex( await loader.alazy_load(), arecord_manager, vector_store, cleanup="full" ) == { "num_added": 0, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } def test_deduplication(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
record_manager: SQLRecordManager, vector_store: VectorStore ) -> None: """Check edge case when loader returns no new docs.""" docs = [ Document( page_content="This is a test document.", metadata={"source": "1"}, ), Document( page_content="This is a test document.", metadata={"source": "1"}, ), ] assert index(docs, record_manager, vector_store, cleanup="full") == { "num_added": 1, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } @pytest.mark.requires("aiosqlite") async def test_adeduplication(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
14,342
Error:
### System Info I try this example code ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) ``` # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) but I encountered an error: ``` 1 # Initialize the retriever ----> 2 parent_document_retriever = ParentDocumentRetriever( 3 vectorstore=vectorstore, 4 docstore=store, 5 child_splitter=child_splitter, TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter' ``` ### Who can help? 
_No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` from langchain.retrievers import ParentDocumentRetriever from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore # This text splitter is used to create the parent documents parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) # This text splitter is used to create the child documents # It should create documents smaller than the parent child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # The vectorstore to use to index the child chunks vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) # The storage layer for the parent documents store = InMemoryStore() vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings()) # Initialize the retriever parent_document_retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter, ) ``` ### Expected behavior I can run.
https://github.com/langchain-ai/langchain/issues/14342
https://github.com/langchain-ai/langchain/pull/14350
7bdfc43766e72e4b67512bd85119b1c797035b86
867ca6d0bec2dac5330257bc886880743f3ece4d
"2023-12-06T11:09:11Z"
python
"2023-12-06T19:12:50Z"
libs/langchain/tests/unit_tests/indexes/test_indexing.py
arecord_manager: SQLRecordManager, vector_store: VectorStore ) -> None: """Check edge case when loader returns no new docs.""" docs = [ Document( page_content="This is a test document.", metadata={"source": "1"}, ), Document( page_content="This is a test document.", metadata={"source": "1"}, ), ] assert await aindex(docs, arecord_manager, vector_store, cleanup="full") == { "num_added": 1, "num_deleted": 0, "num_skipped": 0, "num_updated": 0, } def test_cleanup_with_different_batchsize(