method_name | method_body | full_code | docstring |
---|---|---|---|
json | return self.json_data | def json(self) ->Dict:
return self.json_data | null |
test_konko_generation_test | """Check ChatKonko's generation ability."""
chat_instance = ChatKonko(max_tokens=10, n=2)
msg = HumanMessage(content='Hi')
gen_response = chat_instance.generate([[msg], [msg]])
assert isinstance(gen_response, LLMResult)
assert len(gen_response.generations) == 2
for gen_list in gen_response.generations:
assert len(gen_list) == 2
for gen in gen_list:
assert isinstance(gen, ChatGeneration)
assert isinstance(gen.text, str)
assert gen.text == gen.message.content | def test_konko_generation_test() ->None:
"""Check ChatKonko's generation ability."""
chat_instance = ChatKonko(max_tokens=10, n=2)
msg = HumanMessage(content='Hi')
gen_response = chat_instance.generate([[msg], [msg]])
assert isinstance(gen_response, LLMResult)
assert len(gen_response.generations) == 2
for gen_list in gen_response.generations:
assert len(gen_list) == 2
for gen in gen_list:
assert isinstance(gen, ChatGeneration)
assert isinstance(gen.text, str)
assert gen.text == gen.message.content | Check ChatKonko's generation ability. |
_call | """Call to Gradients API `model/{id}/complete`.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
try:
response = requests.post(**self._kwargs_post_request(prompt, kwargs))
if response.status_code != 200:
raise Exception(
f'Gradient returned an unexpected response with status {response.status_code}: {response.text}'
)
except requests.exceptions.RequestException as e:
raise Exception(f'RequestException while calling Gradient Endpoint: {e}')
text = response.json()['generatedOutput']
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call to Gradients API `model/{id}/complete`.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
try:
response = requests.post(**self._kwargs_post_request(prompt, kwargs))
if response.status_code != 200:
raise Exception(
f'Gradient returned an unexpected response with status {response.status_code}: {response.text}'
)
except requests.exceptions.RequestException as e:
raise Exception(
f'RequestException while calling Gradient Endpoint: {e}')
text = response.json()['generatedOutput']
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | Call to Gradients API `model/{id}/complete`.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model. |
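For orientation, a standalone sketch of the request/response cycle `_call` performs. The endpoint URL, header name, and `query` payload key are assumptions for illustration; only the `generatedOutput` response key and the error handling come from the code above.

```python
import requests

def call_gradient_complete(base_url: str, model_id: str, token: str, prompt: str) -> str:
    """Sketch of the Gradient completion call, under an assumed API shape."""
    response = requests.post(
        f"{base_url}/models/{model_id}/complete",      # assumed endpoint layout
        headers={"authorization": f"Bearer {token}"},  # assumed auth header
        json={"query": prompt},                        # assumed payload key
    )
    if response.status_code != 200:
        raise RuntimeError(
            f"Gradient returned an unexpected response with status "
            f"{response.status_code}: {response.text}"
        )
    return response.json()["generatedOutput"]  # response key from the code above
```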
_load_structured_doc | cli, field_content = self._create_rspace_client()
yield self._get_doc(cli, field_content, self.global_id) | def _load_structured_doc(self) ->Iterator[Document]:
cli, field_content = self._create_rspace_client()
yield self._get_doc(cli, field_content, self.global_id) | null |
__init__ | """Initialize with an access token and a resource.
Args:
access_token: The access token.
resource: The resource.
"""
self.access_token = access_token
self.resource = resource
self.headers = {'Authorization': f'Bearer {self.access_token}', 'Accept':
'application/json'} | def __init__(self, access_token: str, resource: str) ->None:
"""Initialize with an access token and a resource.
Args:
access_token: The access token.
resource: The resource.
"""
self.access_token = access_token
self.resource = resource
self.headers = {'Authorization': f'Bearer {self.access_token}',
'Accept': 'application/json'} | Initialize with an access token and a resource.
Args:
access_token: The access token.
resource: The resource. |
render_text_description_and_args | """Render the tool name, description, and args in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, args: {"expression": {"type": "string"}}
"""
tool_strings = []
for tool in tools:
args_schema = str(tool.args)
tool_strings.append(f'{tool.name}: {tool.description}, args: {args_schema}'
)
return '\n'.join(tool_strings) | def render_text_description_and_args(tools: List[BaseTool]) ->str:
"""Render the tool name, description, and args in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, args: {"expression": {"type": "string"}}
"""
tool_strings = []
for tool in tools:
args_schema = str(tool.args)
tool_strings.append(
f'{tool.name}: {tool.description}, args: {args_schema}')
return '\n'.join(tool_strings) | Render the tool name, description, and args in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, args: {"expression": {"type": "string"}} |
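A quick demonstration of the rendered format, using a minimal stand-in for `BaseTool` (the real function expects LangChain tool objects exposing `name`, `description`, and `args`):

```python
from dataclasses import dataclass
from typing import Dict, List

@dataclass
class FakeTool:
    # Minimal stand-in exposing the three attributes the renderer reads.
    name: str
    description: str
    args: Dict[str, dict]

def render_text_description_and_args(tools: List[FakeTool]) -> str:
    return "\n".join(f"{t.name}: {t.description}, args: {t.args}" for t in tools)

tools = [FakeTool("search", "This tool is used for search", {"query": {"type": "string"}})]
print(render_text_description_and_args(tools))
# search: This tool is used for search, args: {'query': {'type': 'string'}}
```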
_select_relevance_score_fn | """
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
return self._euclidean_relevance_score_fn
else:
raise ValueError(
'Unknown distance strategy, must be cosine, max_inner_product (dot product), or euclidean'
) | def _select_relevance_score_fn(self) ->Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
return self._euclidean_relevance_score_fn
else:
raise ValueError(
'Unknown distance strategy, must be cosine, max_inner_product (dot product), or euclidean'
) | The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc. |
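For intuition, plausible distance-to-relevance conversions for each strategy. The formulas below are assumptions modeled on common LangChain defaults, not this class's verified helpers:

```python
import math

def cosine_relevance(distance: float) -> float:
    # Cosine distance lies in [0, 2]; smaller distance means higher relevance.
    return 1.0 - distance

def euclidean_relevance(distance: float) -> float:
    # Assumes unit-normed embeddings, so L2 distances fall in [0, sqrt(2)].
    return 1.0 - distance / math.sqrt(2)

print(cosine_relevance(0.1))     # 0.9
print(euclidean_relevance(0.1))  # ~0.929
```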
test_filters_combination | tf1 = Tag('tag_field') == ['tag1', 'tag2']
tf2 = Tag('tag_field') == 'tag3'
combined = tf1 & tf2
assert str(combined) == '(@tag_field:{tag1|tag2} @tag_field:{tag3})'
combined = tf1 | tf2
assert str(combined) == '(@tag_field:{tag1|tag2} | @tag_field:{tag3})'
tf1 = Tag('tag_field') == []
assert str(tf1) == '*'
assert str(tf1 & tf2) == str(tf2)
assert str(tf1 | tf2) == str(tf2)
tf1 = Tag('tag_field') == None
tf2 = Tag('tag_field') == ''
assert str(tf1 & tf2) == '*'
tf1 = Tag('tag_field') == None
tf2 = Tag('tag_field') == 'tag'
assert str(tf1 & tf2) == str(tf2)
tf1 = Tag('tag_field') == None
tf2 = Tag('tag_field') == ['tag1', 'tag2']
assert str(tf1 & tf2) == str(tf2)
tf1 = Tag('tag_field') == None
tf2 = Tag('tag_field') != None
assert str(tf1 & tf2) == '*'
tf1 = Tag('tag_field') == ''
tf2 = Tag('tag_field') == 'tag'
tf3 = Tag('tag_field') == ['tag1', 'tag2']
assert str(tf1 & tf2 & tf3) == str(tf2 & tf3)
tf1 = Tag('tag_field') == None
tf2 = Num('num_field') == None
tf3 = Text('text_field') == None
assert str(tf1 & tf2 & tf3) == '*'
tf1 = Tag('tag_field') != None
tf2 = Num('num_field') != None
tf3 = Text('text_field') != None
assert str(tf1 & tf2 & tf3) == '*'
tf1 = Tag('tag_field') == 'tag'
tf2 = Num('num_field') == None
tf3 = Text('text_field') == None
assert str(tf1 & tf2 & tf3) == str(tf1) | def test_filters_combination() ->None:
tf1 = Tag('tag_field') == ['tag1', 'tag2']
tf2 = Tag('tag_field') == 'tag3'
combined = tf1 & tf2
assert str(combined) == '(@tag_field:{tag1|tag2} @tag_field:{tag3})'
combined = tf1 | tf2
assert str(combined) == '(@tag_field:{tag1|tag2} | @tag_field:{tag3})'
tf1 = Tag('tag_field') == []
assert str(tf1) == '*'
assert str(tf1 & tf2) == str(tf2)
assert str(tf1 | tf2) == str(tf2)
tf1 = Tag('tag_field') == None
tf2 = Tag('tag_field') == ''
assert str(tf1 & tf2) == '*'
tf1 = Tag('tag_field') == None
tf2 = Tag('tag_field') == 'tag'
assert str(tf1 & tf2) == str(tf2)
tf1 = Tag('tag_field') == None
tf2 = Tag('tag_field') == ['tag1', 'tag2']
assert str(tf1 & tf2) == str(tf2)
tf1 = Tag('tag_field') == None
tf2 = Tag('tag_field') != None
assert str(tf1 & tf2) == '*'
tf1 = Tag('tag_field') == ''
tf2 = Tag('tag_field') == 'tag'
tf3 = Tag('tag_field') == ['tag1', 'tag2']
assert str(tf1 & tf2 & tf3) == str(tf2 & tf3)
tf1 = Tag('tag_field') == None
tf2 = Num('num_field') == None
tf3 = Text('text_field') == None
assert str(tf1 & tf2 & tf3) == '*'
tf1 = Tag('tag_field') != None
tf2 = Num('num_field') != None
tf3 = Text('text_field') != None
assert str(tf1 & tf2 & tf3) == '*'
tf1 = Tag('tag_field') == 'tag'
tf2 = Num('num_field') == None
tf3 = Text('text_field') == None
assert str(tf1 & tf2 & tf3) == str(tf1) | null |
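A hypothetical usage sketch of the filter DSL these assertions exercise; the import path is an assumption, but the rendered strings follow the expected values above:

```python
# Assumed import path for the Tag filter class under test.
from langchain_community.vectorstores.redis import RedisTag as Tag

f = (Tag("color") == ["red", "blue"]) & (Tag("brand") == "nike")
print(str(f))  # (@color:{red|blue} @brand:{nike})

# Empty/None filters render as the match-all '*' and drop out of conjunctions.
empty = Tag("color") == []
print(str(empty & (Tag("brand") == "nike")))  # @brand:{nike}
```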
test_load_no_result | docs = retriever.get_relevant_documents('1605.08386WWW')
assert not docs | def test_load_no_result(retriever: PubMedRetriever) ->None:
docs = retriever.get_relevant_documents('1605.08386WWW')
assert not docs | null |
__next__ | """Return the next retrieved token."""
val = self.get()
if val is None or val in self._stop_words:
self.client.stop_stream('tensorrt_llm', self.request_id, signal=not
self._batch)
raise StopIteration()
return val | def __next__(self) ->str:
"""Return the next retrieved token."""
val = self.get()
if val is None or val in self._stop_words:
self.client.stop_stream('tensorrt_llm', self.request_id, signal=not
self._batch)
raise StopIteration()
return val | Return the next retrieved token. |
configure | """Configure the callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
CallbackManager: The configured callback manager.
"""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose,
inheritable_tags, local_tags, inheritable_metadata, local_metadata) | @classmethod
def configure(cls, inheritable_callbacks: Callbacks=None, local_callbacks:
Callbacks=None, verbose: bool=False, inheritable_tags: Optional[List[
str]]=None, local_tags: Optional[List[str]]=None, inheritable_metadata:
Optional[Dict[str, Any]]=None, local_metadata: Optional[Dict[str, Any]]
=None) ->CallbackManager:
"""Configure the callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
CallbackManager: The configured callback manager.
"""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose,
inheritable_tags, local_tags, inheritable_metadata, local_metadata) | Configure the callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
CallbackManager: The configured callback manager. |
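A hypothetical configuration call, assuming the standard `langchain_core.callbacks` exports; inheritable values propagate to child runs while local values apply only to this component:

```python
from langchain_core.callbacks import CallbackManager, StdOutCallbackHandler

manager = CallbackManager.configure(
    inheritable_callbacks=[StdOutCallbackHandler()],  # passed down to child runs
    verbose=True,
    inheritable_tags=["pipeline"],
    local_tags=["this-component"],  # attached here, not inherited
)
```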
_type | return 'guardrails' | @property
def _type(self) ->str:
return 'guardrails' | null |
convert_messages_to_prompt | if provider == 'anthropic':
prompt = convert_messages_to_prompt_anthropic(messages=messages)
elif provider == 'meta':
prompt = convert_messages_to_prompt_llama(messages=messages)
else:
raise NotImplementedError(
f'Provider {provider} model does not support chat.')
return prompt | @classmethod
def convert_messages_to_prompt(cls, provider: str, messages: List[BaseMessage]
) ->str:
if provider == 'anthropic':
prompt = convert_messages_to_prompt_anthropic(messages=messages)
elif provider == 'meta':
prompt = convert_messages_to_prompt_llama(messages=messages)
else:
raise NotImplementedError(
f'Provider {provider} model does not support chat.')
return prompt | null |
test_tencent_vector_db_no_drop | """Test end to end construction and MRR search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
del docsearch
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas, drop=False)
time.sleep(3)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6 | def test_tencent_vector_db_no_drop() ->None:
"""Test end to end construction and MRR search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
del docsearch
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas, drop=False)
time.sleep(3)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6 | Test end to end construction and search without dropping the collection. |
_convert_one_message_to_text_llama | if isinstance(message, ChatMessage):
message_text = f'\n\n{message.role.capitalize()}: {message.content}'
elif isinstance(message, HumanMessage):
message_text = f'[INST] {message.content} [/INST]'
elif isinstance(message, AIMessage):
message_text = f'{message.content}'
elif isinstance(message, SystemMessage):
message_text = f'<<SYS>> {message.content} <</SYS>>'
else:
raise ValueError(f'Got unknown type {message}')
return message_text | def _convert_one_message_to_text_llama(message: BaseMessage) ->str:
if isinstance(message, ChatMessage):
message_text = f'\n\n{message.role.capitalize()}: {message.content}'
elif isinstance(message, HumanMessage):
message_text = f'[INST] {message.content} [/INST]'
elif isinstance(message, AIMessage):
message_text = f'{message.content}'
elif isinstance(message, SystemMessage):
message_text = f'<<SYS>> {message.content} <</SYS>>'
else:
raise ValueError(f'Got unknown type {message}')
return message_text | null |
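A standalone sketch of the Llama-2 chat markup this helper emits, with the role-to-marker mapping inlined (plain role strings replace the message classes):

```python
def to_llama_text(role: str, content: str) -> str:
    if role == "human":
        return f"[INST] {content} [/INST]"    # user turns get [INST] markers
    if role == "system":
        return f"<<SYS>> {content} <</SYS>>"  # system prompts get <<SYS>> markers
    if role == "ai":
        return content                        # assistant text passes through as-is
    return f"\n\n{role.capitalize()}: {content}"  # generic chat roles

print(to_llama_text("human", "Hello"))  # [INST] Hello [/INST]
```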
test_adding_document | """Test that documents are added correctly."""
_dict = {'foo': Document(page_content='bar')}
docstore = InMemoryDocstore(_dict)
new_dict = {'bar': Document(page_content='foo')}
docstore.add(new_dict)
foo_output = docstore.search('bar')
assert isinstance(foo_output, Document)
assert foo_output.page_content == 'foo'
bar_output = docstore.search('foo')
assert isinstance(bar_output, Document)
assert bar_output.page_content == 'bar' | def test_adding_document() ->None:
"""Test that documents are added correctly."""
_dict = {'foo': Document(page_content='bar')}
docstore = InMemoryDocstore(_dict)
new_dict = {'bar': Document(page_content='foo')}
docstore.add(new_dict)
foo_output = docstore.search('bar')
assert isinstance(foo_output, Document)
assert foo_output.page_content == 'foo'
bar_output = docstore.search('foo')
assert isinstance(bar_output, Document)
assert bar_output.page_content == 'bar' | Test that documents are added correctly. |
_get_run_log | """Get run log"""
run_log = RunLog(state=None)
for log_patch in run_log_patches:
run_log = run_log + log_patch
return run_log | def _get_run_log(run_log_patches: Sequence[RunLogPatch]) ->RunLog:
"""Get run log"""
run_log = RunLog(state=None)
for log_patch in run_log_patches:
run_log = run_log + log_patch
return run_log | Get run log |
embed_query | return self.embed_documents([text])[0] | def embed_query(self, text: str) ->List[float]:
return self.embed_documents([text])[0] | null |
create_prompt | tool_strings = '\n'.join([f'{tool.name}: {tool.description}' for tool in tools]
)
tool_names = ', '.join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = '\n\n'.join([system_message_prefix, tool_strings,
format_instructions, system_message_suffix])
messages = [SystemMessagePromptTemplate.from_template(template),
HumanMessagePromptTemplate.from_template(human_message)]
if input_variables is None:
input_variables = ['input', 'agent_scratchpad']
return ChatPromptTemplate(input_variables=input_variables, messages=messages) | @classmethod
def create_prompt(cls, tools: Sequence[BaseTool], system_message_prefix:
str=SYSTEM_MESSAGE_PREFIX, system_message_suffix: str=
SYSTEM_MESSAGE_SUFFIX, human_message: str=HUMAN_MESSAGE,
format_instructions: str=FORMAT_INSTRUCTIONS, input_variables: Optional
[List[str]]=None) ->BasePromptTemplate:
tool_strings = '\n'.join([f'{tool.name}: {tool.description}' for tool in
tools])
tool_names = ', '.join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = '\n\n'.join([system_message_prefix, tool_strings,
format_instructions, system_message_suffix])
messages = [SystemMessagePromptTemplate.from_template(template),
HumanMessagePromptTemplate.from_template(human_message)]
if input_variables is None:
input_variables = ['input', 'agent_scratchpad']
return ChatPromptTemplate(input_variables=input_variables, messages=
messages) | null |
on_llm_end | """Run when LLM ends."""
if len(response.generations) == 0 or len(response.generations[0]) == 0:
return
if not self.chain_run_id:
generation = response.generations[0][0]
self.messages.append(self.message_model(message=generation.text, role=
self.message_role_model.ASSISTANT))
self._log_conversation() | def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None:
"""Run when LLM ends."""
if len(response.generations) == 0 or len(response.generations[0]) == 0:
return
if not self.chain_run_id:
generation = response.generations[0][0]
self.messages.append(self.message_model(message=generation.text,
role=self.message_role_model.ASSISTANT))
self._log_conversation() | Run when LLM ends. |
_embed | task_type = self.task_type or 'retrieval_document'
try:
result = genai.embed_content(model=self.model, content=texts, task_type
=task_type, title=title)
except Exception as e:
raise GoogleGenerativeAIError(f'Error embedding content: {e}') from e
return result['embedding'] | def _embed(self, texts: List[str], task_type: str, title: Optional[str]=None
) ->List[List[float]]:
task_type = self.task_type or 'retrieval_document'
try:
result = genai.embed_content(model=self.model, content=texts,
task_type=task_type, title=title)
except Exception as e:
raise GoogleGenerativeAIError(f'Error embedding content: {e}') from e
return result['embedding'] | null |
get_tools | """Get the tools in the toolkit."""
return [GmailCreateDraft(api_resource=self.api_resource), GmailSendMessage(
api_resource=self.api_resource), GmailSearch(api_resource=self.
api_resource), GmailGetMessage(api_resource=self.api_resource),
GmailGetThread(api_resource=self.api_resource)] | def get_tools(self) ->List[BaseTool]:
"""Get the tools in the toolkit."""
return [GmailCreateDraft(api_resource=self.api_resource),
GmailSendMessage(api_resource=self.api_resource), GmailSearch(
api_resource=self.api_resource), GmailGetMessage(api_resource=self.
api_resource), GmailGetThread(api_resource=self.api_resource)] | Get the tools in the toolkit. |
test_huggingfacehub_embedding_documents | """Test huggingfacehub embeddings."""
documents = ['foo bar']
embedding = HuggingFaceHubEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768 | def test_huggingfacehub_embedding_documents() ->None:
"""Test huggingfacehub embeddings."""
documents = ['foo bar']
embedding = HuggingFaceHubEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768 | Test huggingfacehub embeddings. |
sse_invoke | if self.model == 'chatglm_turbo':
return self.zhipuai.model_api.sse_invoke(model=self.model, prompt=
prompt, top_p=self.top_p, temperature=self.temperature, request_id=
self.request_id, return_type=self.return_type, incremental=self.
incremental)
elif self.model == 'characterglm':
meta = self.meta.dict()
return self.zhipuai.model_api.sse_invoke(model=self.model, prompt=
prompt, meta=meta, request_id=self.request_id, return_type=self.
return_type, incremental=self.incremental)
return None | def sse_invoke(self, prompt):
if self.model == 'chatglm_turbo':
return self.zhipuai.model_api.sse_invoke(model=self.model, prompt=
prompt, top_p=self.top_p, temperature=self.temperature,
request_id=self.request_id, return_type=self.return_type,
incremental=self.incremental)
elif self.model == 'characterglm':
meta = self.meta.dict()
return self.zhipuai.model_api.sse_invoke(model=self.model, prompt=
prompt, meta=meta, request_id=self.request_id, return_type=self
.return_type, incremental=self.incremental)
return None | null |
test_openai_opeanapi_headers | BRANDFETCH_API_KEY = os.environ.get('BRANDFETCH_API_KEY')
headers = {'Authorization': f'Bearer {BRANDFETCH_API_KEY}'}
file_path = str(Path(__file__).parents[2] /
'examples/brandfetch-brandfetch-2.0.0-resolved.json')
chain = get_openapi_chain(file_path, headers=headers)
output = chain.run('I want to know about nike.comgg')
assert isinstance(output, str) | def test_openai_opeanapi_headers() ->None:
BRANDFETCH_API_KEY = os.environ.get('BRANDFETCH_API_KEY')
headers = {'Authorization': f'Bearer {BRANDFETCH_API_KEY}'}
file_path = str(Path(__file__).parents[2] /
'examples/brandfetch-brandfetch-2.0.0-resolved.json')
chain = get_openapi_chain(file_path, headers=headers)
output = chain.run('I want to know about nike.comgg')
assert isinstance(output, str) | null |
_import_wikipedia_tool | from langchain_community.tools.wikipedia.tool import WikipediaQueryRun
return WikipediaQueryRun | def _import_wikipedia_tool() ->Any:
from langchain_community.tools.wikipedia.tool import WikipediaQueryRun
return WikipediaQueryRun | null |
test_backends | index, filter_query, embeddings = request.getfixturevalue(backend)
retriever = DocArrayRetriever(index=index, embeddings=embeddings,
search_field='title_embedding', content_field='title')
docs = retriever.get_relevant_documents('my docs')
assert len(docs) == 1
assert 'My document' in docs[0].page_content
assert 'id' in docs[0].metadata and 'year' in docs[0].metadata
assert 'other_emb' not in docs[0].metadata
retriever = DocArrayRetriever(index=index, embeddings=embeddings,
search_field='title_embedding', content_field='title', filters=filter_query
)
docs = retriever.get_relevant_documents('my docs')
assert len(docs) == 1
assert 'My document' in docs[0].page_content
assert 'id' in docs[0].metadata and 'year' in docs[0].metadata
assert 'other_emb' not in docs[0].metadata
assert docs[0].metadata['year'] <= 90
retriever = DocArrayRetriever(index=index, embeddings=embeddings,
search_field='title_embedding', search_type='mmr', content_field=
'title', filters=filter_query)
docs = retriever.get_relevant_documents('my docs')
assert len(docs) == 1
assert 'My document' in docs[0].page_content
assert 'id' in docs[0].metadata and 'year' in docs[0].metadata
assert 'other_emb' not in docs[0].metadata
assert docs[0].metadata['year'] <= 90 | @pytest.mark.parametrize('backend', ['init_hnsw', 'init_in_memory',
'init_qdrant', 'init_elastic', 'init_weaviate'])
def test_backends(request: Any, backend: Any) ->None:
index, filter_query, embeddings = request.getfixturevalue(backend)
retriever = DocArrayRetriever(index=index, embeddings=embeddings,
search_field='title_embedding', content_field='title')
docs = retriever.get_relevant_documents('my docs')
assert len(docs) == 1
assert 'My document' in docs[0].page_content
assert 'id' in docs[0].metadata and 'year' in docs[0].metadata
assert 'other_emb' not in docs[0].metadata
retriever = DocArrayRetriever(index=index, embeddings=embeddings,
search_field='title_embedding', content_field='title', filters=
filter_query)
docs = retriever.get_relevant_documents('my docs')
assert len(docs) == 1
assert 'My document' in docs[0].page_content
assert 'id' in docs[0].metadata and 'year' in docs[0].metadata
assert 'other_emb' not in docs[0].metadata
assert docs[0].metadata['year'] <= 90
retriever = DocArrayRetriever(index=index, embeddings=embeddings,
search_field='title_embedding', search_type='mmr', content_field=
'title', filters=filter_query)
docs = retriever.get_relevant_documents('my docs')
assert len(docs) == 1
assert 'My document' in docs[0].page_content
assert 'id' in docs[0].metadata and 'year' in docs[0].metadata
assert 'other_emb' not in docs[0].metadata
assert docs[0].metadata['year'] <= 90 | null |
test_default_index_from_documents | """This test checks the construction of a default
Elasticsearch index using 'from_documents'."""
elastic_vector_search = ElasticVectorSearch.from_documents(documents=
documents, embedding=embedding_openai, elasticsearch_url=elasticsearch_url)
search_result = elastic_vector_search.similarity_search('sharks')
assert len(search_result) != 0 | @pytest.mark.vcr(ignore_localhost=True)
def test_default_index_from_documents(self, documents: List[Document],
embedding_openai: OpenAIEmbeddings, elasticsearch_url: str) ->None:
"""This test checks the construction of a default
Elasticsearch index using 'from_documents'."""
elastic_vector_search = ElasticVectorSearch.from_documents(documents=
documents, embedding=embedding_openai, elasticsearch_url=
elasticsearch_url)
search_result = elastic_vector_search.similarity_search('sharks')
assert len(search_result) != 0 | This test checks the construction of a default
Elasticsearch index using 'from_documents'. |
similarity_search_with_score_by_vector | try:
from sqlalchemy.engine import Row
except ImportError:
raise ImportError(
"Could not import Row from sqlalchemy.engine. Please 'pip install sqlalchemy>=1.4'."
)
filter_condition = ''
if filter is not None:
conditions = [f'metadata->>{key!r} = {value!r}' for key, value in
filter.items()]
filter_condition = f"WHERE {' AND '.join(conditions)}"
sql_query = f"""
SELECT *, l2_distance(embedding, :embedding) as distance
FROM {self.collection_name}
{filter_condition}
ORDER BY embedding <-> :embedding
LIMIT :k
"""
params = {'embedding': embedding, 'k': k}
with self.engine.connect() as conn:
results: Sequence[Row] = conn.execute(text(sql_query), params).fetchall()
documents_with_scores = [(Document(page_content=result.document, metadata=
result.metadata), result.distance if self.embedding_function is not
None else None) for result in results]
return documents_with_scores | def similarity_search_with_score_by_vector(self, embedding: List[float], k:
int=4, filter: Optional[dict]=None) ->List[Tuple[Document, float]]:
try:
from sqlalchemy.engine import Row
except ImportError:
raise ImportError(
"Could not import Row from sqlalchemy.engine. Please 'pip install sqlalchemy>=1.4'."
)
filter_condition = ''
if filter is not None:
conditions = [f'metadata->>{key!r} = {value!r}' for key, value in
filter.items()]
filter_condition = f"WHERE {' AND '.join(conditions)}"
sql_query = f"""
SELECT *, l2_distance(embedding, :embedding) as distance
FROM {self.collection_name}
{filter_condition}
ORDER BY embedding <-> :embedding
LIMIT :k
"""
params = {'embedding': embedding, 'k': k}
with self.engine.connect() as conn:
results: Sequence[Row] = conn.execute(text(sql_query), params
).fetchall()
documents_with_scores = [(Document(page_content=result.document,
metadata=result.metadata), result.distance if self.
embedding_function is not None else None) for result in results]
return documents_with_scores | null |
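To see what the interpolated statement looks like, a pure-string sketch of the SQL assembled above (table name and filter values are illustrative). Note that `:embedding` and `:k` are bind parameters, while the filter keys and values are interpolated directly into the string, so filter input should come from a trusted source:

```python
filter = {"page": "2"}
collection_name = "my_collection"  # illustrative table name

conditions = [f"metadata->>{key!r} = {value!r}" for key, value in filter.items()]
filter_condition = f"WHERE {' AND '.join(conditions)}"
sql_query = f"""
    SELECT *, l2_distance(embedding, :embedding) as distance
    FROM {collection_name}
    {filter_condition}
    ORDER BY embedding <-> :embedding
    LIMIT :k
"""
print(sql_query)  # ... WHERE metadata->>'page' = '2' ORDER BY embedding <-> :embedding ...
```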
test_tags_are_appended | """Test tags from with_config are concatenated with those in invocation."""
foo = RunnableLambda(lambda x: x).with_config({'tags': ['my_key']})
with collect_runs() as cb:
foo.invoke('hi', {'tags': ['invoked_key']})
run = cb.traced_runs[0]
assert isinstance(run.tags, list)
assert sorted(run.tags) == sorted(['my_key', 'invoked_key']) | def test_tags_are_appended() ->None:
"""Test tags from with_config are concatenated with those in invocation."""
foo = RunnableLambda(lambda x: x).with_config({'tags': ['my_key']})
with collect_runs() as cb:
foo.invoke('hi', {'tags': ['invoked_key']})
run = cb.traced_runs[0]
assert isinstance(run.tags, list)
assert sorted(run.tags) == sorted(['my_key', 'invoked_key']) | Test tags from with_config are concatenated with those in invocation. |
get_relevant_documents | """Retrieve documents relevant to a query.
Args:
query: string to find relevant documents for
callbacks: Callback manager or list of callbacks
tags: Optional list of tags associated with the retriever. Defaults to None
These tags will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
metadata: Optional metadata associated with the retriever. Defaults to None
This metadata will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
Returns:
List of relevant documents
"""
from langchain_core.callbacks.manager import CallbackManager
callback_manager = CallbackManager.configure(callbacks, None, verbose=
kwargs.get('verbose', False), inheritable_tags=tags, local_tags=self.
tags, inheritable_metadata=metadata, local_metadata=self.metadata)
run_manager = callback_manager.on_retriever_start(dumpd(self), query, name=
run_name, **kwargs)
try:
_kwargs = kwargs if self._expects_other_args else {}
if self._new_arg_supported:
result = self._get_relevant_documents(query, run_manager=
run_manager, **_kwargs)
else:
result = self._get_relevant_documents(query, **_kwargs)
except Exception as e:
run_manager.on_retriever_error(e)
raise e
else:
run_manager.on_retriever_end(result, **kwargs)
return result | def get_relevant_documents(self, query: str, *, callbacks: Callbacks=None,
tags: Optional[List[str]]=None, metadata: Optional[Dict[str, Any]]=None,
run_name: Optional[str]=None, **kwargs: Any) ->List[Document]:
"""Retrieve documents relevant to a query.
Args:
query: string to find relevant documents for
callbacks: Callback manager or list of callbacks
tags: Optional list of tags associated with the retriever. Defaults to None
These tags will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
metadata: Optional metadata associated with the retriever. Defaults to None
This metadata will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
Returns:
List of relevant documents
"""
from langchain_core.callbacks.manager import CallbackManager
callback_manager = CallbackManager.configure(callbacks, None, verbose=
kwargs.get('verbose', False), inheritable_tags=tags, local_tags=
self.tags, inheritable_metadata=metadata, local_metadata=self.metadata)
run_manager = callback_manager.on_retriever_start(dumpd(self), query,
name=run_name, **kwargs)
try:
_kwargs = kwargs if self._expects_other_args else {}
if self._new_arg_supported:
result = self._get_relevant_documents(query, run_manager=
run_manager, **_kwargs)
else:
result = self._get_relevant_documents(query, **_kwargs)
except Exception as e:
run_manager.on_retriever_error(e)
raise e
else:
run_manager.on_retriever_end(result, **kwargs)
return result | Retrieve documents relevant to a query.
Args:
query: string to find relevant documents for
callbacks: Callback manager or list of callbacks
tags: Optional list of tags associated with the retriever. Defaults to None
These tags will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
metadata: Optional metadata associated with the retriever. Defaults to None
This metadata will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
Returns:
List of relevant documents |
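A minimal end-to-end sketch of this dispatch from a custom retriever, assuming the standard `langchain_core` base class:

```python
from typing import List

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever

class ToyRetriever(BaseRetriever):
    """Returns one canned document; exists only to exercise the dispatch above."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return [Document(page_content=f"echo: {query}")]

docs = ToyRetriever().get_relevant_documents("hello", tags=["demo"])
print(docs[0].page_content)  # echo: hello
```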
test_include_types | structured_schema = {'node_props': {'Movie': [{'property': 'title', 'type':
'STRING'}], 'Actor': [{'property': 'name', 'type': 'STRING'}], 'Person':
[{'property': 'name', 'type': 'STRING'}]}, 'rel_props': {},
'relationships': [{'start': 'Actor', 'end': 'Movie', 'type': 'ACTED_IN'
}, {'start': 'Person', 'end': 'Movie', 'type': 'DIRECTED'}]}
include_types = ['Movie', 'Actor', 'ACTED_IN']
output = construct_schema(structured_schema, include_types, [])
expected_schema = """Node properties are the following:
Movie {title: STRING},Actor {name: STRING}
Relationship properties are the following:
The relationships are the following:
(:Actor)-[:ACTED_IN]->(:Movie)"""
assert output == expected_schema | def test_include_types() ->None:
structured_schema = {'node_props': {'Movie': [{'property': 'title',
'type': 'STRING'}], 'Actor': [{'property': 'name', 'type': 'STRING'
}], 'Person': [{'property': 'name', 'type': 'STRING'}]},
'rel_props': {}, 'relationships': [{'start': 'Actor', 'end':
'Movie', 'type': 'ACTED_IN'}, {'start': 'Person', 'end': 'Movie',
'type': 'DIRECTED'}]}
include_types = ['Movie', 'Actor', 'ACTED_IN']
output = construct_schema(structured_schema, include_types, [])
expected_schema = """Node properties are the following:
Movie {title: STRING},Actor {name: STRING}
Relationship properties are the following:
The relationships are the following:
(:Actor)-[:ACTED_IN]->(:Movie)"""
assert output == expected_schema | null |
test_hologres_with_filter_distant_match | """Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(texts=texts, table_name='test_table_filter',
embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas,
connection_string=CONNECTION_STRING, pre_delete_table=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={'page':
'2'})
assert output == [(Document(page_content='baz', metadata={'page': '2'}), 4.0)] | def test_hologres_with_filter_distant_match() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(texts=texts, table_name=
'test_table_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_table=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={
'page': '2'})
assert output == [(Document(page_content='baz', metadata={'page': '2'}),
4.0)] | Test end to end construction and search. |
test_provider_not_available | mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [{'error': {'message':
"""Amazon has returned an error:
An error occurred (TextSizeLimitExceededException)
when calling the DetectTargetedSentiment
operation: Input text size exceeds limit.
Max length of request text allowed is 5000 bytes
while in this request the text size is 47380 bytes"""
, 'type': 'ProviderInvalidInputTextLengthError'}, 'status': 'fail',
'provider': 'amazon', 'provider_status_code': 400, 'cost': 0.0}]
mock_post.return_value = mock_response
with pytest.raises(ValueError):
tool._run('some query') | def test_provider_not_available(mock_post: MagicMock) ->None:
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [{'error': {'message':
"""Amazon has returned an error:
An error occurred (TextSizeLimitExceededException)
when calling the DetectTargetedSentiment
operation: Input text size exceeds limit.
Max length of request text allowed is 5000 bytes
while in this request the text size is 47380 bytes"""
, 'type': 'ProviderInvalidInputTextLengthError'}, 'status': 'fail',
'provider': 'amazon', 'provider_status_code': 400, 'cost': 0.0}]
mock_post.return_value = mock_response
with pytest.raises(ValueError):
tool._run('some query') | null |
get_connection_string | connection_string: str = get_from_dict_or_env(data=kwargs, key=
'connection_string', env_key='POSTGRES_CONNECTION_STRING')
if not connection_string:
raise ValueError(
'Postgres connection string is required. Either pass it as a parameter or set the POSTGRES_CONNECTION_STRING environment variable.'
)
return connection_string | @classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) ->str:
connection_string: str = get_from_dict_or_env(data=kwargs, key=
'connection_string', env_key='POSTGRES_CONNECTION_STRING')
if not connection_string:
raise ValueError(
'Postgres connection string is required. Either pass it as a parameter or set the POSTGRES_CONNECTION_STRING environment variable.'
)
return connection_string | null |
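A simplified sketch of the `get_from_dict_or_env` lookup order assumed above: an explicit kwarg wins, then the environment variable. (The real utility typically raises its own error when neither source is set, which is why the `if not connection_string` branch rarely fires.)

```python
import os
from typing import Any, Dict

def get_from_dict_or_env(data: Dict[str, Any], key: str, env_key: str) -> str:
    # Prefer an explicit value in `data`, fall back to the environment.
    if data.get(key):
        return data[key]
    if env_key in os.environ:
        return os.environ[env_key]
    raise ValueError(f"Did not find {key}; set the `{env_key}` environment variable.")

os.environ["POSTGRES_CONNECTION_STRING"] = "postgresql://user:pw@localhost:5432/db"
print(get_from_dict_or_env({}, "connection_string", "POSTGRES_CONNECTION_STRING"))
```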
_AnnAssign | self.fill()
if not t.simple and isinstance(t.target, ast.Name):
self.write('(')
self.dispatch(t.target)
if not t.simple and isinstance(t.target, ast.Name):
self.write(')')
self.write(': ')
self.dispatch(t.annotation)
if t.value:
self.write(' = ')
self.dispatch(t.value) | def _AnnAssign(self, t):
self.fill()
if not t.simple and isinstance(t.target, ast.Name):
self.write('(')
self.dispatch(t.target)
if not t.simple and isinstance(t.target, ast.Name):
self.write(')')
self.write(': ')
self.dispatch(t.annotation)
if t.value:
self.write(' = ')
self.dispatch(t.value) | null |
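The doubled `isinstance` check handles annotated assignments whose target is a parenthesized (non-simple) name, which must be re-parenthesized to round-trip; CPython's own unparser behaves the same way:

```python
import ast  # ast.unparse requires Python 3.9+

# '(x): int = 5' parses with simple=0, so the target gets re-parenthesized.
print(ast.unparse(ast.parse("(x): int = 5")))  # (x): int = 5
print(ast.unparse(ast.parse("x: int = 5")))    # x: int = 5
```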
test_string_metadata | """Verify string metadata is loaded correctly"""
doc = next(doc for doc in docs if doc.metadata['source'] ==
'tags_and_frontmatter.md')
assert doc.metadata['aString'] == 'string value' | def test_string_metadata() ->None:
"""Verify string metadata is loaded correctly"""
doc = next(doc for doc in docs if doc.metadata['source'] ==
'tags_and_frontmatter.md')
assert doc.metadata['aString'] == 'string value' | Verify string metadata is loaded correctly |
test_agent_iterator_empty_input | """Test AgentExecutorIterator with empty input."""
agent = _get_agent()
agent_iter = agent.iter(inputs='')
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]['output'] | def test_agent_iterator_empty_input() ->None:
"""Test AgentExecutorIterator with empty input."""
agent = _get_agent()
agent_iter = agent.iter(inputs='')
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]['output'] | Test AgentExecutorIterator with empty input. |
similarity_search | """Run similarity search on query
Args:
query (str): Query
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query
"""
async def _similarity_search() ->List[Document]:
await self.initialize()
return await self.asimilarity_search(query, k, **kwargs)
return asyncio.run(_similarity_search()) | def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
"""Run similarity search on query
Args:
query (str): Query
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query
"""
async def _similarity_search() ->List[Document]:
await self.initialize()
return await self.asimilarity_search(query, k, **kwargs)
return asyncio.run(_similarity_search()) | Run similarity search on query
Args:
query (str): Query
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query |
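A standalone sketch of the sync-over-async bridge used here. One design caveat: `asyncio.run` raises `RuntimeError` when called from inside an already-running event loop (e.g. in a notebook), which is a known limitation of this pattern.

```python
import asyncio
from typing import List

async def _asearch(query: str) -> List[str]:
    await asyncio.sleep(0)          # stands in for self.initialize()
    return [f"doc for {query!r}"]   # stands in for asimilarity_search(...)

def similarity_search(query: str) -> List[str]:
    # Drive the async implementation from synchronous code.
    return asyncio.run(_asearch(query))

print(similarity_search("foo"))  # ["doc for 'foo'"]
```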
create_python_agent | """Construct a python agent from an LLM and tool."""
tools = [tool]
agent: BaseSingleActionAgent
if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=
callback_manager)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **
kwargs)
elif agent_type == AgentType.OPENAI_FUNCTIONS:
system_message = SystemMessage(content=prefix)
_prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
agent = OpenAIFunctionsAgent(llm=llm, prompt=_prompt, tools=tools,
callback_manager=callback_manager, **kwargs)
else:
raise ValueError(f'Agent type {agent_type} not supported at the moment.')
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
callback_manager=callback_manager, verbose=verbose, **
agent_executor_kwargs or {}) | def create_python_agent(llm: BaseLanguageModel, tool: PythonREPLTool,
agent_type: AgentType=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
callback_manager: Optional[BaseCallbackManager]=None, verbose: bool=
False, prefix: str=PREFIX, agent_executor_kwargs: Optional[Dict[str,
Any]]=None, **kwargs: Dict[str, Any]) ->AgentExecutor:
"""Construct a python agent from an LLM and tool."""
tools = [tool]
agent: BaseSingleActionAgent
if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=
callback_manager)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names,
**kwargs)
elif agent_type == AgentType.OPENAI_FUNCTIONS:
system_message = SystemMessage(content=prefix)
_prompt = OpenAIFunctionsAgent.create_prompt(system_message=
system_message)
agent = OpenAIFunctionsAgent(llm=llm, prompt=_prompt, tools=tools,
callback_manager=callback_manager, **kwargs)
else:
raise ValueError(
f'Agent type {agent_type} not supported at the moment.')
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
callback_manager=callback_manager, verbose=verbose, **
agent_executor_kwargs or {}) | Construct a python agent from an LLM and tool. |
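A hypothetical wiring example; the import paths and model name are assumptions based on typical LangChain package layouts:

```python
from langchain_experimental.agents.agent_toolkits import create_python_agent
from langchain_experimental.tools import PythonREPLTool
from langchain_openai import ChatOpenAI

executor = create_python_agent(
    llm=ChatOpenAI(model="gpt-4", temperature=0),  # assumed model name
    tool=PythonREPLTool(),
    verbose=True,
)
executor.run("What is 2 ** 10?")
```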
get_colored_text | """Get colored text."""
color_str = _TEXT_COLOR_MAPPING[color]
return f'\x1b[{color_str}m\x1b[1;3m{text}\x1b[0m' | def get_colored_text(text: str, color: str) ->str:
"""Get colored text."""
color_str = _TEXT_COLOR_MAPPING[color]
return f'\x1b[{color_str}m\x1b[1;3m{text}\x1b[0m' | Get colored text. |
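A self-contained sketch of the ANSI escape pattern; the color table below is an illustrative assumption, not the module's actual `_TEXT_COLOR_MAPPING`.

```python
_TEXT_COLOR_MAPPING = {"blue": "36;1", "yellow": "33;1", "red": "31;1", "green": "32;1"}

def get_colored_text(text: str, color: str) -> str:
    color_str = _TEXT_COLOR_MAPPING[color]
    # \x1b[<code>m sets the color, \x1b[1;3m adds bold+italic, \x1b[0m resets.
    return f"\x1b[{color_str}m\x1b[1;3m{text}\x1b[0m"

print(get_colored_text("hello", "green"))  # prints 'hello' in bold green italics
```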
validate_environment | """Validate environment.
Validate that an Apify API token is set and the apify-client
Python package exists in the current environment.
"""
apify_api_token = get_from_dict_or_env(values, 'apify_api_token',
'APIFY_API_TOKEN')
try:
from apify_client import ApifyClient, ApifyClientAsync
values['apify_client'] = ApifyClient(apify_api_token)
values['apify_client_async'] = ApifyClientAsync(apify_api_token)
except ImportError:
raise ImportError(
'Could not import apify-client Python package. Please install it with `pip install apify-client`.'
)
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate environment.
Validate that an Apify API token is set and the apify-client
Python package exists in the current environment.
"""
apify_api_token = get_from_dict_or_env(values, 'apify_api_token',
'APIFY_API_TOKEN')
try:
from apify_client import ApifyClient, ApifyClientAsync
values['apify_client'] = ApifyClient(apify_api_token)
values['apify_client_async'] = ApifyClientAsync(apify_api_token)
except ImportError:
raise ImportError(
'Could not import apify-client Python package. Please install it with `pip install apify-client`.'
)
return values | Validate environment.
Validate that an Apify API token is set and the apify-client
Python package exists in the current environment. |
_format_func | self._validate_func(func)
value = func.value
if isinstance(func, Comparator):
value = COMPARATOR_TO_BER[func]
return f'{value}' | def _format_func(self, func: Union[Operator, Comparator]) ->str:
self._validate_func(func)
value = func.value
if isinstance(func, Comparator):
value = COMPARATOR_TO_BER[func]
return f'{value}' | null |
embed_query | """
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
return [0.0, 0.0] | def embed_query(self, text: str) ->List[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
return [0.0, 0.0] | Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] ! |
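Worked examples of this toy embedding: numeric strings land on the unit circle at `angle * pi`, and anything non-numeric collapses to `[0, 0]`.

```python
import math

def embed_query(text: str) -> list:
    try:
        angle = float(text)
        return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
    except ValueError:
        return [0.0, 0.0]

print(embed_query("0.5"))   # ~[0.0, 1.0]  (angle pi/2)
print(embed_query("1"))     # ~[-1.0, 0.0] (angle pi)
print(embed_query("text"))  # [0.0, 0.0]
```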
test_no_result_call | """Test that call gives no result."""
search = GoogleSearchAPIWrapper()
output = search.run(
'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL'
)
print(type(output))
assert 'No good Google Search Result was found' == output | def test_no_result_call() ->None:
"""Test that call gives no result."""
search = GoogleSearchAPIWrapper()
output = search.run(
'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL'
)
print(type(output))
assert 'No good Google Search Result was found' == output | Test that call gives no result. |
identity | """An identity function"""
return x | def identity(x: Other) ->Other:
"""An identity function"""
return x | An identity function |
_parse_output | """Parse the output of the query to a markdown table."""
if 'results' in pbi_result:
rows = pbi_result['results'][0]['tables'][0]['rows']
if len(rows) == 0:
logger.info('0 records in result, query was valid.')
return (None,
'0 rows returned, this might be correct, but please validate if all filter values were correct?'
)
result = json_to_md(rows)
too_long, length = self._result_too_large(result)
if too_long:
return (
f'Result too large, please try to be more specific or use the `TOPN` function. The result is {length} tokens long, the limit is {self.output_token_limit} tokens.'
, None)
return result, None
if 'error' in pbi_result:
if 'pbi.error' in pbi_result['error'] and 'details' in pbi_result['error'][
'pbi.error']:
return None, pbi_result['error']['pbi.error']['details'][0]['detail']
return None, pbi_result['error']
return None, pbi_result | def _parse_output(self, pbi_result: Dict[str, Any]) ->Tuple[Optional[str],
Optional[Any]]:
"""Parse the output of the query to a markdown table."""
if 'results' in pbi_result:
rows = pbi_result['results'][0]['tables'][0]['rows']
if len(rows) == 0:
logger.info('0 records in result, query was valid.')
return (None,
'0 rows returned, this might be correct, but please validate if all filter values were correct?'
)
result = json_to_md(rows)
too_long, length = self._result_too_large(result)
if too_long:
return (
f'Result too large, please try to be more specific or use the `TOPN` function. The result is {length} tokens long, the limit is {self.output_token_limit} tokens.'
, None)
return result, None
if 'error' in pbi_result:
if 'pbi.error' in pbi_result['error'] and 'details' in pbi_result[
'error']['pbi.error']:
return None, pbi_result['error']['pbi.error']['details'][0][
'detail']
return None, pbi_result['error']
return None, pbi_result | Parse the output of the query to a markdown table. |
__init__ | """
Initialize ReadTheDocsLoader
The loader loops over all files under `path` and extracts the actual content of
the files by retrieving main html tags. Default main html tags include
`<main id="main-content>`, <`div role="main>`, and `<article role="main">`. You
can also define your own html tags by passing custom_html_tag, e.g.
`("div", "class=main")`. The loader iterates html tags with the order of
custom html tags (if exists) and default html tags. If any of the tags is not
empty, the loop will break and retrieve the content out of that tag.
Args:
path: The location of pulled readthedocs folder.
encoding: The encoding with which to open the documents.
errors: Specify how encoding and decoding errors are to be handled—this
cannot be used in binary mode.
custom_html_tag: Optional custom html tag to retrieve the content from
files.
patterns: The file patterns to load, passed to `glob.rglob`.
exclude_links_ratio: The ratio of links:content to exclude pages from.
This is to reduce the frequency at which index pages make their
way into retrieved results. Recommended: 0.5
kwargs: named arguments passed to `bs4.BeautifulSoup`.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
'Could not import python packages. Please install it with `pip install beautifulsoup4`. '
)
try:
_ = BeautifulSoup('<html><body>Parser builder library test.</body></html>',
'html.parser', **kwargs)
except Exception as e:
raise ValueError('Parsing kwargs do not appear valid') from e
self.file_path = Path(path)
self.encoding = encoding
self.errors = errors
self.custom_html_tag = custom_html_tag
self.patterns = patterns
self.bs_kwargs = kwargs
self.exclude_links_ratio = exclude_links_ratio | def __init__(self, path: Union[str, Path], encoding: Optional[str]=None,
errors: Optional[str]=None, custom_html_tag: Optional[Tuple[str, dict]]
=None, patterns: Sequence[str]=('*.htm', '*.html'), exclude_links_ratio:
float=1.0, **kwargs: Optional[Any]):
"""
Initialize ReadTheDocsLoader
The loader loops over all files under `path` and extracts the actual content of
the files by retrieving main html tags. Default main html tags include
`<main id="main-content>`, <`div role="main>`, and `<article role="main">`. You
can also define your own html tags by passing custom_html_tag, e.g.
`("div", "class=main")`. The loader iterates html tags with the order of
custom html tags (if exists) and default html tags. If any of the tags is not
empty, the loop will break and retrieve the content out of that tag.
Args:
path: The location of pulled readthedocs folder.
encoding: The encoding with which to open the documents.
errors: Specify how encoding and decoding errors are to be handled—this
cannot be used in binary mode.
custom_html_tag: Optional custom html tag to retrieve the content from
files.
patterns: The file patterns to load, passed to `glob.rglob`.
exclude_links_ratio: The ratio of links:content to exclude pages from.
This is to reduce the frequency at which index pages make their
way into retrieved results. Recommended: 0.5
kwargs: named arguments passed to `bs4.BeautifulSoup`.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
'Could not import python packages. Please install it with `pip install beautifulsoup4`. '
)
try:
_ = BeautifulSoup(
'<html><body>Parser builder library test.</body></html>',
'html.parser', **kwargs)
except Exception as e:
raise ValueError('Parsing kwargs do not appear valid') from e
self.file_path = Path(path)
self.encoding = encoding
self.errors = errors
self.custom_html_tag = custom_html_tag
self.patterns = patterns
self.bs_kwargs = kwargs
self.exclude_links_ratio = exclude_links_ratio | Initialize ReadTheDocsLoader
The loader loops over all files under `path` and extracts the actual content of
the files by retrieving main html tags. Default main html tags include
`<main id="main-content>`, <`div role="main>`, and `<article role="main">`. You
can also define your own html tags by passing custom_html_tag, e.g.
`("div", "class=main")`. The loader iterates html tags with the order of
custom html tags (if exists) and default html tags. If any of the tags is not
empty, the loop will break and retrieve the content out of that tag.
Args:
path: The location of pulled readthedocs folder.
encoding: The encoding with which to open the documents.
errors: Specify how encoding and decoding errors are to be handled—this
cannot be used in binary mode.
custom_html_tag: Optional custom html tag to retrieve the content from
files.
patterns: The file patterns to load, passed to `glob.rglob`.
exclude_links_ratio: The ratio of links:content to exclude pages from.
This is to reduce the frequency at which index pages make their
way into retrieved results. Recommended: 0.5
kwargs: named arguments passed to `bs4.BeautifulSoup`. |
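A hypothetical usage sketch; the directory path is an assumption. Note the signature types `custom_html_tag` as `Tuple[str, dict]`, so the dict form is used here rather than the docstring's `("div", "class=main")` shorthand.

```python
from langchain_community.document_loaders import ReadTheDocsLoader

loader = ReadTheDocsLoader(
    "rtdocs/",                                   # folder of pulled HTML pages (assumed)
    custom_html_tag=("div", {"class": "main"}),  # checked before the default tags
    exclude_links_ratio=0.5,                     # skip link-heavy index pages
)
docs = loader.load()
```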
render_text_description | """Render the tool name and description in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search
calculator: This tool is used for math
"""
return '\n'.join([f'{tool.name}: {tool.description}' for tool in tools]) | def render_text_description(tools: List[BaseTool]) ->str:
"""Render the tool name and description in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search
calculator: This tool is used for math
"""
return '\n'.join([f'{tool.name}: {tool.description}' for tool in tools]) | Render the tool name and description in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search
calculator: This tool is used for math |
requires_reference | """Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return False | @property
def requires_reference(self) ->bool:
"""Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return False | Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise. |
vector_dtype | return REDIS_VECTOR_DTYPE_MAP[self.content_vector.datatype] | @property
def vector_dtype(self) ->np.dtype:
return REDIS_VECTOR_DTYPE_MAP[self.content_vector.datatype] | null |
test_with_fallback_parser | class FirstCharParser(BaseBlobParser):
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the first character of a blob."""
yield Document(page_content=blob.as_string()[0])
class SecondCharParser(BaseBlobParser):
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the second character of a blob."""
yield Document(page_content=blob.as_string()[1])
class ThirdCharParser(BaseBlobParser):
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the third character of a blob."""
yield Document(page_content=blob.as_string()[2])
parser = MimeTypeBasedParser(handlers={'text/plain': FirstCharParser(),
'text/html': SecondCharParser()}, fallback_parser=ThirdCharParser())
blob = Blob(data=b'Hello World', mimetype='text/plain')
docs = parser.parse_folder(blob)
assert len(docs) == 1
doc = docs[0]
assert doc.page_content == 'H'
blob = Blob(data=b'Hello World', mimetype='text/html')
docs = parser.parse_folder(blob)
assert len(docs) == 1
doc = docs[0]
assert doc.page_content == 'e'
blob = Blob(data=b'Hello World', mimetype='text/csv')
docs = parser.parse_folder(blob)
assert len(docs) == 1
doc = docs[0]
assert doc.page_content == 'l' | def test_with_fallback_parser(self) ->None:
class FirstCharParser(BaseBlobParser):
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the first character of a blob."""
yield Document(page_content=blob.as_string()[0])
class SecondCharParser(BaseBlobParser):
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the second character of a blob."""
yield Document(page_content=blob.as_string()[1])
class ThirdCharParser(BaseBlobParser):
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the third character of a blob."""
yield Document(page_content=blob.as_string()[2])
parser = MimeTypeBasedParser(handlers={'text/plain': FirstCharParser(),
'text/html': SecondCharParser()}, fallback_parser=ThirdCharParser())
blob = Blob(data=b'Hello World', mimetype='text/plain')
docs = parser.parse(blob)
assert len(docs) == 1
doc = docs[0]
assert doc.page_content == 'H'
blob = Blob(data=b'Hello World', mimetype='text/html')
docs = parser.parse(blob)
assert len(docs) == 1
doc = docs[0]
assert doc.page_content == 'e'
blob = Blob(data=b'Hello World', mimetype='text/csv')
docs = parser.parse(blob)
assert len(docs) == 1
doc = docs[0]
assert doc.page_content == 'l' | null |
create_openapi_agent | """Construct an OpenAPI agent from an LLM and tools.
*Security Note*: When creating an OpenAPI agent, check the permissions
and capabilities of the underlying toolkit.
For example, the default implementation of OpenAPIToolkit
uses the RequestsToolkit, which contains tools to make arbitrary
network requests against any URL (e.g., GET, POST, PATCH, PUT, DELETE).
Control access to who can issue requests using this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
"""
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
tools = toolkit.get_tools()
prompt_params = {'format_instructions': format_instructions
} if format_instructions is not None else {}
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix=suffix,
input_variables=input_variables, **prompt_params)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
callback_manager=callback_manager, verbose=verbose,
return_intermediate_steps=return_intermediate_steps, max_iterations=
max_iterations, max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method, **agent_executor_kwargs or {}) | def create_openapi_agent(llm: BaseLanguageModel, toolkit: OpenAPIToolkit,
callback_manager: Optional[BaseCallbackManager]=None, prefix: str=
OPENAPI_PREFIX, suffix: str=OPENAPI_SUFFIX, format_instructions:
Optional[str]=None, input_variables: Optional[List[str]]=None,
max_iterations: Optional[int]=15, max_execution_time: Optional[float]=
None, early_stopping_method: str='force', verbose: bool=False,
return_intermediate_steps: bool=False, agent_executor_kwargs: Optional[
Dict[str, Any]]=None, **kwargs: Any) ->AgentExecutor:
"""Construct an OpenAPI agent from an LLM and tools.
*Security Note*: When creating an OpenAPI agent, check the permissions
and capabilities of the underlying toolkit.
For example, the default implementation of OpenAPIToolkit
uses the RequestsToolkit, which contains tools to make arbitrary
network requests against any URL (e.g., GET, POST, PATCH, PUT, DELETE).
Control access to who can issue requests using this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
"""
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
tools = toolkit.get_tools()
prompt_params = {'format_instructions': format_instructions
} if format_instructions is not None else {}
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix=
suffix, input_variables=input_variables, **prompt_params)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=
callback_manager)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **
kwargs)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
callback_manager=callback_manager, verbose=verbose,
return_intermediate_steps=return_intermediate_steps, max_iterations
=max_iterations, max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method, **
agent_executor_kwargs or {}) | Construct an OpenAPI agent from an LLM and tools.
*Security Note*: When creating an OpenAPI agent, check the permissions
and capabilities of the underlying toolkit.
For example, the default implementation of OpenAPIToolkit
uses the RequestsToolkit, which contains tools to make arbitrary
network requests against any URL (e.g., GET, POST, PATCH, PUT, DELETE).
Control access to who can issue requests using this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information. |
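A hedged sketch of wiring create_openapi_agent together; `llm` and `toolkit` are assumed to have been constructed elsewhere (an OpenAPIToolkit typically needs a JSON spec and a requests wrapper), so treat this as illustrative wiring rather than a runnable recipe.
# `llm` is any BaseLanguageModel, `toolkit` an OpenAPIToolkit built elsewhere.
agent_executor = create_openapi_agent(llm=llm, toolkit=toolkit, verbose=True, max_iterations=10)
result = agent_executor.run('What operations does this API support?')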
test_intervention_chain | """Test InterventionChain translates a hypothetical into a new value setting."""
llm = OpenAI(temperature=0, max_tokens=512)
story_conditions_chain = InterventionChain.from_univariate_prompt(llm)
question = 'if cindy has ten pets'
data = story_conditions_chain(question)[Constant.chain_data.value]
self.assertEqual(type(data), InterventionModel) | def test_intervention_chain(self) ->None:
"""Test InterventionChain translates a hypothetical into a new value setting."""
llm = OpenAI(temperature=0, max_tokens=512)
story_conditions_chain = InterventionChain.from_univariate_prompt(llm)
question = 'if cindy has ten pets'
data = story_conditions_chain(question)[Constant.chain_data.value]
self.assertEqual(type(data), InterventionModel) | Test InterventionChain translates a hypothetical into a new value setting. |
__init__ | """Create a new TextSplitter."""
super().__init__(**kwargs)
try:
import tiktoken
except ImportError:
raise ImportError(
'Could not import tiktoken python package. This is needed in order to use TokenTextSplitter. Please install it with `pip install tiktoken`.'
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
self._tokenizer = enc
self._allowed_special = allowed_special
self._disallowed_special = disallowed_special | def __init__(self, encoding_name: str='gpt2', model_name: Optional[str]=
None, allowed_special: Union[Literal['all'], AbstractSet[str]]=set(),
disallowed_special: Union[Literal['all'], Collection[str]]='all', **
kwargs: Any) ->None:
"""Create a new TextSplitter."""
super().__init__(**kwargs)
try:
import tiktoken
except ImportError:
raise ImportError(
'Could not import tiktoken python package. This is needed in order to use TokenTextSplitter. Please install it with `pip install tiktoken`.'
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
self._tokenizer = enc
self._allowed_special = allowed_special
self._disallowed_special = disallowed_special | Create a new TextSplitter. |
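A small usage sketch for the splitter above, assuming tiktoken is installed; chunk_size and chunk_overlap are accepted by the TextSplitter base class.
splitter = TokenTextSplitter(encoding_name='gpt2', chunk_size=16, chunk_overlap=0)
chunks = splitter.split_text('A long document that should be split on token boundaries.')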
_combine_documents | doc_strings = [format_document(doc, document_prompt) for doc in docs]
return document_separator.join(doc_strings) | def _combine_documents(docs, document_prompt=DOCUMENT_PROMPT,
document_separator='\n\n'):
doc_strings = [format_document(doc, document_prompt) for doc in docs]
return document_separator.join(doc_strings) | null |
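A minimal illustration of _combine_documents; the '{page_content}' template mirrors the usual default DOCUMENT_PROMPT in these chains, and the import paths are assumptions.
from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate

doc_prompt = PromptTemplate.from_template('{page_content}')
docs = [Document(page_content='first'), Document(page_content='second')]
_combine_documents(docs, document_prompt=doc_prompt)  # -> 'first\n\nsecond'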
_convert_delta_to_message_chunk | role = _dict.get('role')
content = _dict.get('content') or ''
if role == 'user' or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == 'assistant' or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content) | def _convert_delta_to_message_chunk(_dict: Mapping[str, Any], default_class:
Type[BaseMessageChunk]) ->BaseMessageChunk:
role = _dict.get('role')
content = _dict.get('content') or ''
if role == 'user' or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == 'assistant' or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content) | null |
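A quick sketch of how the delta converter behaves on a typical streaming chunk; the AIMessageChunk import path is an assumption based on recent LangChain versions.
from langchain_core.messages import AIMessageChunk

chunk = _convert_delta_to_message_chunk({'role': 'assistant', 'content': 'Hi'}, AIMessageChunk)
assert chunk == AIMessageChunk(content='Hi')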
from_documents | """Return Vearch VectorStore"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts=texts, embedding=embedding, metadatas=metadatas,
path_or_url=path_or_url, table_name=table_name, db_name=db_name, flag=
flag, **kwargs) | @classmethod
def from_documents(cls: Type[Vearch], documents: List[Document], embedding:
Embeddings, path_or_url: Optional[str]=None, table_name: str=
_DEFAULT_TABLE_NAME, db_name: str=_DEFAULT_CLUSTER_DB_NAME, flag: int=
_DEFAULT_VERSION, **kwargs: Any) ->Vearch:
"""Return Vearch VectorStore"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts=texts, embedding=embedding, metadatas=
metadatas, path_or_url=path_or_url, table_name=table_name, db_name=
db_name, flag=flag, **kwargs) | Return Vearch VectorStore |
from_llm | """Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
_cypher_prompt = cypher_prompt or PROMPT_SELECTOR.get_prompt(llm)
cypher_generation_chain = LLMChain(llm=llm, prompt=_cypher_prompt)
return cls(qa_chain=qa_chain, cypher_generation_chain=
cypher_generation_chain, extra_instructions=extra_instructions, **kwargs) | @classmethod
def from_llm(cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate=
CYPHER_QA_PROMPT, cypher_prompt: Optional[BasePromptTemplate]=None,
extra_instructions: Optional[str]=None, **kwargs: Any
) ->NeptuneOpenCypherQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
_cypher_prompt = cypher_prompt or PROMPT_SELECTOR.get_prompt(llm)
cypher_generation_chain = LLMChain(llm=llm, prompt=_cypher_prompt)
return cls(qa_chain=qa_chain, cypher_generation_chain=
cypher_generation_chain, extra_instructions=extra_instructions, **
kwargs) | Initialize from LLM. |
ExactRetrievalStrategy | """Used to perform brute force / exact
nearest neighbor search via script_score."""
return ExactRetrievalStrategy() | @staticmethod
def ExactRetrievalStrategy() ->'ExactRetrievalStrategy':
"""Used to perform brute force / exact
nearest neighbor search via script_score."""
return ExactRetrievalStrategy() | Used to perform brute force / exact
nearest neighbor search via script_score. |
add_texts | """Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts = list(texts)
embeddings = self._embed_documents(texts)
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids) | def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts = list(texts)
embeddings = self._embed_documents(texts)
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids) | Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore. |
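A hedged usage sketch; `store` stands for an already-constructed instance of the vector store this method belongs to.
ids = store.add_texts(
    texts=['alpha', 'beta'],
    metadatas=[{'source': 'a'}, {'source': 'b'}],
)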
load | try:
import vowpal_wabbit_next as vw
except ImportError as e:
raise ImportError(
'Unable to import vowpal_wabbit_next, please install with `pip install vowpal_wabbit_next`.'
) from e
model_data = None
if self.model_path.exists():
with open(self.model_path, 'rb') as f:
model_data = f.read()
if model_data:
logger.info(f'rl_chain model is loaded from: {self.model_path}')
return vw.Workspace(commandline, model_data=model_data)
return vw.Workspace(commandline) | def load(self, commandline: List[str]) ->'vw.Workspace':
try:
import vowpal_wabbit_next as vw
except ImportError as e:
raise ImportError(
'Unable to import vowpal_wabbit_next, please install with `pip install vowpal_wabbit_next`.'
) from e
model_data = None
if self.model_path.exists():
with open(self.model_path, 'rb') as f:
model_data = f.read()
if model_data:
logger.info(f'rl_chain model is loaded from: {self.model_path}')
return vw.Workspace(commandline, model_data=model_data)
return vw.Workspace(commandline) | null |
validate_environments | """Validate Arcee environment variables."""
values['arcee_api_key'] = convert_to_secret_str(get_from_dict_or_env(values,
'arcee_api_key', 'ARCEE_API_KEY'))
values['arcee_api_url'] = get_from_dict_or_env(values, 'arcee_api_url',
'ARCEE_API_URL')
values['arcee_app_url'] = get_from_dict_or_env(values, 'arcee_app_url',
'ARCEE_APP_URL')
values['arcee_api_version'] = get_from_dict_or_env(values,
'arcee_api_version', 'ARCEE_API_VERSION')
if values['model_kwargs']:
kw = values['model_kwargs']
if kw.get('size') is not None:
if kw.get('size') < 0:
raise ValueError('`size` must not be negative.')
if kw.get('filters') is not None:
if not isinstance(kw.get('filters'), List):
raise ValueError('`filters` must be a list.')
for f in kw.get('filters'):
DALMFilter(**f)
return values | @root_validator()
def validate_environments(cls, values: Dict) ->Dict:
"""Validate Arcee environment variables."""
values['arcee_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'arcee_api_key', 'ARCEE_API_KEY'))
values['arcee_api_url'] = get_from_dict_or_env(values, 'arcee_api_url',
'ARCEE_API_URL')
values['arcee_app_url'] = get_from_dict_or_env(values, 'arcee_app_url',
'ARCEE_APP_URL')
values['arcee_api_version'] = get_from_dict_or_env(values,
'arcee_api_version', 'ARCEE_API_VERSION')
if values['model_kwargs']:
kw = values['model_kwargs']
if kw.get('size') is not None:
if kw.get('size') < 0:
raise ValueError('`size` must not be negative.')
if kw.get('filters') is not None:
if not isinstance(kw.get('filters'), List):
raise ValueError('`filters` must be a list.')
for f in kw.get('filters'):
DALMFilter(**f)
return values | Validate Arcee environment variables. |
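The filter entries validated above must deserialize into DALMFilter; the field names below are hypothetical placeholders, not the confirmed DALMFilter schema.
# Hypothetical model_kwargs shape that would pass the checks above:
model_kwargs = {
    'size': 3,  # must be >= 0
    'filters': [{'field_name': 'document', 'filter_type': 'fuzzy_search', 'value': 'Einstein'}],  # hypothetical fields
}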
test_memory_with_message_store | try:
from streamlit.testing.script_interactions import InteractiveScriptTests
except ModuleNotFoundError:
pytest.skip('Incorrect version of Streamlit installed')
test_handler = InteractiveScriptTests()
test_handler.setUp()
try:
sr = test_handler.script_from_string(test_script).run()
except TypeError:
sr = test_handler.script_from_string('memory_test.py', test_script).run()
messages_json = sr.get('text')[-1].value
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
sr = sr.get('checkbox')[0].uncheck().run()
assert sr.get('markdown')[0].value == 'Skipped add'
messages_json = sr.get('text')[-1].value
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
sr = sr.get('checkbox')[1].check().run()
assert sr.get('markdown')[1].value == 'Cleared!'
messages_json = sr.get('text')[-1].value
assert messages_json == '[]' | @pytest.mark.requires('streamlit')
def test_memory_with_message_store() ->None:
try:
from streamlit.testing.script_interactions import InteractiveScriptTests
except ModuleNotFoundError:
pytest.skip('Incorrect version of Streamlit installed')
test_handler = InteractiveScriptTests()
test_handler.setUp()
try:
sr = test_handler.script_from_string(test_script).run()
except TypeError:
sr = test_handler.script_from_string('memory_test.py', test_script
).run()
messages_json = sr.get('text')[-1].value
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
sr = sr.get('checkbox')[0].uncheck().run()
assert sr.get('markdown')[0].value == 'Skipped add'
messages_json = sr.get('text')[-1].value
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
sr = sr.get('checkbox')[1].check().run()
assert sr.get('markdown')[1].value == 'Cleared!'
messages_json = sr.get('text')[-1].value
assert messages_json == '[]' | null |
_import_gpt4all | from langchain_community.llms.gpt4all import GPT4All
return GPT4All | def _import_gpt4all() ->Any:
from langchain_community.llms.gpt4all import GPT4All
return GPT4All | null |
format_request_payload | """Formats the request according to the chosen api"""
return str.encode(prompt) | def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes:
"""Formats the request according to the chosen api"""
return str.encode(prompt) | Formats the request according to the chosen api |
_stream | response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
for i_c, c in enumerate(response):
if self.sleep is not None:
time.sleep(self.sleep)
if (self.error_on_chunk_number is not None and i_c == self.
error_on_chunk_number):
raise Exception('Fake error')
yield ChatGenerationChunk(message=AIMessageChunk(content=c)) | def _stream(self, messages: List[BaseMessage], stop: Union[List[str], None]
=None, run_manager: Union[CallbackManagerForLLMRun, None]=None, **
kwargs: Any) ->Iterator[ChatGenerationChunk]:
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
for i_c, c in enumerate(response):
if self.sleep is not None:
time.sleep(self.sleep)
if (self.error_on_chunk_number is not None and i_c == self.
error_on_chunk_number):
raise Exception('Fake error')
yield ChatGenerationChunk(message=AIMessageChunk(content=c)) | null |
run | from diffusers.utils import load_image
try:
new_args = copy.deepcopy(self.args)
for k, v in new_args.items():
if k == 'image':
new_args['image'] = load_image(v)
if self.task in ['video_generator', 'image_generator', 'text_reader']:
self.product = self.tool(**new_args)
else:
self.result = self.tool(**new_args)
except Exception as e:
self.status = 'failed'
self.message = str(e)
# return early so 'failed' is not overwritten by the success path below
return self.message
self.status = 'completed'
self.save_product()
return self.result | def run(self) ->str:
from diffusers.utils import load_image
try:
new_args = copy.deepcopy(self.args)
for k, v in new_args.items():
if k == 'image':
new_args['image'] = load_image(v)
if self.task in ['video_generator', 'image_generator', 'text_reader']:
self.product = self.tool(**new_args)
else:
self.result = self.tool(**new_args)
except Exception as e:
self.status = 'failed'
self.message = str(e)
# return early so 'failed' is not overwritten by the success path below
return self.message
self.status = 'completed'
self.save_product()
return self.result | null |
test_failure_wrong_ticker | """Test that the tool fails."""
tool = YahooFinanceNewsTool()
query = 'NOT_A_COMPANY'
result = tool.run(query)
assert f'Company ticker {query} not found.' in result | def test_failure_wrong_ticker() ->None:
"""Test that the tool fails."""
tool = YahooFinanceNewsTool()
query = 'NOT_A_COMPANY'
result = tool.run(query)
assert f'Company ticker {query} not found.' in result | Test that the tool fails. |
similarity_search | """Run similarity search with PGVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(embedding=embedding, k=k, filter=filter
) | def similarity_search(self, query: str, k: int=4, filter: Optional[dict]=
None, **kwargs: Any) ->List[Document]:
"""Run similarity search with PGVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(embedding=embedding, k=k,
filter=filter) | Run similarity search with PGVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query. |
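A hedged call sketch against an existing PGVector store instance:
docs = store.similarity_search('what is a vector index?', k=4, filter={'topic': 'indexing'})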
_run | """Run the tool."""
try:
create_message = self._prepare_message(message, to, subject, cc=cc, bcc=bcc
)
send_message = self.api_resource.users().messages().send(userId='me',
body=create_message)
sent_message = send_message.execute()
return f"Message sent. Message Id: {sent_message['id']}"
except Exception as error:
raise Exception(f'An error occurred: {error}') | def _run(self, message: str, to: Union[str, List[str]], subject: str, cc:
Optional[Union[str, List[str]]]=None, bcc: Optional[Union[str, List[str
]]]=None, run_manager: Optional[CallbackManagerForToolRun]=None) ->str:
"""Run the tool."""
try:
create_message = self._prepare_message(message, to, subject, cc=cc,
bcc=bcc)
send_message = self.api_resource.users().messages().send(userId=
'me', body=create_message)
sent_message = send_message.execute()
return f"Message sent. Message Id: {sent_message['id']}"
except Exception as error:
raise Exception(f'An error occurred: {error}') | Run the tool. |
evaluate_run | if not run.inputs.get('chat_history'):
return EvaluationResult(key='response_effectiveness', comment=
'No chat history present.')
elif 'last_run_id' not in run.inputs:
return EvaluationResult(key='response_effectiveness', comment=
'No last run ID present.')
eval_grade: Optional[dict] = self.runnable.invoke(run.inputs)
target_run_id = run.inputs['last_run_id']
return EvaluationResult(**eval_grade, key='response_effectiveness',
target_run_id=target_run_id) | def evaluate_run(self, run: Run, example: Optional[Example]=None
) ->EvaluationResult:
if not run.inputs.get('chat_history'):
return EvaluationResult(key='response_effectiveness', comment=
'No chat history present.')
elif 'last_run_id' not in run.inputs:
return EvaluationResult(key='response_effectiveness', comment=
'No last run ID present.')
eval_grade: Optional[dict] = self.runnable.invoke(run.inputs)
target_run_id = run.inputs['last_run_id']
return EvaluationResult(**eval_grade, key='response_effectiveness',
target_run_id=target_run_id) | null |
validate_environment | """Validate that api key and python package exists in environment."""
pipeline_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'pipeline_api_key', 'PIPELINE_API_KEY'))
values['pipeline_api_key'] = pipeline_api_key
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
pipeline_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'pipeline_api_key', 'PIPELINE_API_KEY'))
values['pipeline_api_key'] = pipeline_api_key
return values | Validate that api key and python package exists in environment. |
build_model_kwargs | """Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get('model_kwargs', {})
values['model_kwargs'] = build_extra_kwargs(extra, values,
all_required_field_names)
return values | @root_validator(pre=True)
def build_model_kwargs(cls, values: Dict[str, Any]) ->Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get('model_kwargs', {})
values['model_kwargs'] = build_extra_kwargs(extra, values,
all_required_field_names)
return values | Build extra kwargs from additional params that were passed in. |
test_tool_usage | parser = XMLAgentOutputParser()
_input = '<tool>search</tool><tool_input>foo</tool_input>'
output = parser.invoke(_input)
expected_output = AgentAction(tool='search', tool_input='foo', log=_input)
assert output == expected_output
# second case: closing </tool_input> omitted (e.g. consumed as a stop token)
_input = '<tool>search</tool><tool_input>foo'
output = parser.invoke(_input)
expected_output = AgentAction(tool='search', tool_input='foo', log=_input)
assert output == expected_output | def test_tool_usage() ->None:
parser = XMLAgentOutputParser()
_input = '<tool>search</tool><tool_input>foo</tool_input>'
output = parser.invoke(_input)
expected_output = AgentAction(tool='search', tool_input='foo', log=_input)
assert output == expected_output
# second case: closing </tool_input> omitted (e.g. consumed as a stop token)
_input = '<tool>search</tool><tool_input>foo'
output = parser.invoke(_input)
expected_output = AgentAction(tool='search', tool_input='foo', log=_input)
assert output == expected_output | null |
_join_docs | text = separator.join(docs)
if self._strip_whitespace:
text = text.strip()
if text == '':
return None
else:
return text | def _join_docs(self, docs: List[str], separator: str) ->Optional[str]:
text = separator.join(docs)
if self._strip_whitespace:
text = text.strip()
if text == '':
return None
else:
return text | null |
get_service_url | service_url: str = get_from_dict_or_env(data=kwargs, key='service_url',
env_key='TIMESCALE_SERVICE_URL')
if not service_url:
raise ValueError(
'Postgres connection string is required. Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable.'
)
return service_url | @classmethod
def get_service_url(cls, kwargs: Dict[str, Any]) ->str:
service_url: str = get_from_dict_or_env(data=kwargs, key='service_url',
env_key='TIMESCALE_SERVICE_URL')
if not service_url:
raise ValueError(
'Postgres connection string is required. Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable.'
)
return service_url | null |
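A small sketch of the environment-variable fallback, assuming the enclosing class is the Timescale vector store:
import os

os.environ['TIMESCALE_SERVICE_URL'] = 'postgres://user:pass@host:5432/tsdb'
service_url = TimescaleVector.get_service_url({})  # empty dict, so it falls back to the env var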
_get_docs | """Get docs."""
return self.retriever.get_relevant_documents(question, callbacks=
run_manager.get_child()) | def _get_docs(self, question: str, *, run_manager: CallbackManagerForChainRun
) ->List[Document]:
"""Get docs."""
return self.retriever.get_relevant_documents(question, callbacks=
run_manager.get_child()) | Get docs. |
test_parsers_public_api_correct | """Test public API of parsers for breaking changes."""
assert set(__all__) == {'AzureAIDocumentIntelligenceParser',
'BS4HTMLParser', 'DocAIParser', 'GrobidParser', 'LanguageParser',
'OpenAIWhisperParser', 'PyPDFParser', 'PDFMinerParser', 'PyMuPDFParser',
'PyPDFium2Parser', 'PDFPlumberParser'} | def test_parsers_public_api_correct() ->None:
"""Test public API of parsers for breaking changes."""
assert set(__all__) == {'AzureAIDocumentIntelligenceParser',
'BS4HTMLParser', 'DocAIParser', 'GrobidParser', 'LanguageParser',
'OpenAIWhisperParser', 'PyPDFParser', 'PDFMinerParser',
'PyMuPDFParser', 'PyPDFium2Parser', 'PDFPlumberParser'} | Test public API of parsers for breaking changes. |
build_extra | """Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values | @root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.
values()}
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values | Build extra kwargs from additional params that were passed in. |
_import_jira | from langchain_community.utilities.jira import JiraAPIWrapper
return JiraAPIWrapper | def _import_jira() ->Any:
from langchain_community.utilities.jira import JiraAPIWrapper
return JiraAPIWrapper | null |
__init__ | """Instantiate FinalStreamingStdOutCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Whether to ignore whitespace and newlines when comparing
answer_prefix_tokens to the last tokens (to determine whether the
answer has been reached).
stream_prefix: Whether the answer prefix itself should also be streamed.
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [token.strip() for token in self.
answer_prefix_tokens]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [''] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [''] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False | def __init__(self, *, answer_prefix_tokens: Optional[List[str]]=None,
strip_tokens: bool=True, stream_prefix: bool=False) ->None:
"""Instantiate FinalStreamingStdOutCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Whether to ignore whitespace and newlines when comparing
answer_prefix_tokens to the last tokens (to determine whether the
answer has been reached).
stream_prefix: Whether the answer prefix itself should also be streamed.
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [token.strip() for token in
self.answer_prefix_tokens]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [''] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [''] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False | Instantiate FinalStreamingStdOutCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Whether to ignore whitespace and newlines when comparing
answer_prefix_tokens to the last tokens (to determine whether the
answer has been reached).
stream_prefix: Whether the answer prefix itself should also be streamed. |
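A hedged wiring example; attaching the handler to a streaming LLM causes only the tokens after the answer prefix to reach stdout. The OpenAI import path is an assumption.
from langchain_community.llms import OpenAI

handler = FinalStreamingStdOutCallbackHandler(answer_prefix_tokens=['Final', 'Answer', ':'])
llm = OpenAI(streaming=True, callbacks=[handler], temperature=0)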
evaluation_name | """The name of the evaluation."""
return self.__class__.__name__ | @property
def evaluation_name(self) ->str:
"""The name of the evaluation."""
return self.__class__.__name__ | The name of the evaluation. |
similarity_search_with_relevance_scores | """Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Do not let end users fill this, and always be aware
of SQL injection. When dealing with metadata, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of documents most similar to the query text
and cosine distance in float for each.
Lower score represents more similarity.
"""
q_str = self._build_qstr(self._embeddings.embed_query(query), k, where_str)
try:
return [(Document(page_content=r[self.config.column_map['text']],
metadata={k: r[k] for k in self.must_have_cols}), r['dist']) for r in
self.client.query(q_str).named_results()]
except Exception as e:
logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m')
return [] | def similarity_search_with_relevance_scores(self, query: str, k: int=4,
where_str: Optional[str]=None, **kwargs: Any) ->List[Tuple[Document, float]
]:
"""Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Do not let end users fill this, and always be aware
of SQL injection. When dealing with metadata, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of documents most similar to the query text
and cosine distance in float for each.
Lower score represents more similarity.
"""
q_str = self._build_qstr(self._embeddings.embed_query(query), k, where_str)
try:
return [(Document(page_content=r[self.config.column_map['text']],
metadata={k: r[k] for k in self.must_have_cols}), r['dist']) for
r in self.client.query(q_str).named_results()]
except Exception as e:
logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m'
)
return [] | Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Do not let end users fill this, and always be aware
of SQL injection. When dealing with metadata, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of documents most similar to the query text
and cosine distance in float for each.
Lower score represents more similarity. |
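A hedged usage sketch against an existing MyScale store; per the docstring's warning, where_str is interpolated into SQL and must never come from end users.
docs_and_scores = store.similarity_search_with_relevance_scores(
    'vector databases', k=4, where_str="metadata.category = 'docs'")
for doc, dist in docs_and_scores:
    print(dist, doc.page_content[:60])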
test_from_filesystem_using_default_parser | """Use the default generic parser."""
loader = GenericLoader.from_filesystem(toy_dir, suffixes=['.txt'])
docs = loader.load()
assert len(docs) == 3
assert docs[0].page_content == 'This is a test.txt file.' | def test_from_filesystem_using_default_parser(toy_dir: str) ->None:
"""Use the default generic parser."""
loader = GenericLoader.from_filesystem(toy_dir, suffixes=['.txt'])
docs = loader.load()
assert len(docs) == 3
assert docs[0].page_content == 'This is a test.txt file.' | Use the default generic parser. |
test_visit_operation | op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=
Comparator.LT, attribute='abc', value='4')])
expected = '(( foo < 2 ) and ( bar == "baz" ) and ( abc < "4" ))'
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
op = Operation(operator=Operator.NOT, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2)])
expected = 'not(( foo < 2 ))'
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
op = Operation(operator=Operator.NOT, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=
Comparator.LT, attribute='abc', value='4')])
try:
DEFAULT_TRANSLATOR.visit_operation(op)
except ValueError as e:
assert str(e) == '"not" can have only one argument in Milvus'
else:
assert False, 'Expected exception not raised' | def test_visit_operation() ->None:
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator
=Comparator.LT, attribute='abc', value='4')])
expected = '(( foo < 2 ) and ( bar == "baz" ) and ( abc < "4" ))'
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
op = Operation(operator=Operator.NOT, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2)])
expected = 'not(( foo < 2 ))'
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
op = Operation(operator=Operator.NOT, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator
=Comparator.LT, attribute='abc', value='4')])
try:
DEFAULT_TRANSLATOR.visit_operation(op)
except ValueError as e:
assert str(e) == '"not" can have only one argument in Milvus'
else:
assert False, 'Expected exception not raised' | null |
_import_spark_sql_tool_BaseSparkSQLTool | from langchain_community.tools.spark_sql.tool import BaseSparkSQLTool
return BaseSparkSQLTool | def _import_spark_sql_tool_BaseSparkSQLTool() ->Any:
from langchain_community.tools.spark_sql.tool import BaseSparkSQLTool
return BaseSparkSQLTool | null |
load_response_generator | llm_chain = ResponseGenerationChain.from_llm(llm)
return ResponseGenerator(llm_chain=llm_chain) | def load_response_generator(llm: BaseLanguageModel) ->ResponseGenerator:
llm_chain = ResponseGenerationChain.from_llm(llm)
return ResponseGenerator(llm_chain=llm_chain) | null |
validate_since | if v:
try:
datetime.strptime(v, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
raise ValueError(
f"Invalid value for 'since'. Expected a date string in YYYY-MM-DDTHH:MM:SSZ format. Received: {v}"
)
return v | @validator('since')
def validate_since(cls, v: Optional[str]) ->Optional[str]:
if v:
try:
datetime.strptime(v, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
raise ValueError(
f"Invalid value for 'since'. Expected a date string in YYYY-MM-DDTHH:MM:SSZ format. Received: {v}"
)
return v | null |
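For reference, a value that satisfies the validator above:
from datetime import datetime

datetime.strptime('2023-05-01T10:30:00Z', '%Y-%m-%dT%H:%M:%SZ')  # parses, so 'since' is accepted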
test_cassandra_semantic_cache | session, keyspace = cassandra_connection
sem_cache = CassandraSemanticCache(session=session, keyspace=keyspace,
embedding=FakeEmbeddings())
set_llm_cache(sem_cache)
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
output = llm.generate(['bar'])
expected_output = LLMResult(generations=[[Generation(text='fizz')]],
llm_output={})
assert output == expected_output
sem_cache.clear()
output = llm.generate(['bar'])
assert output != expected_output
sem_cache.clear() | def test_cassandra_semantic_cache(cassandra_connection: Tuple[Any, str]
) ->None:
session, keyspace = cassandra_connection
sem_cache = CassandraSemanticCache(session=session, keyspace=keyspace,
embedding=FakeEmbeddings())
set_llm_cache(sem_cache)
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
output = llm.generate(['bar'])
expected_output = LLMResult(generations=[[Generation(text='fizz')]],
llm_output={})
assert output == expected_output
sem_cache.clear()
output = llm.generate(['bar'])
assert output != expected_output
sem_cache.clear() | null |
messages | """Return the messages that correspond to this observation."""
return [AIMessage(content=self.log)] | @property
def messages(self) ->Sequence[BaseMessage]:
"""Return the messages that correspond to this observation."""
return [AIMessage(content=self.log)] | Return the messages that correspond to this observation. |
chain | """Decorate a function to make it a Runnable.
Sets the name of the runnable to the name of the function.
Any runnables called by the function will be traced as dependencies.
Args:
func: A callable.
Returns:
A Runnable.
Example:
.. code-block:: python
from langchain_core.runnables import chain
from langchain_core.prompts import PromptTemplate
from langchain.llms import OpenAI
@chain
def my_func(fields):
prompt = PromptTemplate.from_template("Hello, {name}!")
llm = OpenAI()
formatted = prompt.invoke(fields)
for chunk in llm.stream(formatted):
yield chunk
"""
return RunnableLambda(func) | def chain(func: Union[Callable[[Input], Output], Callable[[Input], Iterator
[Output]], Callable[[Input], Coroutine[Any, Any, Output]], Callable[[
Input], AsyncIterator[Output]]]) ->Runnable[Input, Output]:
"""Decorate a function to make it a Runnable.
Sets the name of the runnable to the name of the function.
Any runnables called by the function will be traced as dependencies.
Args:
func: A callable.
Returns:
A Runnable.
Example:
.. code-block:: python
from langchain_core.runnables import chain
from langchain_core.prompts import PromptTemplate
from langchain.llms import OpenAI
@chain
def my_func(fields):
prompt = PromptTemplate.from_template("Hello, {name}!")
llm = OpenAI()
formatted = prompt.invoke(fields)
for chunk in llm.stream(formatted):
yield chunk
"""
return RunnableLambda(func) | Decorate a function to make it a Runnable.
Sets the name of the runnable to the name of the function.
Any runnables called by the function will be traced as dependencies.
Args:
func: A callable.
Returns:
A Runnable.
Example:
.. code-block:: python
from langchain_core.runnables import chain
from langchain_core.prompts import PromptTemplate
from langchain.llms import OpenAI
@chain
def my_func(fields):
prompt = PromptTemplate.from_template("Hello, {name}!")
llm = OpenAI()
formatted = prompt.invoke(fields)
for chunk in llm.stream(formatted):
yield chunk |
test_nuclia_loader | with mock.patch(
'langchain_community.tools.nuclia.tool.NucliaUnderstandingAPI._run',
new_callable=fakerun):
nua = NucliaUnderstandingAPI(enable_ml=False)
loader = NucliaLoader('/whatever/file.mp3', nua)
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content == 'Hello World'
assert docs[0].metadata['file']['language'] == 'en'
assert len(docs[0].metadata['metadata']['metadata']['metadata'][
'paragraphs']) == 1 | @mock.patch.dict(os.environ, {'NUCLIA_NUA_KEY': '_a_key_'})
def test_nuclia_loader() ->None:
with mock.patch(
'langchain_community.tools.nuclia.tool.NucliaUnderstandingAPI._run',
new_callable=fakerun):
nua = NucliaUnderstandingAPI(enable_ml=False)
loader = NucliaLoader('/whatever/file.mp3', nua)
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content == 'Hello World'
assert docs[0].metadata['file']['language'] == 'en'
assert len(docs[0].metadata['metadata']['metadata']['metadata'][
'paragraphs']) == 1 | null |
search | """Search via direct lookup.
Args:
search: id of a document to search for.
Returns:
Document if found, else error message.
"""
if search not in self._dict:
return f'ID {search} not found.'
else:
return self._dict[search] | def search(self, search: str) ->Union[str, Document]:
"""Search via direct lookup.
Args:
search: id of a document to search for.
Returns:
Document if found, else error message.
"""
if search not in self._dict:
return f'ID {search} not found.'
else:
return self._dict[search] | Search via direct lookup.
Args:
search: id of a document to search for.
Returns:
Document if found, else error message. |
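A minimal illustration, assuming the enclosing class is a dict-backed docstore such as InMemoryDocstore (import path is an assumption):
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_core.documents import Document

docstore = InMemoryDocstore({'1': Document(page_content='hello')})
docstore.search('1')  # -> Document(page_content='hello')
docstore.search('2')  # -> 'ID 2 not found.'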
from_llm_and_typescript | """Get the request parser."""
output_parser = APIRequesterOutputParser()
prompt = PromptTemplate(template=REQUEST_TEMPLATE, output_parser=
output_parser, partial_variables={'schema': typescript_definition},
input_variables=['instructions'])
return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs) | @classmethod
def from_llm_and_typescript(cls, llm: BaseLanguageModel,
typescript_definition: str, verbose: bool=True, **kwargs: Any) ->LLMChain:
"""Get the request parser."""
output_parser = APIRequesterOutputParser()
prompt = PromptTemplate(template=REQUEST_TEMPLATE, output_parser=
output_parser, partial_variables={'schema': typescript_definition},
input_variables=['instructions'])
return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs) | Get the request parser. |
__test_iterative_text_splitter | chunk_size += 1 if keep_separator else 0
splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
chunk_overlap=0, separators=['X', 'Y'], keep_separator=keep_separator)
text = '....5X..3Y...4X....5Y...'
output = splitter.split_text(text)
for chunk in output:
assert len(chunk) <= chunk_size, f'Chunk is larger than {chunk_size}'
return output | def __test_iterative_text_splitter(chunk_size: int, keep_separator: bool
) ->List[str]:
chunk_size += 1 if keep_separator else 0
splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
chunk_overlap=0, separators=['X', 'Y'], keep_separator=keep_separator)
text = '....5X..3Y...4X....5Y...'
output = splitter.split_text(text)
for chunk in output:
assert len(chunk) <= chunk_size, f'Chunk is larger than {chunk_size}'
return output | null |
parse_date | if date_string is None:
return None
time_format = '%a %b %d %H:%M:%S %Y %z'
return datetime.strptime(date_string, time_format) | def parse_date(date_string: str) ->datetime:
if date_string is None:
return None
time_format = '%a %b %d %H:%M:%S %Y %z'
return datetime.strptime(date_string, time_format) | null |
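The expected input format, for reference:
dt = parse_date('Mon Jan 02 15:04:05 2006 +0000')  # -> 2006-01-02 15:04:05+00:00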
_generate | should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=run_manager,
**kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts, run_manager=
run_manager, **params)
return self._create_chat_result(response) | def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, stream:
Optional[bool]=None, **kwargs: Any) ->ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts,
run_manager=run_manager, **params)
return self._create_chat_result(response) | null |
output_keys | """Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
if self.return_source_documents:
_output_keys = _output_keys + ['source_documents']
if self.return_generated_question:
_output_keys = _output_keys + ['generated_question']
return _output_keys | @property
def output_keys(self) ->List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
if self.return_source_documents:
_output_keys = _output_keys + ['source_documents']
if self.return_generated_question:
_output_keys = _output_keys + ['generated_question']
return _output_keys | Return the output keys.
:meta private: |
_FunctionDef | self.__FunctionDef_helper(t, 'def') | def _FunctionDef(self, t):
self.__FunctionDef_helper(t, 'def') | null |