method_name (string, lengths 1 to 78) | method_body (string, lengths 3 to 9.66k) | full_code (string, lengths 31 to 10.7k) | docstring (string, lengths 4 to 4.74k, nullable)
---|---|---|---|
_YieldFrom | self.write('(')
self.write('yield from')
if t.value:
    self.write(' ')
    self.dispatch(t.value)
self.write(')') | def _YieldFrom(self, t):
    self.write('(')
    self.write('yield from')
    if t.value:
        self.write(' ')
        self.dispatch(t.value)
    self.write(')') | null |
get_table_info | all_table_names = self.get_usable_table_names()
if table_names is not None:
    missing_tables = set(table_names).difference(all_table_names)
    if missing_tables:
        raise ValueError(f'table_names {missing_tables} not found in database')
    all_table_names = table_names
tables = []
for table_name in all_table_names:
    table_info = self._get_create_table_stmt(table_name)
    if self._sample_rows_in_table_info:
        table_info += '\n\n/*'
        table_info += f'\n{self._get_sample_spark_rows(table_name)}\n'
        table_info += '*/'
    tables.append(table_info)
final_str = '\n\n'.join(tables)
return final_str | def get_table_info(self, table_names: Optional[List[str]]=None) ->str:
    all_table_names = self.get_usable_table_names()
    if table_names is not None:
        missing_tables = set(table_names).difference(all_table_names)
        if missing_tables:
            raise ValueError(
                f'table_names {missing_tables} not found in database')
        all_table_names = table_names
    tables = []
    for table_name in all_table_names:
        table_info = self._get_create_table_stmt(table_name)
        if self._sample_rows_in_table_info:
            table_info += '\n\n/*'
            table_info += f'\n{self._get_sample_spark_rows(table_name)}\n'
            table_info += '*/'
        tables.append(table_info)
    final_str = '\n\n'.join(tables)
    return final_str | null |
_body | model_kwargs = self.model_kwargs or {}
model_kwargs = {**model_kwargs, **kwargs}
return {'input': prompt, **model_kwargs} | def _body(self, prompt: str, kwargs: Any) ->Dict:
    model_kwargs = self.model_kwargs or {}
    model_kwargs = {**model_kwargs, **kwargs}
    return {'input': prompt, **model_kwargs} | null |
_Dict | self.write('{')
def write_key_value_pair(k, v):
    self.dispatch(k)
    self.write(': ')
    self.dispatch(v)
def write_item(item):
    k, v = item
    if k is None:
        self.write('**')
        self.dispatch(v)
    else:
        write_key_value_pair(k, v)
interleave(lambda : self.write(', '), write_item, zip(t.keys, t.values))
self.write('}') | def _Dict(self, t):
    self.write('{')
    def write_key_value_pair(k, v):
        self.dispatch(k)
        self.write(': ')
        self.dispatch(v)
    def write_item(item):
        k, v = item
        if k is None:
            self.write('**')
            self.dispatch(v)
        else:
            write_key_value_pair(k, v)
    interleave(lambda : self.write(', '), write_item, zip(t.keys, t.values))
    self.write('}') | null |
_parse_response | if len(json_data) == 1:
    result = self._parse_json(json_data[0])
else:
    for entry in json_data:
        if entry.get('provider') == 'eden-ai':
            result = self._parse_json(entry)
return result | def _parse_response(self, json_data: list) ->str:
    if len(json_data) == 1:
        result = self._parse_json(json_data[0])
    else:
        for entry in json_data:
            if entry.get('provider') == 'eden-ai':
                result = self._parse_json(entry)
    return result | null |
test_all_imports | assert sorted(EXPECTED_ALL) == sorted(__all__) | def test_all_imports() ->None:
    assert sorted(EXPECTED_ALL) == sorted(__all__) | null |
add_texts | """Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
)
embeddings = []
create_index_if_not_exists = kwargs.get('create_index_if_not_exists', True)
ids = kwargs.get('ids', [str(uuid.uuid4()) for _ in texts])
refresh_indices = kwargs.get('refresh_indices', True)
requests = []
if self.embedding is not None:
embeddings = self.embedding.embed_documents(list(texts))
dims_length = len(embeddings[0])
if create_index_if_not_exists:
self._create_index_if_not_exists(dims_length=dims_length)
for i, (text, vector) in enumerate(zip(texts, embeddings)):
metadata = metadatas[i] if metadatas else {}
requests.append({'_op_type': 'index', '_index': self.index_name,
self.query_field: text, self.vector_query_field: vector,
'metadata': metadata, '_id': ids[i]})
else:
if create_index_if_not_exists:
self._create_index_if_not_exists()
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
requests.append({'_op_type': 'index', '_index': self.index_name,
self.query_field: text, 'metadata': metadata, '_id': ids[i]})
if len(requests) > 0:
try:
success, failed = bulk(self.client, requests, stats_only=True,
refresh=refresh_indices)
logger.debug(
f'Added {success} and failed to add {failed} texts to index')
logger.debug(f'added texts {ids} to index')
return ids
except BulkIndexError as e:
logger.error(f'Error adding texts: {e}')
firstError = e.errors[0].get('index', {}).get('error', {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise e
else:
logger.debug('No texts to add to index')
return [] | def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict[Any,
Any]]]=None, **kwargs: Any) ->List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
)
embeddings = []
create_index_if_not_exists = kwargs.get('create_index_if_not_exists', True)
ids = kwargs.get('ids', [str(uuid.uuid4()) for _ in texts])
refresh_indices = kwargs.get('refresh_indices', True)
requests = []
if self.embedding is not None:
embeddings = self.embedding.embed_documents(list(texts))
dims_length = len(embeddings[0])
if create_index_if_not_exists:
self._create_index_if_not_exists(dims_length=dims_length)
for i, (text, vector) in enumerate(zip(texts, embeddings)):
metadata = metadatas[i] if metadatas else {}
requests.append({'_op_type': 'index', '_index': self.index_name,
self.query_field: text, self.vector_query_field: vector,
'metadata': metadata, '_id': ids[i]})
else:
if create_index_if_not_exists:
self._create_index_if_not_exists()
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
requests.append({'_op_type': 'index', '_index': self.index_name,
self.query_field: text, 'metadata': metadata, '_id': ids[i]})
if len(requests) > 0:
try:
success, failed = bulk(self.client, requests, stats_only=True,
refresh=refresh_indices)
logger.debug(
f'Added {success} and failed to add {failed} texts to index')
logger.debug(f'added texts {ids} to index')
return ids
except BulkIndexError as e:
logger.error(f'Error adding texts: {e}')
firstError = e.errors[0].get('index', {}).get('error', {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise e
else:
logger.debug('No texts to add to index')
return [] | Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore. |
_import_powerbi_tool_ListPowerBITool | from langchain_community.tools.powerbi.tool import ListPowerBITool
return ListPowerBITool | def _import_powerbi_tool_ListPowerBITool() ->Any:
    from langchain_community.tools.powerbi.tool import ListPowerBITool
    return ListPowerBITool | null |
test_openai_streaming_multiple_prompts_error | """Test validation for streaming fails if multiple prompts are given."""
with pytest.raises(ValueError):
    OpenAI(streaming=True).generate(["I'm Pickle Rick", "I'm Pickle Rick"]) | def test_openai_streaming_multiple_prompts_error() ->None:
    """Test validation for streaming fails if multiple prompts are given."""
    with pytest.raises(ValueError):
        OpenAI(streaming=True).generate(["I'm Pickle Rick", "I'm Pickle Rick"]) | Test validation for streaming fails if multiple prompts are given. |
format_docs | return '\n\n'.join(f"""Source {i}:
{doc.page_content}""" for i, doc in
enumerate(docs)) | def format_docs(docs):
return '\n\n'.join(f'Source {i}:\n{doc.page_content}' for i, doc in
enumerate(docs)) | null |
parse_result | if len(result) != 1:
raise OutputParserException(
f'Expected exactly one result, but got {len(result)}')
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
'This output parser can only be used with a chat generation.')
message = generation.message
if 'function_call' not in message.additional_kwargs:
return None
try:
function_call = message.additional_kwargs['function_call']
except KeyError as exc:
if partial:
return None
else:
raise OutputParserException(f'Could not parse function call: {exc}')
try:
if partial:
if self.args_only:
return parse_partial_json(function_call['arguments'], strict=
self.strict)
else:
return {**function_call, 'arguments': parse_partial_json(
function_call['arguments'], strict=self.strict)}
elif self.args_only:
try:
return json.loads(function_call['arguments'], strict=self.strict)
except (json.JSONDecodeError, TypeError) as exc:
raise OutputParserException(
f'Could not parse function call data: {exc}')
else:
try:
return {**function_call, 'arguments': json.loads(function_call[
'arguments'], strict=self.strict)}
except (json.JSONDecodeError, TypeError) as exc:
raise OutputParserException(
f'Could not parse function call data: {exc}')
except KeyError:
return None | def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any:
if len(result) != 1:
raise OutputParserException(
f'Expected exactly one result, but got {len(result)}')
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
'This output parser can only be used with a chat generation.')
message = generation.message
if 'function_call' not in message.additional_kwargs:
return None
try:
function_call = message.additional_kwargs['function_call']
except KeyError as exc:
if partial:
return None
else:
raise OutputParserException(f'Could not parse function call: {exc}'
)
try:
if partial:
if self.args_only:
return parse_partial_json(function_call['arguments'],
strict=self.strict)
else:
return {**function_call, 'arguments': parse_partial_json(
function_call['arguments'], strict=self.strict)}
elif self.args_only:
try:
return json.loads(function_call['arguments'], strict=self.
strict)
except (json.JSONDecodeError, TypeError) as exc:
raise OutputParserException(
f'Could not parse function call data: {exc}')
else:
try:
return {**function_call, 'arguments': json.loads(
function_call['arguments'], strict=self.strict)}
except (json.JSONDecodeError, TypeError) as exc:
raise OutputParserException(
f'Could not parse function call data: {exc}')
except KeyError:
return None | null |
lazy_parse | """Lazily parse the blob."""
import fitz
with blob.as_bytes_io() as file_path:
if blob.data is None:
doc = fitz.open(file_path)
else:
doc = fitz.open(stream=file_path, filetype='pdf')
yield from [Document(page_content=page.get_text(**self.text_kwargs) +
self._extract_images_from_page(doc, page), metadata=dict({'source':
blob.source, 'file_path': blob.source, 'page': page.number,
'total_pages': len(doc)}, **{k: doc.metadata[k] for k in doc.
metadata if type(doc.metadata[k]) in [str, int]})) for page in doc] | def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Lazily parse the blob."""
import fitz
with blob.as_bytes_io() as file_path:
if blob.data is None:
doc = fitz.open(file_path)
else:
doc = fitz.open(stream=file_path, filetype='pdf')
yield from [Document(page_content=page.get_text(**self.text_kwargs) +
self._extract_images_from_page(doc, page), metadata=dict({
'source': blob.source, 'file_path': blob.source, 'page': page.
number, 'total_pages': len(doc)}, **{k: doc.metadata[k] for k in
doc.metadata if type(doc.metadata[k]) in [str, int]})) for page in
doc] | Lazily parse the blob. |
test_openai_llm_output_contains_model_name | """Test llm_output contains model_name."""
llm = OpenAI(max_tokens=10)
llm_result = llm.generate(['Hello, how are you?'])
assert llm_result.llm_output is not None
assert llm_result.llm_output['model_name'] == llm.model_name | def test_openai_llm_output_contains_model_name() ->None:
    """Test llm_output contains model_name."""
    llm = OpenAI(max_tokens=10)
    llm_result = llm.generate(['Hello, how are you?'])
    assert llm_result.llm_output is not None
    assert llm_result.llm_output['model_name'] == llm.model_name | Test llm_output contains model_name. |
_headers | return {'Authorization': f'bearer {self.deepinfra_api_token}',
    'Content-Type': 'application/json'} | def _headers(self) ->Dict:
    return {'Authorization': f'bearer {self.deepinfra_api_token}',
        'Content-Type': 'application/json'} | null |
_call | """Call out to Aviary
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aviary("Tell me a joke.")
"""
kwargs = {'use_prompt_format': self.use_prompt_format}
if self.version:
kwargs['version'] = self.version
output = get_completions(model=self.model, prompt=prompt, **kwargs)
text = cast(str, output['generated_text'])
if stop:
text = enforce_stop_tokens(text, stop)
return text | def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call out to Aviary
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aviary("Tell me a joke.")
"""
kwargs = {'use_prompt_format': self.use_prompt_format}
if self.version:
kwargs['version'] = self.version
output = get_completions(model=self.model, prompt=prompt, **kwargs)
text = cast(str, output['generated_text'])
if stop:
text = enforce_stop_tokens(text, stop)
return text | Call out to Aviary
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aviary("Tell me a joke.") |
__init__ | """Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
import sentence_transformers
except ImportError as exc:
raise ImportError(
'Could not import sentence_transformers python package. Please install it with `pip install sentence-transformers`.'
) from exc
self.client = sentence_transformers.SentenceTransformer(self.model_name,
cache_folder=self.cache_folder, **self.model_kwargs) | def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
import sentence_transformers
except ImportError as exc:
raise ImportError(
'Could not import sentence_transformers python package. Please install it with `pip install sentence-transformers`.'
) from exc
self.client = sentence_transformers.SentenceTransformer(self.model_name,
cache_folder=self.cache_folder, **self.model_kwargs) | Initialize the sentence_transformer. |
test_loading_few_shot_prompt_example_prompt | """Test loading few shot when the example prompt is in its own file."""
with change_directory(EXAMPLE_DIR):
prompt = load_prompt('few_shot_prompt_example_prompt.json')
expected_prompt = FewShotPromptTemplate(input_variables=['adjective'],
prefix='Write antonyms for the following words.', example_prompt=
PromptTemplate(input_variables=['input', 'output'], template=
"""Input: {input}
Output: {output}"""), examples=[{'input': 'happy',
'output': 'sad'}, {'input': 'tall', 'output': 'short'}], suffix=
"""Input: {adjective}
Output:""")
assert prompt == expected_prompt | def test_loading_few_shot_prompt_example_prompt() ->None:
"""Test loading few shot when the example prompt is in its own file."""
with change_directory(EXAMPLE_DIR):
prompt = load_prompt('few_shot_prompt_example_prompt.json')
expected_prompt = FewShotPromptTemplate(input_variables=[
'adjective'], prefix='Write antonyms for the following words.',
example_prompt=PromptTemplate(input_variables=['input',
'output'], template="""Input: {input}
Output: {output}"""),
examples=[{'input': 'happy', 'output': 'sad'}, {'input': 'tall',
'output': 'short'}], suffix="""Input: {adjective}
Output:""")
assert prompt == expected_prompt | Test loading few shot when the example prompt is in its own file. |
test_pgvector_with_filter_distant_match | """Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={'page':
'2'})
assert output == [(Document(page_content='baz', metadata={'page': '2'}),
0.0013003906671379406)] | def test_pgvector_with_filter_distant_match() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(
), metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={
'page': '2'})
assert output == [(Document(page_content='baz', metadata={'page': '2'}),
0.0013003906671379406)] | Test end to end construction and search. |
_validate_outputs | super()._validate_outputs(outputs)
if not isinstance(outputs['next_inputs'], dict):
    raise ValueError | def _validate_outputs(self, outputs: Dict[str, Any]) ->None:
    super()._validate_outputs(outputs)
    if not isinstance(outputs['next_inputs'], dict):
        raise ValueError | null |
test_func_call_oldstyle | act = json.dumps([{'action_name': 'foo', 'action': {'__arg1': '42'}}])
msg = AIMessage(content='LLM thoughts.', additional_kwargs={'function_call':
{'name': 'foo', 'arguments': f'{{"actions": {act}}}'}})
result = _parse_ai_message(msg)
assert isinstance(result, list)
assert len(result) == 1
action = result[0]
assert isinstance(action, _FunctionsAgentAction)
assert action.tool == 'foo'
assert action.tool_input == '42'
assert action.log == """
Invoking: `foo` with `42`
responded: LLM thoughts.
"""
assert action.message_log == [msg] | def test_func_call_oldstyle(self) ->None:
act = json.dumps([{'action_name': 'foo', 'action': {'__arg1': '42'}}])
msg = AIMessage(content='LLM thoughts.', additional_kwargs={
'function_call': {'name': 'foo', 'arguments': f'{{"actions": {act}}}'}}
)
result = _parse_ai_message(msg)
assert isinstance(result, list)
assert len(result) == 1
action = result[0]
assert isinstance(action, _FunctionsAgentAction)
assert action.tool == 'foo'
assert action.tool_input == '42'
assert action.log == '\nInvoking: `foo` with `42`\nresponded: LLM thoughts.\n\n'
assert action.message_log == [msg] | null |
_async_retry_decorator | import openai
async_retrying = AsyncRetrying(reraise=True, stop=stop_after_attempt(
embeddings.max_retries), wait=wait_exponential(multiplier=1, min=
embeddings.retry_min_seconds, max=embeddings.retry_max_seconds), retry=
retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type
(openai.error.APIError) | retry_if_exception_type(openai.error.
APIConnectionError) | retry_if_exception_type(openai.error.
RateLimitError) | retry_if_exception_type(openai.error.
ServiceUnavailableError), before_sleep=before_sleep_log(logger, logging
.WARNING))
def wrap(func: Callable) ->Callable:
async def wrapped_f(*args: Any, **kwargs: Any) ->Callable:
async for _ in async_retrying:
return await func(*args, **kwargs)
raise AssertionError('this is unreachable')
return wrapped_f
return wrap | def _async_retry_decorator(embeddings: OpenAIEmbeddings) ->Any:
import openai
async_retrying = AsyncRetrying(reraise=True, stop=stop_after_attempt(
embeddings.max_retries), wait=wait_exponential(multiplier=1, min=
embeddings.retry_min_seconds, max=embeddings.retry_max_seconds),
retry=retry_if_exception_type(openai.error.Timeout) |
retry_if_exception_type(openai.error.APIError) |
retry_if_exception_type(openai.error.APIConnectionError) |
retry_if_exception_type(openai.error.RateLimitError) |
retry_if_exception_type(openai.error.ServiceUnavailableError),
before_sleep=before_sleep_log(logger, logging.WARNING))
def wrap(func: Callable) ->Callable:
async def wrapped_f(*args: Any, **kwargs: Any) ->Callable:
async for _ in async_retrying:
return await func(*args, **kwargs)
raise AssertionError('this is unreachable')
return wrapped_f
return wrap | null |
test_graph_sequence | fake_llm = FakeListLLM(responses=['a'])
prompt = PromptTemplate.from_template('Hello, {name}!')
list_parser = CommaSeparatedListOutputParser()
sequence = prompt | fake_llm | list_parser
graph = sequence.get_graph()
assert graph.draw_ascii() == snapshot | def test_graph_sequence(snapshot: SnapshotAssertion) ->None:
fake_llm = FakeListLLM(responses=['a'])
prompt = PromptTemplate.from_template('Hello, {name}!')
list_parser = CommaSeparatedListOutputParser()
sequence = prompt | fake_llm | list_parser
graph = sequence.get_graph()
assert graph.draw_ascii() == snapshot | null |
__iter__ | yield from self._children | def __iter__(self) ->Iterator[Iterator[T]]:
    yield from self._children | null |
__deepcopy__ | """Return a deep copy of the callback handler."""
return self | def __deepcopy__(self, memo: Any) ->'OpenAICallbackHandler':
    """Return a deep copy of the callback handler."""
    return self | Return a deep copy of the callback handler. |
on_chain_end | self.on_chain_end_common() | def on_chain_end(self, *args: Any, **kwargs: Any) ->Any:
    self.on_chain_end_common() | null |
validate_chains | """Validate that the correct inputs exist for all chains."""
chains = values['chains']
input_variables = values['input_variables']
memory_keys = list()
if 'memory' in values and values['memory'] is not None:
"""Validate that prompt input variables are consistent."""
memory_keys = values['memory'].memory_variables
if set(input_variables).intersection(set(memory_keys)):
overlapping_keys = set(input_variables) & set(memory_keys)
raise ValueError(
f"The input key(s) {''.join(overlapping_keys)} are found in the Memory keys ({memory_keys}) - please use input and memory keys that don't overlap."
)
known_variables = set(input_variables + memory_keys)
for chain in chains:
missing_vars = set(chain.input_keys).difference(known_variables)
if chain.memory:
missing_vars = missing_vars.difference(chain.memory.memory_variables)
if missing_vars:
raise ValueError(
f'Missing required input keys: {missing_vars}, only had {known_variables}'
)
overlapping_keys = known_variables.intersection(chain.output_keys)
if overlapping_keys:
raise ValueError(
f'Chain returned keys that already exist: {overlapping_keys}')
known_variables |= set(chain.output_keys)
if 'output_variables' not in values:
if values.get('return_all', False):
output_keys = known_variables.difference(input_variables)
else:
output_keys = chains[-1].output_keys
values['output_variables'] = output_keys
else:
missing_vars = set(values['output_variables']).difference(known_variables)
if missing_vars:
raise ValueError(
f'Expected output variables that were not found: {missing_vars}.')
return values | @root_validator(pre=True)
def validate_chains(cls, values: Dict) ->Dict:
"""Validate that the correct inputs exist for all chains."""
chains = values['chains']
input_variables = values['input_variables']
memory_keys = list()
if 'memory' in values and values['memory'] is not None:
"""Validate that prompt input variables are consistent."""
memory_keys = values['memory'].memory_variables
if set(input_variables).intersection(set(memory_keys)):
overlapping_keys = set(input_variables) & set(memory_keys)
raise ValueError(
f"The input key(s) {''.join(overlapping_keys)} are found in the Memory keys ({memory_keys}) - please use input and memory keys that don't overlap."
)
known_variables = set(input_variables + memory_keys)
for chain in chains:
missing_vars = set(chain.input_keys).difference(known_variables)
if chain.memory:
missing_vars = missing_vars.difference(chain.memory.
memory_variables)
if missing_vars:
raise ValueError(
f'Missing required input keys: {missing_vars}, only had {known_variables}'
)
overlapping_keys = known_variables.intersection(chain.output_keys)
if overlapping_keys:
raise ValueError(
f'Chain returned keys that already exist: {overlapping_keys}')
known_variables |= set(chain.output_keys)
if 'output_variables' not in values:
if values.get('return_all', False):
output_keys = known_variables.difference(input_variables)
else:
output_keys = chains[-1].output_keys
values['output_variables'] = output_keys
else:
missing_vars = set(values['output_variables']).difference(
known_variables)
if missing_vars:
raise ValueError(
f'Expected output variables that were not found: {missing_vars}.'
)
return values | Validate that the correct inputs exist for all chains. |
is_lc_serializable | return False | @classmethod
def is_lc_serializable(cls) ->bool:
    return False | null |
embed_documents | """Embed search docs."""
return [i.embedding for i in self._client.embeddings.create(input=texts,
    model=self.model).data] | def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed search docs."""
    return [i.embedding for i in self._client.embeddings.create(input=texts,
        model=self.model).data] | Embed search docs. |
_run | script = strip_markdown_code(python_code)
resp = requests.post('https://exec.bearly.ai/v1/interpreter', data=json.
dumps({'fileContents': script, 'inputFiles': self.make_input_files(),
'outputDir': 'output/', 'outputAsLinks': True}), headers={
'Authorization': self.api_key}).json()
return {'stdout': base64.b64decode(resp['stdoutBasesixtyfour']).decode() if
resp['stdoutBasesixtyfour'] else '', 'stderr': base64.b64decode(resp[
'stderrBasesixtyfour']).decode() if resp['stderrBasesixtyfour'] else '',
'fileLinks': resp['fileLinks'], 'exitCode': resp['exitCode']} | def _run(self, python_code: str) ->dict:
script = strip_markdown_code(python_code)
resp = requests.post('https://exec.bearly.ai/v1/interpreter', data=json
.dumps({'fileContents': script, 'inputFiles': self.make_input_files
(), 'outputDir': 'output/', 'outputAsLinks': True}), headers={
'Authorization': self.api_key}).json()
return {'stdout': base64.b64decode(resp['stdoutBasesixtyfour']).decode(
) if resp['stdoutBasesixtyfour'] else '', 'stderr': base64.
b64decode(resp['stderrBasesixtyfour']).decode() if resp[
'stderrBasesixtyfour'] else '', 'fileLinks': resp['fileLinks'],
'exitCode': resp['exitCode']} | null |
run | """Run query through Google Trends with Serpapi"""
serpapi_api_key = cast(SecretStr, self.serp_api_key)
params = {'engine': 'google_jobs', 'api_key': serpapi_api_key.
get_secret_value(), 'q': query}
total_results = []
client = self.serp_search_engine(params)
total_results = client.get_dict()['jobs_results']
res_str = ''
for i in range(1):
job = total_results[i]
res_str += '\n_______________________________________________' + f"""
Job Title: {job['title']}
""" + f"Company Name: {job['company_name']}\n" + f"Location: {job['location']}\n" + f"Description: {job['description']}" + """
_______________________________________________
"""
return res_str + '\n' | def run(self, query: str) ->str:
"""Run query through Google Trends with Serpapi"""
serpapi_api_key = cast(SecretStr, self.serp_api_key)
params = {'engine': 'google_jobs', 'api_key': serpapi_api_key.
get_secret_value(), 'q': query}
total_results = []
client = self.serp_search_engine(params)
total_results = client.get_dict()['jobs_results']
res_str = ''
for i in range(1):
job = total_results[i]
res_str += '\n_______________________________________________' + f"""
Job Title: {job['title']}
""" + f"Company Name: {job['company_name']}\n" + f"Location: {job['location']}\n" + f"Description: {job['description']}" + """
_______________________________________________
"""
return res_str + '\n' | Run query through Google Jobs with Serpapi |
test_extract_sub_links_exclude | html = (
'<a href="https://foobar.com">one</a><a href="http://baz.net">two</a><a href="//foobar.com/hello">three</a><a href="/how/are/you/doing">four</a><a href="alexis.html"</a>'
)
expected = sorted(['http://baz.net', 'https://foobar.com',
'https://foobar.com/hello', 'https://foobar.com/hello/alexis.html'])
actual = sorted(extract_sub_links(html,
'https://foobar.com/hello/bill.html', base_url='https://foobar.com',
prevent_outside=False, exclude_prefixes=('https://foobar.com/how',
'http://baz.org')))
assert actual == expected | def test_extract_sub_links_exclude() ->None:
html = (
'<a href="https://foobar.com">one</a><a href="http://baz.net">two</a><a href="//foobar.com/hello">three</a><a href="/how/are/you/doing">four</a><a href="alexis.html"</a>'
)
expected = sorted(['http://baz.net', 'https://foobar.com',
'https://foobar.com/hello', 'https://foobar.com/hello/alexis.html'])
actual = sorted(extract_sub_links(html,
'https://foobar.com/hello/bill.html', base_url='https://foobar.com',
prevent_outside=False, exclude_prefixes=('https://foobar.com/how',
'http://baz.org')))
assert actual == expected | null |
__query_cluster | """Query the BagelDB cluster based on the provided parameters."""
try:
import bagel
except ImportError:
raise ImportError('Please install bagel `pip install betabageldb`.')
return self._cluster.find(query_texts=query_texts, query_embeddings=
query_embeddings, n_results=n_results, where=where, **kwargs) | @xor_args(('query_texts', 'query_embeddings'))
def __query_cluster(self, query_texts: Optional[List[str]]=None,
query_embeddings: Optional[List[List[float]]]=None, n_results: int=4,
where: Optional[Dict[str, str]]=None, **kwargs: Any) ->List[Document]:
"""Query the BagelDB cluster based on the provided parameters."""
try:
import bagel
except ImportError:
raise ImportError('Please install bagel `pip install betabageldb`.')
return self._cluster.find(query_texts=query_texts, query_embeddings=
query_embeddings, n_results=n_results, where=where, **kwargs) | Query the BagelDB cluster based on the provided parameters. |
get_lc_namespace | """Get the namespace of the langchain object."""
return ['langchain', 'chat_models', 'openai'] | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'chat_models', 'openai'] | Get the namespace of the langchain object. |
_load_sql_database_chain | from langchain_experimental.sql import SQLDatabaseChain
if 'database' in kwargs:
database = kwargs.pop('database')
else:
raise ValueError('`database` must be present.')
if 'llm_chain' in config:
llm_chain_config = config.pop('llm_chain')
chain = load_chain_from_config(llm_chain_config)
return SQLDatabaseChain(llm_chain=chain, database=database, **config)
if 'llm' in config:
llm_config = config.pop('llm')
llm = load_llm_from_config(llm_config)
elif 'llm_path' in config:
llm = load_llm(config.pop('llm_path'))
else:
raise ValueError('One of `llm` or `llm_path` must be present.')
if 'prompt' in config:
prompt_config = config.pop('prompt')
prompt = load_prompt_from_config(prompt_config)
else:
prompt = None
return SQLDatabaseChain.from_llm(llm, database, prompt=prompt, **config) | def _load_sql_database_chain(config: dict, **kwargs: Any) ->Any:
from langchain_experimental.sql import SQLDatabaseChain
if 'database' in kwargs:
database = kwargs.pop('database')
else:
raise ValueError('`database` must be present.')
if 'llm_chain' in config:
llm_chain_config = config.pop('llm_chain')
chain = load_chain_from_config(llm_chain_config)
return SQLDatabaseChain(llm_chain=chain, database=database, **config)
if 'llm' in config:
llm_config = config.pop('llm')
llm = load_llm_from_config(llm_config)
elif 'llm_path' in config:
llm = load_llm(config.pop('llm_path'))
else:
raise ValueError('One of `llm` or `llm_path` must be present.')
if 'prompt' in config:
prompt_config = config.pop('prompt')
prompt = load_prompt_from_config(prompt_config)
else:
prompt = None
return SQLDatabaseChain.from_llm(llm, database, prompt=prompt, **config) | null |
test_pickbest_textembedder_w_full_label_no_emb | feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
model=MockEncoder())
named_actions = {'action1': ['0', '1', '2']}
expected = """shared |context context
0:-0.0:1.0 |action1 0
|action1 1
|action1 2 """
selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0
)
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on={'context': 'context'}, selected=selected)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected | @pytest.mark.requires('vowpal_wabbit_next')
def test_pickbest_textembedder_w_full_label_no_emb() ->None:
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=
False, model=MockEncoder())
named_actions = {'action1': ['0', '1', '2']}
expected = (
'shared |context context \n0:-0.0:1.0 |action1 0 \n|action1 1 \n|action1 2 '
)
selected = pick_best_chain.PickBestSelected(index=0, probability=1.0,
score=0.0)
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on={'context': 'context'}, selected=selected)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected | null |
__init__ | confluence_kwargs = confluence_kwargs or {}
errors = ConfluenceLoader.validate_init_args(url=url, api_key=api_key,
username=username, session=session, oauth2=oauth2, token=token)
if errors:
raise ValueError(f'Error(s) while validating input: {errors}')
try:
from atlassian import Confluence
except ImportError:
raise ImportError(
'`atlassian` package not found, please run `pip install atlassian-python-api`'
)
self.base_url = url
self.number_of_retries = number_of_retries
self.min_retry_seconds = min_retry_seconds
self.max_retry_seconds = max_retry_seconds
if session:
self.confluence = Confluence(url=url, session=session, **confluence_kwargs)
elif oauth2:
self.confluence = Confluence(url=url, oauth2=oauth2, cloud=cloud, **
confluence_kwargs)
elif token:
self.confluence = Confluence(url=url, token=token, cloud=cloud, **
confluence_kwargs)
else:
self.confluence = Confluence(url=url, username=username, password=
api_key, cloud=cloud, **confluence_kwargs) | def __init__(self, url: str, api_key: Optional[str]=None, username:
Optional[str]=None, session: Optional[requests.Session]=None, oauth2:
Optional[dict]=None, token: Optional[str]=None, cloud: Optional[bool]=
True, number_of_retries: Optional[int]=3, min_retry_seconds: Optional[
int]=2, max_retry_seconds: Optional[int]=10, confluence_kwargs:
Optional[dict]=None):
confluence_kwargs = confluence_kwargs or {}
errors = ConfluenceLoader.validate_init_args(url=url, api_key=api_key,
username=username, session=session, oauth2=oauth2, token=token)
if errors:
raise ValueError(f'Error(s) while validating input: {errors}')
try:
from atlassian import Confluence
except ImportError:
raise ImportError(
'`atlassian` package not found, please run `pip install atlassian-python-api`'
)
self.base_url = url
self.number_of_retries = number_of_retries
self.min_retry_seconds = min_retry_seconds
self.max_retry_seconds = max_retry_seconds
if session:
self.confluence = Confluence(url=url, session=session, **
confluence_kwargs)
elif oauth2:
self.confluence = Confluence(url=url, oauth2=oauth2, cloud=cloud,
**confluence_kwargs)
elif token:
self.confluence = Confluence(url=url, token=token, cloud=cloud, **
confluence_kwargs)
else:
self.confluence = Confluence(url=url, username=username, password=
api_key, cloud=cloud, **confluence_kwargs) | null |
get_lc_namespace | """Get the namespace of the langchain object."""
return ['langchain', 'schema', 'runnable'] | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'runnable'] | Get the namespace of the langchain object. |
__getattr__ | """Raise an error on import since is deprecated."""
raise ImportError(
'This module has been moved to langchain-experimental. For more details: https://github.com/langchain-ai/langchain/discussions/11352.To access this code, install it with `pip install langchain-experimental`.`from langchain_experimental.llm_bash.base import LLMBashChain`'
) | def __getattr__(name: str='') ->None:
"""Raise an error on import since is deprecated."""
raise ImportError(
'This module has been moved to langchain-experimental. For more details: https://github.com/langchain-ai/langchain/discussions/11352.To access this code, install it with `pip install langchain-experimental`.`from langchain_experimental.llm_bash.base import LLMBashChain`'
) | Raise an error on import since is deprecated. |
_format_chat_history | buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer | def _format_chat_history(chat_history: List[Tuple[str, str]]):
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer | null |
_llm_type | """Return the type of chat model."""
return 'zhipuai' | @property
def _llm_type(self) ->str:
"""Return the type of chat model."""
return 'zhipuai' | Return the type of chat model. |
test_callback_manager_configure_context_vars | """Test callback manager configuration."""
monkeypatch.setenv('LANGCHAIN_TRACING_V2', 'true')
monkeypatch.setenv('LANGCHAIN_TRACING', 'false')
with patch.object(LangChainTracer, '_update_run_single'):
with patch.object(LangChainTracer, '_persist_run_single'):
with trace_as_chain_group('test') as group_manager:
assert len(group_manager.handlers) == 1
tracer = group_manager.handlers[0]
assert isinstance(tracer, LangChainTracer)
with get_openai_callback() as cb:
assert cb.successful_requests == 0
assert cb.total_tokens == 0
mngr = CallbackManager.configure(group_manager)
assert mngr.handlers == [tracer, cb]
assert group_manager.handlers == [tracer]
response = LLMResult(generations=[], llm_output={
'token_usage': {'prompt_tokens': 2, 'completion_tokens':
1, 'total_tokens': 3}, 'model_name': BaseOpenAI.
__fields__['model_name'].default})
mngr.on_llm_start({}, ['prompt'])[0].on_llm_end(response)
assert cb.successful_requests == 1
assert cb.total_tokens == 3
assert cb.prompt_tokens == 2
assert cb.completion_tokens == 1
assert cb.total_cost > 0
with get_openai_callback() as cb:
assert cb.successful_requests == 0
assert cb.total_tokens == 0
mngr = CallbackManager.configure(group_manager)
assert mngr.handlers == [tracer, cb]
assert group_manager.handlers == [tracer]
response = LLMResult(generations=[], llm_output={
'token_usage': {'prompt_tokens': 2, 'completion_tokens':
1, 'total_tokens': 3}, 'model_name': BaseOpenAI.
__fields__['model_name'].default})
mngr.on_llm_start({}, ['prompt'])[0].on_llm_end(response)
assert cb.successful_requests == 1
assert cb.total_tokens == 3
assert cb.prompt_tokens == 2
assert cb.completion_tokens == 1
assert cb.total_cost > 0
wait_for_all_tracers()
assert LangChainTracer._persist_run_single.call_count == 1 | def test_callback_manager_configure_context_vars(monkeypatch: pytest.
MonkeyPatch) ->None:
"""Test callback manager configuration."""
monkeypatch.setenv('LANGCHAIN_TRACING_V2', 'true')
monkeypatch.setenv('LANGCHAIN_TRACING', 'false')
with patch.object(LangChainTracer, '_update_run_single'):
with patch.object(LangChainTracer, '_persist_run_single'):
with trace_as_chain_group('test') as group_manager:
assert len(group_manager.handlers) == 1
tracer = group_manager.handlers[0]
assert isinstance(tracer, LangChainTracer)
with get_openai_callback() as cb:
assert cb.successful_requests == 0
assert cb.total_tokens == 0
mngr = CallbackManager.configure(group_manager)
assert mngr.handlers == [tracer, cb]
assert group_manager.handlers == [tracer]
response = LLMResult(generations=[], llm_output={
'token_usage': {'prompt_tokens': 2,
'completion_tokens': 1, 'total_tokens': 3},
'model_name': BaseOpenAI.__fields__['model_name'].
default})
mngr.on_llm_start({}, ['prompt'])[0].on_llm_end(response)
assert cb.successful_requests == 1
assert cb.total_tokens == 3
assert cb.prompt_tokens == 2
assert cb.completion_tokens == 1
assert cb.total_cost > 0
with get_openai_callback() as cb:
assert cb.successful_requests == 0
assert cb.total_tokens == 0
mngr = CallbackManager.configure(group_manager)
assert mngr.handlers == [tracer, cb]
assert group_manager.handlers == [tracer]
response = LLMResult(generations=[], llm_output={
'token_usage': {'prompt_tokens': 2,
'completion_tokens': 1, 'total_tokens': 3},
'model_name': BaseOpenAI.__fields__['model_name'].
default})
mngr.on_llm_start({}, ['prompt'])[0].on_llm_end(response)
assert cb.successful_requests == 1
assert cb.total_tokens == 3
assert cb.prompt_tokens == 2
assert cb.completion_tokens == 1
assert cb.total_cost > 0
wait_for_all_tracers()
assert LangChainTracer._persist_run_single.call_count == 1 | Test callback manager configuration. |
_import_stochasticai | from langchain_community.llms.stochasticai import StochasticAI
return StochasticAI | def _import_stochasticai() ->Any:
    from langchain_community.llms.stochasticai import StochasticAI
    return StochasticAI | null |
_get_channel | try:
import grpc
except ImportError:
raise ValueError(
'Could not import grpcio python package. Please install it with `pip install grpcio`.'
)
return grpc.secure_channel(self.target, self.grpc_credentials, options=self
.grpc_options) if self.grpc_use_secure else grpc.insecure_channel(self.
target, options=self.grpc_options) | def _get_channel(self) ->Any:
try:
import grpc
except ImportError:
raise ValueError(
'Could not import grpcio python package. Please install it with `pip install grpcio`.'
)
return grpc.secure_channel(self.target, self.grpc_credentials, options=
self.grpc_options) if self.grpc_use_secure else grpc.insecure_channel(
self.target, options=self.grpc_options) | null |
test_python_code_splitter | splitter = RecursiveCharacterTextSplitter.from_language(Language.PYTHON,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
def hello_world():
print("Hello, World!")
# Call the function
hello_world()
"""
chunks = splitter.split_text(code)
assert chunks == ['def', 'hello_world():', 'print("Hello,', 'World!")',
'# Call the', 'function', 'hello_world()'] | def test_python_code_splitter() ->None:
splitter = RecursiveCharacterTextSplitter.from_language(Language.PYTHON,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
def hello_world():
print("Hello, World!")
# Call the function
hello_world()
"""
chunks = splitter.split_text(code)
assert chunks == ['def', 'hello_world():', 'print("Hello,', 'World!")',
'# Call the', 'function', 'hello_world()'] | null |
_on_tool_error | """Process the Tool Run upon error.""" | def _on_tool_error(self, run: Run) ->None:
"""Process the Tool Run upon error.""" | Process the Tool Run upon error. |
elasticsearch_connection | from elasticsearch import Elasticsearch
es_url = os.environ.get('ES_URL', 'http://localhost:9200')
es_cloud_id = os.environ.get('ES_CLOUD_ID')
es_username = os.environ.get('ES_USERNAME', 'elastic')
es_password = os.environ.get('ES_PASSWORD', 'changeme')
if es_cloud_id:
es = Elasticsearch(cloud_id=es_cloud_id, basic_auth=(es_username,
es_password))
yield {'es_cloud_id': es_cloud_id, 'es_user': es_username,
'es_password': es_password}
else:
es = Elasticsearch(hosts=es_url)
yield {'es_url': es_url}
index_names = es.indices.get(index='_all').keys()
for index_name in index_names:
if index_name.startswith('test_'):
es.indices.delete(index=index_name)
es.indices.refresh(index='_all') | @pytest.fixture(scope='class', autouse=True)
def elasticsearch_connection(self) ->Union[dict, Generator[dict, None, None]]:
from elasticsearch import Elasticsearch
es_url = os.environ.get('ES_URL', 'http://localhost:9200')
es_cloud_id = os.environ.get('ES_CLOUD_ID')
es_username = os.environ.get('ES_USERNAME', 'elastic')
es_password = os.environ.get('ES_PASSWORD', 'changeme')
if es_cloud_id:
es = Elasticsearch(cloud_id=es_cloud_id, basic_auth=(es_username,
es_password))
yield {'es_cloud_id': es_cloud_id, 'es_user': es_username,
'es_password': es_password}
else:
es = Elasticsearch(hosts=es_url)
yield {'es_url': es_url}
index_names = es.indices.get(index='_all').keys()
for index_name in index_names:
if index_name.startswith('test_'):
es.indices.delete(index=index_name)
es.indices.refresh(index='_all') | null |
_get_tool_classes | results = []
for tool_class_name in tools_all:
if tool_class_name in _DEPRECATED_TOOLS:
continue
tool_class = getattr(langchain_community.tools, tool_class_name)
if isinstance(tool_class, type) and issubclass(tool_class, BaseTool):
if tool_class in _EXCLUDE:
continue
if skip_tools_without_default_names and tool_class.__fields__['name'
].default in [None, '']:
continue
results.append(tool_class)
return results | def _get_tool_classes(skip_tools_without_default_names: bool) ->List[Type[
BaseTool]]:
results = []
for tool_class_name in tools_all:
if tool_class_name in _DEPRECATED_TOOLS:
continue
tool_class = getattr(langchain_community.tools, tool_class_name)
if isinstance(tool_class, type) and issubclass(tool_class, BaseTool):
if tool_class in _EXCLUDE:
continue
if skip_tools_without_default_names and tool_class.__fields__[
'name'].default in [None, '']:
continue
results.append(tool_class)
return results | null |
with_listeners | """
Bind lifecycle listeners to a Runnable, returning a new Runnable.
on_start: Called before the runnable starts running, with the Run object.
on_end: Called after the runnable finishes running, with the Run object.
on_error: Called if the runnable throws an error, with the Run object.
The Run object contains information about the run, including its id,
type, input, output, error, start_time, end_time, and any tags or metadata
added to the run.
"""
from langchain_core.tracers.root_listeners import RootListenersTracer
return RunnableBinding(bound=self, config_factories=[lambda config: {
'callbacks': [RootListenersTracer(config=config, on_start=on_start,
on_end=on_end, on_error=on_error)]}]) | def with_listeners(self, *, on_start: Optional[Listener]=None, on_end:
Optional[Listener]=None, on_error: Optional[Listener]=None) ->Runnable[
Input, Output]:
"""
Bind lifecycle listeners to a Runnable, returning a new Runnable.
on_start: Called before the runnable starts running, with the Run object.
on_end: Called after the runnable finishes running, with the Run object.
on_error: Called if the runnable throws an error, with the Run object.
The Run object contains information about the run, including its id,
type, input, output, error, start_time, end_time, and any tags or metadata
added to the run.
"""
from langchain_core.tracers.root_listeners import RootListenersTracer
return RunnableBinding(bound=self, config_factories=[lambda config: {
'callbacks': [RootListenersTracer(config=config, on_start=on_start,
on_end=on_end, on_error=on_error)]}]) | Bind lifecycle listeners to a Runnable, returning a new Runnable.
on_start: Called before the runnable starts running, with the Run object.
on_end: Called after the runnable finishes running, with the Run object.
on_error: Called if the runnable throws an error, with the Run object.
The Run object contains information about the run, including its id,
type, input, output, error, start_time, end_time, and any tags or metadata
added to the run. |
gen_indexes | for i in range(next(length_iter)):
yield i | def gen_indexes(length_iter: Iterator[int]) ->Iterator[int]:
for i in range(next(length_iter)):
yield i | null |
test_all_imports | assert set(__all__) == set(EXPECTED_ALL) | def test_all_imports() ->None:
    assert set(__all__) == set(EXPECTED_ALL) | null |
get_tools | """Get the tools in the toolkit."""
return [QuerySparkSQLTool(db=self.db), InfoSparkSQLTool(db=self.db),
ListSparkSQLTool(db=self.db), QueryCheckerTool(db=self.db, llm=self.llm)] | def get_tools(self) ->List[BaseTool]:
"""Get the tools in the toolkit."""
return [QuerySparkSQLTool(db=self.db), InfoSparkSQLTool(db=self.db),
ListSparkSQLTool(db=self.db), QueryCheckerTool(db=self.db, llm=self
.llm)] | Get the tools in the toolkit. |
_llm_type | """Return type of llm."""
return 'vllm' | @property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'vllm' | Return type of llm. |
transform_input | return {'inputs': prompt, 'parameters': model_kwargs} | @classmethod
def transform_input(cls, prompt: str, model_kwargs: Dict[str, Any]) ->Dict[
str, Any]:
return {'inputs': prompt, 'parameters': model_kwargs} | null |
__init__ | """Initialize Alibaba Cloud MaxCompute document loader.
Args:
query: SQL query to execute.
api_wrapper: MaxCompute API wrapper.
page_content_columns: The columns to write into the `page_content` of the
Document. If unspecified, all columns will be written to `page_content`.
metadata_columns: The columns to write into the `metadata` of the Document.
If unspecified, all columns not added to `page_content` will be written.
"""
self.query = query
self.api_wrapper = api_wrapper
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns | def __init__(self, query: str, api_wrapper: MaxComputeAPIWrapper, *,
page_content_columns: Optional[Sequence[str]]=None, metadata_columns:
Optional[Sequence[str]]=None):
"""Initialize Alibaba Cloud MaxCompute document loader.
Args:
query: SQL query to execute.
api_wrapper: MaxCompute API wrapper.
page_content_columns: The columns to write into the `page_content` of the
Document. If unspecified, all columns will be written to `page_content`.
metadata_columns: The columns to write into the `metadata` of the Document.
If unspecified, all columns not added to `page_content` will be written.
"""
self.query = query
self.api_wrapper = api_wrapper
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns | Initialize Alibaba Cloud MaxCompute document loader.
Args:
query: SQL query to execute.
api_wrapper: MaxCompute API wrapper.
page_content_columns: The columns to write into the `page_content` of the
Document. If unspecified, all columns will be written to `page_content`.
metadata_columns: The columns to write into the `metadata` of the Document.
If unspecified, all columns not added to `page_content` will be written. |
__init__ | self.loads: Set[str] = set()
self.stores: Set[str] = set() | def __init__(self) ->None:
self.loads: Set[str] = set()
self.stores: Set[str] = set() | null |
save | """Saves the data to the persist_path""" | @abstractmethod
def save(self, data: Any) ->None:
"""Saves the data to the persist_path""" | Saves the data to the persist_path |
_run | """Use the PubMed tool."""
return self.api_wrapper.run(query) | def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the PubMed tool."""
return self.api_wrapper.run(query) | Use the PubMed tool. |
list_files_in_bot_branch | """
Fetches all files in the active branch of the repo,
the branch the bot uses to make changes.
Returns:
str: A plaintext list containing the filepaths in the branch.
"""
files: List[str] = []
try:
contents = self.github_repo_instance.get_contents('', ref=self.
active_branch)
for content in contents:
if content.type == 'dir':
files.extend(self.get_files_from_directory(content.path))
else:
files.append(content.path)
if files:
files_str = '\n'.join(files)
return (
f'Found {len(files)} files in branch `{self.active_branch}`:\n{files_str}'
)
else:
return f'No files found in branch: `{self.active_branch}`'
except Exception as e:
return f'Error: {e}' | def list_files_in_bot_branch(self) ->str:
"""
Fetches all files in the active branch of the repo,
the branch the bot uses to make changes.
Returns:
str: A plaintext list containing the filepaths in the branch.
"""
files: List[str] = []
try:
contents = self.github_repo_instance.get_contents('', ref=self.
active_branch)
for content in contents:
if content.type == 'dir':
files.extend(self.get_files_from_directory(content.path))
else:
files.append(content.path)
if files:
files_str = '\n'.join(files)
return (
f'Found {len(files)} files in branch `{self.active_branch}`:\n{files_str}'
)
else:
return f'No files found in branch: `{self.active_branch}`'
except Exception as e:
return f'Error: {e}' | Fetches all files in the active branch of the repo,
the branch the bot uses to make changes.
Returns:
str: A plaintext list containing the filepaths in the branch. |
_run | """Use the tool."""
if self.sync_browser is None:
raise ValueError(f'Synchronous browser not provided to {self.name}')
page = get_current_page(self.sync_browser)
return str(page.url) | def _run(self, run_manager: Optional[CallbackManagerForToolRun]=None) ->str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f'Synchronous browser not provided to {self.name}')
page = get_current_page(self.sync_browser)
return str(page.url) | Use the tool. |
get | """Return docs according ids.
Args:
ids: The ids of the embedding vectors.
Returns:
Documents which satisfy the input conditions.
"""
results: Dict[str, Document] = {}
if ids is None or ids.__len__() == 0:
return results
if self.flag:
query_data = {'query': {'ids': ids}}
docs_detail = self.vearch.mget_by_ids(self.using_db_name, self.
using_table_name, query_data)
for record in docs_detail:
if record['found'] is False:
continue
content = ''
meta_info = {}
for field in record['_source']:
if field == 'text':
content = record['_source'][field]
continue
elif field == 'metadata':
meta_info['source'] = record['_source'][field]
continue
results[record['_id']] = Document(page_content=content, metadata=
meta_info)
else:
for id in ids:
docs_detail = self.vearch.get_doc_by_id(id)
if docs_detail == {}:
continue
content = ''
meta_info = {}
for field in docs_detail:
if field == 'text':
content = docs_detail[field]
continue
elif field == 'metadata':
meta_info['source'] = docs_detail[field]
continue
results[docs_detail['_id']] = Document(page_content=content,
metadata=meta_info)
return results | def get(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Dict[str,
Document]:
"""Return docs according ids.
Args:
ids: The ids of the embedding vectors.
Returns:
Documents which satisfy the input conditions.
"""
results: Dict[str, Document] = {}
if ids is None or ids.__len__() == 0:
return results
if self.flag:
query_data = {'query': {'ids': ids}}
docs_detail = self.vearch.mget_by_ids(self.using_db_name, self.
using_table_name, query_data)
for record in docs_detail:
if record['found'] is False:
continue
content = ''
meta_info = {}
for field in record['_source']:
if field == 'text':
content = record['_source'][field]
continue
elif field == 'metadata':
meta_info['source'] = record['_source'][field]
continue
results[record['_id']] = Document(page_content=content,
metadata=meta_info)
else:
for id in ids:
docs_detail = self.vearch.get_doc_by_id(id)
if docs_detail == {}:
continue
content = ''
meta_info = {}
for field in docs_detail:
if field == 'text':
content = docs_detail[field]
continue
elif field == 'metadata':
meta_info['source'] = docs_detail[field]
continue
results[docs_detail['_id']] = Document(page_content=content,
metadata=meta_info)
return results | Return docs according to ids.
Args:
ids: The ids of the embedding vectors.
Returns:
Documents which satisfy the input conditions. |
get_collection | return self.CollectionStore.get_by_name(session, self.collection_name) | def get_collection(self, session: Session) ->Any:
    return self.CollectionStore.get_by_name(session, self.collection_name) | null |
test_default_call | """Test default model call."""
chat = ChatZhipuAI()
response = chat(messages=[HumanMessage(content='Hello')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | def test_default_call() ->None:
    """Test default model call."""
    chat = ChatZhipuAI()
    response = chat(messages=[HumanMessage(content='Hello')])
    assert isinstance(response, BaseMessage)
    assert isinstance(response.content, str) | Test default model call. |
_mathpix_headers | return {'app_id': self.mathpix_api_id, 'app_key': self.mathpix_api_key} | @property
def _mathpix_headers(self) ->Dict[str, str]:
    return {'app_id': self.mathpix_api_id, 'app_key': self.mathpix_api_key} | null |
test_handles_empty_input_list | output = format_to_openai_tool_messages([])
assert output == [] | def test_handles_empty_input_list() ->None:
    output = format_to_openai_tool_messages([])
    assert output == [] | null |
_invocation_params | """Get the parameters used to invoke the model."""
jinachat_creds: Dict[str, Any] = {'api_key': self.jinachat_api_key and self
.jinachat_api_key.get_secret_value(), 'api_base':
'https://api.chat.jina.ai/v1', 'model': 'jinachat'}
return {**jinachat_creds, **self._default_params} | @property
def _invocation_params(self) ->Mapping[str, Any]:
"""Get the parameters used to invoke the model."""
jinachat_creds: Dict[str, Any] = {'api_key': self.jinachat_api_key and
self.jinachat_api_key.get_secret_value(), 'api_base':
'https://api.chat.jina.ai/v1', 'model': 'jinachat'}
return {**jinachat_creds, **self._default_params} | Get the parameters used to invoke the model. |
memory_variables | """Will always return list of memory variables.
:meta private:
"""
return ['entities', self.chat_history_key] | @property
def memory_variables(self) ->List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return ['entities', self.chat_history_key] | Will always return list of memory variables.
:meta private: |
count_matching_files | """Count files that match the pattern without loading them."""
num = 0
for _ in self._yield_paths():
num += 1
return num | def count_matching_files(self) ->int:
"""Count files that match the pattern without loading them."""
num = 0
for _ in self._yield_paths():
num += 1
return num | Count files that match the pattern without loading them. |
similarity_search | """Perform a similarity search with ClickHouse
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end users fill this directly, and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(self.embedding_function.embed_query
(query), k, where_str, **kwargs) | def similarity_search(self, query: str, k: int=4, where_str: Optional[str]=
None, **kwargs: Any) ->List[Document]:
"""Perform a similarity search with ClickHouse
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end users fill this directly, and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(self.embedding_function.
embed_query(query), k, where_str, **kwargs) | Perform a similarity search with ClickHouse
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end users fill this directly, and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents |
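A hedged usage sketch of the `where_str` filter described in the docstring. Here `store` is assumed to be an already-initialized ClickHouse vector store (construction omitted), and `metadata.source` addresses an attribute through the default metadata column mentioned above; never build the filter from untrusted user input:

```python
# Hypothetical usage sketch: `store` stands for an initialized ClickHouse
# vector store. The filter goes through the default `metadata` column, as the
# docstring above describes. Do not interpolate end-user input into `where`.
where = "metadata.source = 'handbook.pdf'"
docs = store.similarity_search("vacation policy", k=4, where_str=where)
for doc in docs:
    print(doc.metadata, doc.page_content[:60])
```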
_run | """Use the tool."""
available_tool_names_str = ', '.join([tool for tool in available_tool_names])
return f'{requested_tool_name} is not a valid tool, try one of [{available_tool_names_str}].' | def _run(self, requested_tool_name: str, available_tool_names: List[str],
run_manager: Optional[CallbackManagerForToolRun]=None) ->str:
"""Use the tool."""
available_tool_names_str = ', '.join([tool for tool in
available_tool_names])
return (
f'{requested_tool_name} is not a valid tool, try one of [{available_tool_names_str}].'
) | Use the tool. |
_call | headers: Dict = {'Authorization': cast(SecretStr, self.cerebriumai_api_key)
.get_secret_value(), 'Content-Type': 'application/json'}
params = self.model_kwargs or {}
payload = {'prompt': prompt, **params, **kwargs}
response = requests.post(self.endpoint_url, json=payload, headers=headers)
if response.status_code == 200:
data = response.json()
text = data['result']
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
else:
response.raise_for_status()
return '' | def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
headers: Dict = {'Authorization': cast(SecretStr, self.
cerebriumai_api_key).get_secret_value(), 'Content-Type':
'application/json'}
params = self.model_kwargs or {}
payload = {'prompt': prompt, **params, **kwargs}
response = requests.post(self.endpoint_url, json=payload, headers=headers)
if response.status_code == 200:
data = response.json()
text = data['result']
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
else:
response.raise_for_status()
return '' | null |
_get_relevant_documents | query_dict = {'query': {'match': {'content': query}}}
res = self.client.search(index=self.index_name, body=query_dict)
docs = []
for r in res['hits']['hits']:
docs.append(Document(page_content=r['_source']['content']))
return docs | def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
query_dict = {'query': {'match': {'content': query}}}
res = self.client.search(index=self.index_name, body=query_dict)
docs = []
for r in res['hits']['hits']:
docs.append(Document(page_content=r['_source']['content']))
return docs | null |
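The hit-to-Document conversion above can be checked without a live cluster by feeding it a canned response of the same shape:

```python
# Standalone sketch of the hit -> Document conversion above, using a canned
# Elasticsearch-style response instead of a live client.
from langchain_core.documents import Document

res = {"hits": {"hits": [{"_source": {"content": "hello world"}}]}}
docs = [Document(page_content=hit["_source"]["content"]) for hit in res["hits"]["hits"]]
print(docs)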
get_default_host | """Gets the default Databricks workspace hostname.
Raises an error if the hostname cannot be automatically determined.
"""
host = os.getenv('DATABRICKS_HOST')
if not host:
try:
host = get_repl_context().browserHostName
if not host:
raise ValueError("context doesn't contain browserHostName.")
except Exception as e:
raise ValueError(
f"host was not set and cannot be automatically inferred. Set environment variable 'DATABRICKS_HOST'. Received error: {e}"
)
host = host.lstrip('https://').lstrip('http://').rstrip('/')
return host | def get_default_host() ->str:
"""Gets the default Databricks workspace hostname.
Raises an error if the hostname cannot be automatically determined.
"""
host = os.getenv('DATABRICKS_HOST')
if not host:
try:
host = get_repl_context().browserHostName
if not host:
raise ValueError("context doesn't contain browserHostName.")
except Exception as e:
raise ValueError(
f"host was not set and cannot be automatically inferred. Set environment variable 'DATABRICKS_HOST'. Received error: {e}"
)
host = host.lstrip('https://').lstrip('http://').rstrip('/')
return host | Gets the default Databricks workspace hostname.
Raises an error if the hostname cannot be automatically determined. |
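Outside a Databricks notebook the hostname has to come from the environment. A minimal sketch, assuming the function above is in scope; the workspace URL is a made-up example, not a real endpoint:

```python
# Minimal sketch: set DATABRICKS_HOST before calling the helper above.
# The workspace URL is illustrative only.
import os

os.environ["DATABRICKS_HOST"] = "https://adb-1234567890123456.7.azuredatabricks.net"
print(get_default_host())  # -> "adb-1234567890123456.7.azuredatabricks.net"
```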
__set_name__ | nonlocal _name
if _name == '<lambda>':
_name = set_name | def __set_name__(self, owner, set_name):
nonlocal _name
if _name == '<lambda>':
_name = set_name | null |
_run | """Use the tool."""
if self.sync_browser is None:
raise ValueError(f'Synchronous browser not provided to {self.name}')
page = get_current_page(self.sync_browser)
results = _get_elements(page, selector, attributes)
return json.dumps(results, ensure_ascii=False) | def _run(self, selector: str, attributes: Sequence[str]=['innerText'],
run_manager: Optional[CallbackManagerForToolRun]=None) ->str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f'Synchronous browser not provided to {self.name}')
page = get_current_page(self.sync_browser)
results = _get_elements(page, selector, attributes)
return json.dumps(results, ensure_ascii=False) | Use the tool. |
_import_file_management_MoveFileTool | from langchain_community.tools.file_management import MoveFileTool
return MoveFileTool | def _import_file_management_MoveFileTool() ->Any:
from langchain_community.tools.file_management import MoveFileTool
return MoveFileTool | null |
resize_base64_image | """
Resize an image encoded as a Base64 string.
:param base64_string: A Base64 encoded string of the image to be resized.
:param size: A tuple representing the new size (width, height) for the image.
:return: A Base64 encoded string of the resized image.
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode('utf-8') | def resize_base64_image(base64_string, size=(128, 128)):
"""
Resize an image encoded as a Base64 string.
:param base64_string: A Base64 encoded string of the image to be resized.
:param size: A tuple representing the new size (width, height) for the image.
:return: A Base64 encoded string of the resized image.
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode('utf-8') | Resize an image encoded as a Base64 string.
:param base64_string: A Base64 encoded string of the image to be resized.
:param size: A tuple representing the new size (width, height) for the image.
:return: A Base64 encoded string of the resized image. |
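A runnable usage sketch for the helper above: build a small in-memory PNG with Pillow, base64-encode it, and produce a 128x128 thumbnail.

```python
# Usage sketch for resize_base64_image above: generate a PNG in memory,
# base64-encode it, and shrink it to the default thumbnail size.
import base64
import io

from PIL import Image

img = Image.new("RGB", (512, 512), color=(200, 30, 30))
buf = io.BytesIO()
img.save(buf, format="PNG")
original_b64 = base64.b64encode(buf.getvalue()).decode("utf-8")

thumb_b64 = resize_base64_image(original_b64, size=(128, 128))
print(len(original_b64), "->", len(thumb_b64))
```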
_import_python | from langchain_community.utilities.python import PythonREPL
return PythonREPL | def _import_python() ->Any:
from langchain_community.utilities.python import PythonREPL
return PythonREPL | null |
test_multiline_output_parsing | _test_convo_output(
"""
Thought: Do I need to use a tool? Yes
Action: evaluate_code
Action Input: Evaluate Code with the following Python content:
```python
print("Hello fifty shades of gray mans!"[::-1])
```
"""
, 'evaluate_code',
"""
Evaluate Code with the following Python content:
```python
print("Hello fifty shades of gray mans!"[::-1])
```"""
.lstrip()) | def test_multiline_output_parsing() ->None:
_test_convo_output(
"""
Thought: Do I need to use a tool? Yes
Action: evaluate_code
Action Input: Evaluate Code with the following Python content:
```python
print("Hello fifty shades of gray mans!"[::-1])
```
"""
, 'evaluate_code',
"""
Evaluate Code with the following Python content:
```python
print("Hello fifty shades of gray mans!"[::-1])
```"""
.lstrip()) | null |
save_context | """Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
self.retriever.add_documents(documents) | def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
self.retriever.add_documents(documents) | Save context from this conversation to buffer. |
_llm_type | return 'yandex_gpt' | @property
def _llm_type(self) ->str:
return 'yandex_gpt' | null |
similarity_search | """
Return Jaguar documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 3.
where: the where clause in select similarity. For example a
where can be "rating > 3.0 and (state = 'NV' or state = 'CA')"
Returns:
List of Documents most similar to the query
"""
docs_and_scores = self.similarity_search_with_score(query, k=k, where=where,
metadatas=metadatas, **kwargs)
return [doc for doc, _ in docs_and_scores] | def similarity_search(self, query: str, k: int=3, where: Optional[str]=None,
metadatas: Optional[List[str]]=None, **kwargs: Any) ->List[Document]:
"""
Return Jaguar documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 3.
where: the where clause in select similarity. For example a
where can be "rating > 3.0 and (state = 'NV' or state = 'CA')"
Returns:
List of Documents most similar to the query
"""
docs_and_scores = self.similarity_search_with_score(query, k=k, where=
where, metadatas=metadatas, **kwargs)
return [doc for doc, _ in docs_and_scores] | Return Jaguar documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 3.
where: the where clause in select similarity. For example a
where can be "rating > 3.0 and (state = 'NV' or state = 'CA')"
Returns:
List of Documents most similar to the query |
worker | old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
try:
exec(command, globals, locals)
sys.stdout = old_stdout
queue.put(mystdout.getvalue())
except Exception as e:
sys.stdout = old_stdout
queue.put(repr(e)) | @classmethod
def worker(cls, command: str, globals: Optional[Dict], locals: Optional[
Dict], queue: multiprocessing.Queue) ->None:
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
try:
exec(command, globals, locals)
sys.stdout = old_stdout
queue.put(mystdout.getvalue())
except Exception as e:
sys.stdout = old_stdout
queue.put(repr(e)) | null |
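A hedged driver sketch for the classmethod above: spawn the worker in a separate process so the caller can enforce a timeout, then read the captured stdout (or the repr of a raised exception) from the queue. `PythonREPL` names the class assumed to define `worker`; treat that name as an assumption and swap in the real owner class.

```python
# Hypothetical driver for the `worker` classmethod above. `PythonREPL` is an
# assumed owner class; the real class defining `worker` may be named differently.
import multiprocessing

if __name__ == "__main__":
    queue: multiprocessing.Queue = multiprocessing.Queue()
    proc = multiprocessing.Process(
        target=PythonREPL.worker,
        args=("print(sum(range(10)))", {}, {}, queue),
    )
    proc.start()
    proc.join(timeout=5)
    print(queue.get())  # "45\n" on success, or the repr of the raised exception
```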
structured_tool_input | """Return the arguments directly."""
return f'{arg1} {arg2} {arg3}' | @tool
def structured_tool_input(arg1: int, arg2: bool, arg3: Optional[dict]=None
) ->str:
"""Return the arguments directly."""
return f'{arg1} {arg2} {arg3}' | Return the arguments directly. |
test_neo4jvector_relevance_score | """Test to make sure the relevance score is scaled to 0-1."""
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(texts=texts, embedding=
FakeEmbeddingsWithOsDimension(), metadatas=metadatas, url=url, username
=username, password=password, pre_delete_collection=True)
output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
1.0), (Document(page_content='bar', metadata={'page': '1'}),
0.9998376369476318), (Document(page_content='baz', metadata={'page':
'2'}), 0.9993523359298706)]
drop_vector_indexes(docsearch) | def test_neo4jvector_relevance_score() ->None:
"""Test to make sure the relevance score is scaled to 0-1."""
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(texts=texts, embedding=
FakeEmbeddingsWithOsDimension(), metadatas=metadatas, url=url,
username=username, password=password, pre_delete_collection=True)
output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
1.0), (Document(page_content='bar', metadata={'page': '1'}),
0.9998376369476318), (Document(page_content='baz', metadata={'page':
'2'}), 0.9993523359298706)]
drop_vector_indexes(docsearch) | Test to make sure the relevance score is scaled to 0-1. |
add_texts | """Add the given texts to the store (insert behavior)."""
raise NotImplementedError() | def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict[Any,
Any]]]=None, **kwargs: Any) ->List[str]:
"""Add the given texts to the store (insert behavior)."""
raise NotImplementedError() | Add the given texts to the store (insert behavior). |
_identifying_params | """Get the identifying parameters."""
return {'model_id': self.model_id, 'model_kwargs': self.model_kwargs,
'pipeline_kwargs': self.pipeline_kwargs} | @property
def _identifying_params(self) ->Mapping[str, Any]:
"""Get the identifying parameters."""
return {'model_id': self.model_id, 'model_kwargs': self.model_kwargs,
'pipeline_kwargs': self.pipeline_kwargs} | Get the identifying parameters. |
create_ernie_fn_chain | """[Legacy] Create an LLM chain that uses Ernie functions.
Args:
functions: A sequence of either dictionaries, pydantic.BaseModels classes, or
Python functions. If dictionaries are passed in, they are assumed to
already be valid Ernie functions. If only a single
function is passed in, then it will be enforced that the model use that
function. pydantic.BaseModels and Python functions should have docstrings
describing what the function does. For best results, pydantic.BaseModels
should have descriptions of the parameters and Python functions should have
Google Python style args descriptions in the docstring. Additionally,
Python functions should only use primitive types (str, int, float, bool) or
pydantic.BaseModels for arguments.
llm: Language model to use, assumed to support the Ernie function-calling API.
prompt: BasePromptTemplate to pass to the model.
output_key: The key to use when returning the output in LLMChain.__call__.
output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModels are passed
in, then the OutputParser will try to parse outputs using those. Otherwise
model outputs will simply be parsed as JSON. If multiple functions are
passed in and they are not pydantic.BaseModels, the chain output will
include both the name of the function that was returned and the arguments
to pass to the function.
Returns:
An LLMChain that will pass in the given functions to the model when run.
Example:
.. code-block:: python
from typing import Optional
from langchain.chains.ernie_functions import create_ernie_fn_chain
from langchain_community.chat_models import ErnieBotChat
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
class RecordPerson(BaseModel):
""\"Record some identifying information about a person.""\"
name: str = Field(..., description="The person's name")
age: int = Field(..., description="The person's age")
fav_food: Optional[str] = Field(None, description="The person's favorite food")
class RecordDog(BaseModel):
""\"Record some identifying information about a dog.""\"
name: str = Field(..., description="The dog's name")
color: str = Field(..., description="The dog's color")
fav_food: Optional[str] = Field(None, description="The dog's favorite food")
llm = ErnieBotChat(model_name="ERNIE-Bot-4")
prompt = ChatPromptTemplate.from_messages(
[
("user", "Make calls to the relevant function to record the entities in the following input: {input}"),
("assistant", "OK!"),
("user", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_ernie_fn_chain([RecordPerson, RecordDog], llm, prompt)
chain.run("Harry was a chubby brown beagle who loved chicken")
# -> RecordDog(name="Harry", color="brown", fav_food="chicken")
"""
if not functions:
raise ValueError('Need to pass in at least one function. Received zero.')
ernie_functions = [convert_to_ernie_function(f) for f in functions]
output_parser = output_parser or get_ernie_output_parser(functions)
llm_kwargs: Dict[str, Any] = {'functions': ernie_functions}
if len(ernie_functions) == 1:
llm_kwargs['function_call'] = {'name': ernie_functions[0]['name']}
llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser,
llm_kwargs=llm_kwargs, output_key=output_key, **kwargs)
return llm_chain | def create_ernie_fn_chain(functions: Sequence[Union[Dict[str, Any], Type[
BaseModel], Callable]], llm: BaseLanguageModel, prompt:
BasePromptTemplate, *, output_key: str='function', output_parser:
Optional[BaseLLMOutputParser]=None, **kwargs: Any) ->LLMChain:
"""[Legacy] Create an LLM chain that uses Ernie functions.
Args:
functions: A sequence of either dictionaries, pydantic.BaseModels classes, or
Python functions. If dictionaries are passed in, they are assumed to
already be valid Ernie functions. If only a single
function is passed in, then it will be enforced that the model use that
function. pydantic.BaseModels and Python functions should have docstrings
describing what the function does. For best results, pydantic.BaseModels
should have descriptions of the parameters and Python functions should have
Google Python style args descriptions in the docstring. Additionally,
Python functions should only use primitive types (str, int, float, bool) or
pydantic.BaseModels for arguments.
llm: Language model to use, assumed to support the Ernie function-calling API.
prompt: BasePromptTemplate to pass to the model.
output_key: The key to use when returning the output in LLMChain.__call__.
output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModels are passed
in, then the OutputParser will try to parse outputs using those. Otherwise
model outputs will simply be parsed as JSON. If multiple functions are
passed in and they are not pydantic.BaseModels, the chain output will
include both the name of the function that was returned and the arguments
to pass to the function.
Returns:
An LLMChain that will pass in the given functions to the model when run.
Example:
.. code-block:: python
from typing import Optional
from langchain.chains.ernie_functions import create_ernie_fn_chain
from langchain_community.chat_models import ErnieBotChat
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
class RecordPerson(BaseModel):
""\"Record some identifying information about a person.""\"
name: str = Field(..., description="The person's name")
age: int = Field(..., description="The person's age")
fav_food: Optional[str] = Field(None, description="The person's favorite food")
class RecordDog(BaseModel):
""\"Record some identifying information about a dog.""\"
name: str = Field(..., description="The dog's name")
color: str = Field(..., description="The dog's color")
fav_food: Optional[str] = Field(None, description="The dog's favorite food")
llm = ErnieBotChat(model_name="ERNIE-Bot-4")
prompt = ChatPromptTemplate.from_messages(
[
("user", "Make calls to the relevant function to record the entities in the following input: {input}"),
("assistant", "OK!"),
("user", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_ernie_fn_chain([RecordPerson, RecordDog], llm, prompt)
chain.run("Harry was a chubby brown beagle who loved chicken")
# -> RecordDog(name="Harry", color="brown", fav_food="chicken")
"""
if not functions:
raise ValueError(
'Need to pass in at least one function. Received zero.')
ernie_functions = [convert_to_ernie_function(f) for f in functions]
output_parser = output_parser or get_ernie_output_parser(functions)
llm_kwargs: Dict[str, Any] = {'functions': ernie_functions}
if len(ernie_functions) == 1:
llm_kwargs['function_call'] = {'name': ernie_functions[0]['name']}
llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=
output_parser, llm_kwargs=llm_kwargs, output_key=output_key, **kwargs)
return llm_chain | [Legacy] Create an LLM chain that uses Ernie functions.
Args:
functions: A sequence of either dictionaries, pydantic.BaseModels classes, or
Python functions. If dictionaries are passed in, they are assumed to
already be valid Ernie functions. If only a single
function is passed in, then it will be enforced that the model use that
function. pydantic.BaseModels and Python functions should have docstrings
describing what the function does. For best results, pydantic.BaseModels
should have descriptions of the parameters and Python functions should have
Google Python style args descriptions in the docstring. Additionally,
Python functions should only use primitive types (str, int, float, bool) or
pydantic.BaseModels for arguments.
llm: Language model to use, assumed to support the Ernie function-calling API.
prompt: BasePromptTemplate to pass to the model.
output_key: The key to use when returning the output in LLMChain.__call__.
output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModels are passed
in, then the OutputParser will try to parse outputs using those. Otherwise
model outputs will simply be parsed as JSON. If multiple functions are
passed in and they are not pydantic.BaseModels, the chain output will
include both the name of the function that was returned and the arguments
to pass to the function.
Returns:
An LLMChain that will pass in the given functions to the model when run.
Example:
.. code-block:: python
from typing import Optional
from langchain.chains.ernie_functions import create_ernie_fn_chain
from langchain_community.chat_models import ErnieBotChat
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
class RecordPerson(BaseModel):
"""Record some identifying information about a person."""
name: str = Field(..., description="The person's name")
age: int = Field(..., description="The person's age")
fav_food: Optional[str] = Field(None, description="The person's favorite food")
class RecordDog(BaseModel):
"""Record some identifying information about a dog."""
name: str = Field(..., description="The dog's name")
color: str = Field(..., description="The dog's color")
fav_food: Optional[str] = Field(None, description="The dog's favorite food")
llm = ErnieBotChat(model_name="ERNIE-Bot-4")
prompt = ChatPromptTemplate.from_messages(
[
("user", "Make calls to the relevant function to record the entities in the following input: {input}"),
("assistant", "OK!"),
("user", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_ernie_fn_chain([RecordPerson, RecordDog], llm, prompt)
chain.run("Harry was a chubby brown beagle who loved chicken")
# -> RecordDog(name="Harry", color="brown", fav_food="chicken") |
on_agent_finish | if self._current_thought is not None:
self._current_thought.complete(self._thought_labeler.
get_final_agent_thought_label())
self._current_thought = None | def on_agent_finish(self, finish: AgentFinish, color: Optional[str]=None,
**kwargs: Any) ->None:
if self._current_thought is not None:
self._current_thought.complete(self._thought_labeler.
get_final_agent_thought_label())
self._current_thought = None | null |
_wait_until | """Sleeps until meth() evaluates to true. Passes kwargs into
meth.
"""
start = datetime.now()
while not method(**method_params):
curr = datetime.now()
if (curr - start).total_seconds() * 1000 > timeout:
raise TimeoutError(f'{method} timed out at {timeout} ms')
sleep(RocksetChatMessageHistory.SLEEP_INTERVAL_MS / 1000) | def _wait_until(self, method: Callable, timeout: int, **method_params: Any
) ->None:
"""Sleeps until meth() evaluates to true. Passes kwargs into
meth.
"""
start = datetime.now()
while not method(**method_params):
curr = datetime.now()
if (curr - start).total_seconds() * 1000 > timeout:
raise TimeoutError(f'{method} timed out at {timeout} ms')
sleep(RocksetChatMessageHistory.SLEEP_INTERVAL_MS / 1000) | Sleeps until method() evaluates to true. Passes kwargs into
method.
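The same polling pattern, as a standalone runnable sketch with the timeout expressed in milliseconds:

```python
# Standalone sketch of the polling pattern above: call `method(**params)` until
# it returns True or the timeout (in milliseconds) elapses.
from datetime import datetime
from time import sleep


def wait_until(method, timeout_ms: int, poll_ms: int = 100, **method_params) -> None:
    start = datetime.now()
    while not method(**method_params):
        if (datetime.now() - start).total_seconds() * 1000 > timeout_ms:
            raise TimeoutError(f"{method} timed out at {timeout_ms} ms")
        sleep(poll_ms / 1000)


deadline = datetime.now().timestamp() + 0.3
wait_until(lambda: datetime.now().timestamp() >= deadline, timeout_ms=1000)
print("condition became true before the timeout")
```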
test_parse_api_operations | """Test the APIOperation class."""
for spec_name, spec, path, method in http_paths_and_methods():
try:
APIOperation.from_openapi_spec(spec, path, method)
except Exception as e:
raise AssertionError(f'Error processing {spec_name}: {e} ') from e | @pytest.mark.requires('openapi_pydantic')
def test_parse_api_operations() ->None:
"""Test the APIOperation class."""
for spec_name, spec, path, method in http_paths_and_methods():
try:
APIOperation.from_openapi_spec(spec, path, method)
except Exception as e:
raise AssertionError(f'Error processing {spec_name}: {e} ') from e | Test the APIOperation class. |
memory_variables | """The string keys this memory class will add to chain inputs.""" | @property
@abstractmethod
def memory_variables(self) ->List[str]:
"""The string keys this memory class will add to chain inputs.""" | The string keys this memory class will add to chain inputs. |
test_chat_openai_system_message | """Test ChatOpenAI wrapper with system message."""
chat = ChatOpenAI(max_tokens=10)
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | def test_chat_openai_system_message() ->None:
"""Test ChatOpenAI wrapper with system message."""
chat = ChatOpenAI(max_tokens=10)
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | Test ChatOpenAI wrapper with system message. |
test_get_code_lines_simple_nested_ticks | """Test that backticks w/o a newline are ignored."""
text = """
Unrelated text
```bash
echo hello
echo "```bash is in this string```"
```
"""
code_lines = output_parser.parse(text)
assert code_lines == ['echo hello', 'echo "```bash is in this string```"'] | def test_get_code_lines_simple_nested_ticks(output_parser: BashOutputParser
) ->None:
"""Test that backticks w/o a newline are ignored."""
text = (
'\nUnrelated text\n```bash\necho hello\necho "```bash is in this string```"\n```\n'
)
code_lines = output_parser.parse(text)
assert code_lines == ['echo hello', 'echo "```bash is in this string```"'] | Test that backticks w/o a newline are ignored. |
test_partial_init_string | """Test prompt can be initialized with partial variables."""
template = 'This is a {foo} test.'
prompt = PromptTemplate(input_variables=[], template=template,
partial_variables={'foo': 1})
assert prompt.template == template
assert prompt.input_variables == []
result = prompt.format()
assert result == 'This is a 1 test.' | def test_partial_init_string() ->None:
"""Test prompt can be initialized with partial variables."""
template = 'This is a {foo} test.'
prompt = PromptTemplate(input_variables=[], template=template,
partial_variables={'foo': 1})
assert prompt.template == template
assert prompt.input_variables == []
result = prompt.format()
assert result == 'This is a 1 test.' | Test prompt can be initialized with partial variables. |
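The same partial-variable behaviour is also available after construction via `PromptTemplate.partial()`. A small complementary sketch; the import path assumes a recent langchain_core and may differ in older versions:

```python
# Complementary sketch: partial variables supplied after construction.
# Import path is an assumption based on recent langchain_core layouts.
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template("This is a {foo} test.")
partial_prompt = prompt.partial(foo="1")
print(partial_prompt.format())  # -> "This is a 1 test."
```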
from_texts | """Return DashVector VectorStore initialized from texts and embeddings.
This is the quick way to get started with dashvector vector store.
Example:
.. code-block:: python
from langchain_community.vectorstores import DashVector
from langchain_community.embeddings import OpenAIEmbeddings
import dashvector
embeddings = OpenAIEmbeddings()
dashvector = DashVector.from_documents(
docs,
embeddings,
dashvector_api_key="{DASHVECTOR_API_KEY}"
)
"""
try:
import dashvector
except ImportError:
raise ValueError(
'Could not import dashvector python package. Please install it with `pip install dashvector`.'
)
dashvector_api_key = dashvector_api_key or get_from_env('dashvector_api_key',
'DASHVECTOR_API_KEY')
dashvector_endpoint = dashvector_endpoint or get_from_env('dashvector_endpoint'
, 'DASHVECTOR_ENDPOINT', default='dashvector.cn-hangzhou.aliyuncs.com')
dashvector_client = dashvector.Client(api_key=dashvector_api_key, endpoint=
dashvector_endpoint)
dashvector_client.delete(collection_name)
collection = dashvector_client.get(collection_name)
if not collection:
dim = len(embedding.embed_query(texts[0]))
resp = dashvector_client.create(collection_name, dimension=dim)
if resp:
collection = dashvector_client.get(collection_name)
else:
raise ValueError(f'Fail to create collection. Error: {resp.message}.')
dashvector_vector_db = cls(collection, embedding, text_field)
dashvector_vector_db.add_texts(texts, metadatas, ids, batch_size)
return dashvector_vector_db | @classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
Optional[List[dict]]=None, dashvector_api_key: Optional[str]=None,
dashvector_endpoint: Optional[str]=None, collection_name: str=
'langchain', text_field: str='text', batch_size: int=25, ids: Optional[
List[str]]=None, **kwargs: Any) ->DashVector:
"""Return DashVector VectorStore initialized from texts and embeddings.
This is the quick way to get started with dashvector vector store.
Example:
.. code-block:: python
from langchain_community.vectorstores import DashVector
from langchain_community.embeddings import OpenAIEmbeddings
import dashvector
embeddings = OpenAIEmbeddings()
dashvector = DashVector.from_documents(
docs,
embeddings,
dashvector_api_key="{DASHVECTOR_API_KEY}"
)
"""
try:
import dashvector
except ImportError:
raise ValueError(
'Could not import dashvector python package. Please install it with `pip install dashvector`.'
)
dashvector_api_key = dashvector_api_key or get_from_env(
'dashvector_api_key', 'DASHVECTOR_API_KEY')
dashvector_endpoint = dashvector_endpoint or get_from_env(
'dashvector_endpoint', 'DASHVECTOR_ENDPOINT', default=
'dashvector.cn-hangzhou.aliyuncs.com')
dashvector_client = dashvector.Client(api_key=dashvector_api_key,
endpoint=dashvector_endpoint)
dashvector_client.delete(collection_name)
collection = dashvector_client.get(collection_name)
if not collection:
dim = len(embedding.embed_query(texts[0]))
resp = dashvector_client.create(collection_name, dimension=dim)
if resp:
collection = dashvector_client.get(collection_name)
else:
raise ValueError(
f'Fail to create collection. Error: {resp.message}.')
dashvector_vector_db = cls(collection, embedding, text_field)
dashvector_vector_db.add_texts(texts, metadatas, ids, batch_size)
return dashvector_vector_db | Return DashVector VectorStore initialized from texts and embeddings.
This is the quick way to get started with dashvector vector store.
Example:
.. code-block:: python
from langchain_community.vectorstores import DashVector
from langchain_community.embeddings import OpenAIEmbeddings
import dashvector
embeddings = OpenAIEmbeddings()
dashvector = DashVector.from_documents(
docs,
embeddings,
dashvector_api_key="{DASHVECTOR_API_KEY}"
) |
_get_internal_distance_strategy | """Return the internal distance strategy."""
if self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
return 'euclidean'
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
raise ValueError('Max inner product is not supported by SemaDB')
elif self.distance_strategy == DistanceStrategy.DOT_PRODUCT:
return 'dot'
elif self.distance_strategy == DistanceStrategy.JACCARD:
raise ValueError('Max inner product is not supported by SemaDB')
elif self.distance_strategy == DistanceStrategy.COSINE:
return 'cosine'
else:
raise ValueError(f'Unknown distance strategy {self.distance_strategy}') | def _get_internal_distance_strategy(self) ->str:
"""Return the internal distance strategy."""
if self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
return 'euclidean'
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
raise ValueError('Max inner product is not supported by SemaDB')
elif self.distance_strategy == DistanceStrategy.DOT_PRODUCT:
return 'dot'
elif self.distance_strategy == DistanceStrategy.JACCARD:
raise ValueError('Max inner product is not supported by SemaDB')
elif self.distance_strategy == DistanceStrategy.COSINE:
return 'cosine'
else:
raise ValueError(f'Unknown distance strategy {self.distance_strategy}') | Return the internal distance strategy. |
test_misannotated_base_tool_raises_error | """Test that a BaseTool with the incorrect typehint raises an exception."""
with pytest.raises(SchemaAnnotationError):
class _MisAnnotatedTool(BaseTool):
name: str = 'structured_api'
args_schema: BaseModel = _MockSchema
description: str = 'A Structured Tool'
def _run(self, arg1: int, arg2: bool, arg3: Optional[dict]=None) ->str:
return f'{arg1} {arg2} {arg3}'
async def _arun(self, arg1: int, arg2: bool, arg3: Optional[dict]=None
) ->str:
raise NotImplementedError | def test_misannotated_base_tool_raises_error() ->None:
"""Test that a BaseTool with the incorrect typehint raises an exception."""
with pytest.raises(SchemaAnnotationError):
class _MisAnnotatedTool(BaseTool):
name: str = 'structured_api'
args_schema: BaseModel = _MockSchema
description: str = 'A Structured Tool'
def _run(self, arg1: int, arg2: bool, arg3: Optional[dict]=None
) ->str:
return f'{arg1} {arg2} {arg3}'
async def _arun(self, arg1: int, arg2: bool, arg3: Optional[
dict]=None) ->str:
raise NotImplementedError | Test that a BaseTool with the incorrect typehint raises an exception. |
_get_input_messages | from langchain_core.messages import BaseMessage
if isinstance(input_val, str):
from langchain_core.messages import HumanMessage
return [HumanMessage(content=input_val)]
elif isinstance(input_val, BaseMessage):
return [input_val]
elif isinstance(input_val, (list, tuple)):
return list(input_val)
else:
raise ValueError(
f'Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. Got {input_val}.'
) | def _get_input_messages(self, input_val: Union[str, BaseMessage, Sequence[
BaseMessage]]) ->List[BaseMessage]:
from langchain_core.messages import BaseMessage
if isinstance(input_val, str):
from langchain_core.messages import HumanMessage
return [HumanMessage(content=input_val)]
elif isinstance(input_val, BaseMessage):
return [input_val]
elif isinstance(input_val, (list, tuple)):
return list(input_val)
else:
raise ValueError(
f'Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. Got {input_val}.'
) | null |
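A runnable sketch of the same normalization rule, written outside the class: strings become a single HumanMessage, a lone message is wrapped in a list, and message sequences are copied into a list.

```python
# Runnable sketch of the input normalization above.
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage


def to_message_list(value):
    if isinstance(value, str):
        return [HumanMessage(content=value)]
    if isinstance(value, BaseMessage):
        return [value]
    if isinstance(value, (list, tuple)):
        return list(value)
    raise ValueError(f"Unsupported input type: {type(value)}")


print(to_message_list("hi"))
print(to_message_list([HumanMessage(content="hi"), AIMessage(content="hello")]))
```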
_select_relevance_score_fn | """
The underlying API calls already return a "score proper",
i.e. one in [0, 1] where higher means more *similar*,
so here the final score transformation is not reversing the interval:
"""
return self._dont_flip_the_cos_score | def _select_relevance_score_fn(self) ->Callable[[float], float]:
"""
The underlying API calls already return a "score proper",
i.e. one in [0, 1] where higher means more *similar*,
so here the final score transformation is not reversing the interval:
"""
return self._dont_flip_the_cos_score | The underlying API calls already return a "score proper",
i.e. one in [0, 1] where higher means more *similar*,
so here the final score transformation is not reversing the interval: |
generate_img_summaries | """
Generate summaries for images
:param img_base64_list: Base64 encoded images
:return: List of image summaries and processed images
"""
image_summaries = []
processed_images = []
prompt = 'Give a detailed summary of the image.'
for i, base64_image in enumerate(img_base64_list):
try:
image_summaries.append(image_summarize(base64_image, prompt))
processed_images.append(base64_image)
except Exception as e:
print(f'Error with image {i + 1}: {e}')
return image_summaries, processed_images | def generate_img_summaries(img_base64_list):
"""
Generate summaries for images
:param img_base64_list: Base64 encoded images
:return: List of image summaries and processed images
"""
image_summaries = []
processed_images = []
prompt = 'Give a detailed summary of the image.'
for i, base64_image in enumerate(img_base64_list):
try:
image_summaries.append(image_summarize(base64_image, prompt))
processed_images.append(base64_image)
except Exception as e:
print(f'Error with image {i + 1}: {e}')
return image_summaries, processed_images | Generate summaries for images
:param img_base64_list: Base64 encoded images
:return: List of image summaries and processed images |
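A hedged usage sketch for the helper above: base64-encode every JPEG in a folder and pass the list in. `image_summarize` is assumed to be defined elsewhere in the same module (it is the call that actually hits the multimodal model), and the `./figures` path is illustrative only.

```python
# Hypothetical usage sketch for generate_img_summaries above. It relies on an
# `image_summarize` helper assumed to exist in the same module; the folder
# path is illustrative, not part of the original code.
import base64
from pathlib import Path

img_base64_list = [
    base64.b64encode(p.read_bytes()).decode("utf-8")
    for p in sorted(Path("./figures").glob("*.jpg"))
]
summaries, kept_images = generate_img_summaries(img_base64_list)
print(f"Summarized {len(summaries)} of {len(img_base64_list)} images")
```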