method_name (string, 1-78 chars) | method_body (string, 3-9.66k chars) | full_code (string, 31-10.7k chars) | docstring (string, 4-4.74k chars, nullable) |
---|---|---|---|
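Each row pairs a method name with its body, the full source (signature plus body), and the docstring where one exists (null otherwise). As a minimal sketch, assuming this table was exported from a Hugging Face dataset, the columns could be loaded and inspected as below; the repo ID is a placeholder, not the real identifier:

```python
from datasets import load_dataset

# Placeholder repo ID (assumption): substitute the actual dataset path,
# or point load_dataset at local JSON/Parquet files instead.
ds = load_dataset("your-org/langchain-methods", split="train")

row = ds[0]
print(row["method_name"])  # e.g. "_import_huggingface_text_gen_inference"
print(row["full_code"])    # signature plus body
print(row["docstring"])    # None where the docstring column is null
```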
_import_huggingface_text_gen_inference | from langchain_community.llms.huggingface_text_gen_inference import HuggingFaceTextGenInference
return HuggingFaceTextGenInference | def _import_huggingface_text_gen_inference() ->Any:
from langchain_community.llms.huggingface_text_gen_inference import HuggingFaceTextGenInference
return HuggingFaceTextGenInference | null |
__init__ | self.model_id = model_id or '.elser_model_1' | def __init__(self, model_id: Optional[str]=None):
self.model_id = model_id or '.elser_model_1' | null |
__init__ | """
Initialize the AgentExecutorIterator with the given AgentExecutor,
inputs, and optional callbacks.
"""
self._agent_executor = agent_executor
self.inputs = inputs
self.callbacks = callbacks
self.tags = tags
self.metadata = metadata
self.run_name = run_name
self.include_run_info = include_run_info
self.yield_actions = yield_actions
self.reset() | def __init__(self, agent_executor: AgentExecutor, inputs: Any, callbacks:
Callbacks=None, *, tags: Optional[list[str]]=None, metadata: Optional[
Dict[str, Any]]=None, run_name: Optional[str]=None, include_run_info:
bool=False, yield_actions: bool=False):
"""
Initialize the AgentExecutorIterator with the given AgentExecutor,
inputs, and optional callbacks.
"""
self._agent_executor = agent_executor
self.inputs = inputs
self.callbacks = callbacks
self.tags = tags
self.metadata = metadata
self.run_name = run_name
self.include_run_info = include_run_info
self.yield_actions = yield_actions
self.reset() | Initialize the AgentExecutorIterator with the given AgentExecutor,
inputs, and optional callbacks. |
test_parse_list_value | _test_parse_value(x) | @pytest.mark.parametrize('x', ([], [1, 'b', 'true']))
def test_parse_list_value(x: list) ->None:
_test_parse_value(x) | null |
_convert_prompt_msg_params | model_req = {'model': {'name': self.model}}
if self.model_version is not None:
model_req['model']['version'] = self.model_version
return {**model_req, 'messages': [{'role': 'user', 'content': prompt}],
'parameters': {**self._default_params, **kwargs}} | def _convert_prompt_msg_params(self, prompt: str, **kwargs: Any) ->dict:
model_req = {'model': {'name': self.model}}
if self.model_version is not None:
model_req['model']['version'] = self.model_version
return {**model_req, 'messages': [{'role': 'user', 'content': prompt}],
'parameters': {**self._default_params, **kwargs}} | null |
add_message | """Append the message to the record in PostgreSQL"""
from psycopg import sql
query = sql.SQL('INSERT INTO {} (session_id, message) VALUES (%s, %s);'
).format(sql.Identifier(self.table_name))
self.cursor.execute(query, (self.session_id, json.dumps(message_to_dict(
message))))
self.connection.commit() | def add_message(self, message: BaseMessage) ->None:
"""Append the message to the record in PostgreSQL"""
from psycopg import sql
query = sql.SQL('INSERT INTO {} (session_id, message) VALUES (%s, %s);'
).format(sql.Identifier(self.table_name))
self.cursor.execute(query, (self.session_id, json.dumps(message_to_dict(message))))
self.connection.commit() | Append the message to the record in PostgreSQL |
embed_query | return self._get_embedding() | def embed_query(self, text: str) ->List[float]:
return self._get_embedding() | null |
similarity_search_by_vector_with_relevance_scores | """
Return docs most similar to embedding vector and similarity score.
"""
results = self.__query_cluster(query_embeddings=query_embeddings, n_results
=k, where=where)
return _results_to_docs_and_scores(results) | def similarity_search_by_vector_with_relevance_scores(self,
query_embeddings: List[float], k: int=DEFAULT_K, where: Optional[Dict[
str, str]]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
"""
Return docs most similar to embedding vector and similarity score.
"""
results = self.__query_cluster(query_embeddings=query_embeddings,
n_results=k, where=where)
return _results_to_docs_and_scores(results) | Return docs most similar to embedding vector and similarity score. |
test_prompt_from_file | """Test prompt can be successfully constructed from a file."""
template_file = 'tests/unit_tests/data/prompt_file.txt'
input_variables = ['question']
prompt = PromptTemplate.from_file(template_file, input_variables)
assert prompt.template == """Question: {question}
Answer:""" | def test_prompt_from_file() ->None:
"""Test prompt can be successfully constructed from a file."""
template_file = 'tests/unit_tests/data/prompt_file.txt'
input_variables = ['question']
prompt = PromptTemplate.from_file(template_file, input_variables)
assert prompt.template == 'Question: {question}\nAnswer:' | Test prompt can be successfully constructed from a file. |
test_cassandra_cache_ttl | session, keyspace = cassandra_connection
cache = CassandraCache(session=session, keyspace=keyspace, ttl_seconds=2)
set_llm_cache(cache)
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
expected_output = LLMResult(generations=[[Generation(text='fizz')]],
llm_output={})
output = llm.generate(['foo'])
assert output == expected_output
time.sleep(2.5)
output = llm.generate(['foo'])
assert output != expected_output
cache.clear() | def test_cassandra_cache_ttl(cassandra_connection: Tuple[Any, str]) ->None:
session, keyspace = cassandra_connection
cache = CassandraCache(session=session, keyspace=keyspace, ttl_seconds=2)
set_llm_cache(cache)
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
expected_output = LLMResult(generations=[[Generation(text='fizz')]],
llm_output={})
output = llm.generate(['foo'])
assert output == expected_output
time.sleep(2.5)
output = llm.generate(['foo'])
assert output != expected_output
cache.clear() | null |
es_client | from elastic_transport import Transport
from elasticsearch import Elasticsearch
class CustomTransport(Transport):
requests = []
def perform_request(self, *args, **kwargs):
self.requests.append(kwargs)
return super().perform_request(*args, **kwargs)
es_url = os.environ.get('ES_URL', 'http://localhost:9200')
cloud_id = os.environ.get('ES_CLOUD_ID')
es_username = os.environ.get('ES_USERNAME', 'elastic')
es_password = os.environ.get('ES_PASSWORD', 'changeme')
if cloud_id:
es = Elasticsearch(cloud_id=cloud_id, basic_auth=(es_username,
es_password), transport_class=CustomTransport)
return es
else:
es = Elasticsearch(hosts=es_url, transport_class=CustomTransport)
return es | @pytest.fixture(scope='function')
def es_client(self) ->Any:
from elastic_transport import Transport
from elasticsearch import Elasticsearch
class CustomTransport(Transport):
requests = []
def perform_request(self, *args, **kwargs):
self.requests.append(kwargs)
return super().perform_request(*args, **kwargs)
es_url = os.environ.get('ES_URL', 'http://localhost:9200')
cloud_id = os.environ.get('ES_CLOUD_ID')
es_username = os.environ.get('ES_USERNAME', 'elastic')
es_password = os.environ.get('ES_PASSWORD', 'changeme')
if cloud_id:
es = Elasticsearch(cloud_id=cloud_id, basic_auth=(es_username,
es_password), transport_class=CustomTransport)
return es
else:
es = Elasticsearch(hosts=es_url, transport_class=CustomTransport)
return es | null |
_import_together | from langchain_community.llms.together import Together
return Together | def _import_together() ->Any:
from langchain_community.llms.together import Together
return Together | null |
getter | return ContextGet(key=key) | @staticmethod
def getter(key: Union[str, List[str]], /) ->ContextGet:
return ContextGet(key=key) | null |
_prepare_input_and_invoke | _model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
accept = 'application/json'
contentType = 'application/json'
try:
response = self.client.invoke_model(body=body, modelId=self.model_id,
accept=accept, contentType=contentType)
text = LLMInputOutputAdapter.prepare_output(provider, response)
except Exception as e:
raise ValueError(f'Error raised by bedrock service: {e}').with_traceback(e.__traceback__)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | def _prepare_input_and_invoke(self, prompt: str, stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->str:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
accept = 'application/json'
contentType = 'application/json'
try:
response = self.client.invoke_model(body=body, modelId=self.model_id,
    accept=accept, contentType=contentType)
text = LLMInputOutputAdapter.prepare_output(provider, response)
except Exception as e:
raise ValueError(f'Error raised by bedrock service: {e}').with_traceback(e.__traceback__)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | null |
is_valid | return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines) | def is_valid(self) ->bool:
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines) | null |
test_prompt_jinja2_extra_input_variables | """Test error is raised when there are too many input variables."""
template = 'This is a {{ foo }} test.'
input_variables = ['foo', 'bar']
with pytest.warns(UserWarning):
PromptTemplate(input_variables=input_variables, template=template,
template_format='jinja2', validate_template=True)
assert PromptTemplate(input_variables=input_variables, template=template,
template_format='jinja2').input_variables == ['foo'] | @pytest.mark.requires('jinja2')
def test_prompt_jinja2_extra_input_variables() ->None:
"""Test error is raised when there are too many input variables."""
template = 'This is a {{ foo }} test.'
input_variables = ['foo', 'bar']
with pytest.warns(UserWarning):
PromptTemplate(input_variables=input_variables, template=template,
template_format='jinja2', validate_template=True)
assert PromptTemplate(input_variables=input_variables, template=
template, template_format='jinja2').input_variables == ['foo'] | Test error is raised when there are too many input variables. |
__init__ | warnings.warn(
'langchain_community.document_loaders.parsers.pdf.DocumentIntelligenceParser and langchain_community.document_loaders.pdf.DocumentIntelligenceLoader are deprecated. Please upgrade to langchain_community.document_loaders.DocumentIntelligenceLoader for any file parsing purpose using Azure Document Intelligence service.'
)
self.client = client
self.model = model | def __init__(self, client: Any, model: str):
warnings.warn(
'langchain_community.document_loaders.parsers.pdf.DocumentIntelligenceParser and langchain_community.document_loaders.pdf.DocumentIntelligenceLoader are deprecated. Please upgrade to langchain_community.document_loaders.DocumentIntelligenceLoader for any file parsing purpose using Azure Document Intelligence service.'
)
self.client = client
self.model = model | null |
test_deprecated_property | """Test deprecated staticmethod."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
obj = ClassWithDeprecatedMethods()
assert obj.deprecated_property == 'This is a deprecated property.'
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning
) == 'The function `deprecated_property` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
doc = ClassWithDeprecatedMethods.deprecated_property.__doc__
assert isinstance(doc, str)
assert doc.startswith('[*Deprecated*] original doc') | def test_deprecated_property() ->None:
"""Test deprecated staticmethod."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
obj = ClassWithDeprecatedMethods()
assert obj.deprecated_property == 'This is a deprecated property.'
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning
) == 'The function `deprecated_property` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
doc = ClassWithDeprecatedMethods.deprecated_property.__doc__
assert isinstance(doc, str)
assert doc.startswith('[*Deprecated*] original doc') | Test deprecated property. |
delete_collection | """Delete the collection."""
self._client.delete_collection(self._collection.name) | def delete_collection(self) ->None:
"""Delete the collection."""
self._client.delete_collection(self._collection.name) | Delete the collection. |
save_history | input['context'] = [el.metadata['id'] for el in input['context']]
has_history = bool(input.pop('chat_history'))
if has_history:
graph.query(
"""
MATCH (u:User {id: $user_id})-[:HAS_SESSION]->(s:Session{id: $session_id}),
(s)-[l:LAST_MESSAGE]->(last_message)
CREATE (last_message)-[:NEXT]->(q:Question
{text:$question, rephrased:$rephrased_question, date:datetime()}),
(q)-[:HAS_ANSWER]->(:Answer {text:$output}),
(s)-[:LAST_MESSAGE]->(q)
DELETE l
WITH q
UNWIND $context AS c
MATCH (n) WHERE elementId(n) = c
MERGE (q)-[:RETRIEVED]->(n)
"""
, params=input)
else:
graph.query(
"""MERGE (u:User {id: $user_id})
CREATE (u)-[:HAS_SESSION]->(s1:Session {id:$session_id}),
(s1)-[:LAST_MESSAGE]->(q:Question
{text:$question, rephrased:$rephrased_question, date:datetime()}),
(q)-[:HAS_ANSWER]->(:Answer {text:$output})
WITH q
UNWIND $context AS c
MATCH (n) WHERE elementId(n) = c
MERGE (q)-[:RETRIEVED]->(n)
"""
, params=input)
return input['output'] | def save_history(input: Dict[str, Any]) ->str:
input['context'] = [el.metadata['id'] for el in input['context']]
has_history = bool(input.pop('chat_history'))
if has_history:
graph.query(
"""
MATCH (u:User {id: $user_id})-[:HAS_SESSION]->(s:Session{id: $session_id}),
(s)-[l:LAST_MESSAGE]->(last_message)
CREATE (last_message)-[:NEXT]->(q:Question
{text:$question, rephrased:$rephrased_question, date:datetime()}),
(q)-[:HAS_ANSWER]->(:Answer {text:$output}),
(s)-[:LAST_MESSAGE]->(q)
DELETE l
WITH q
UNWIND $context AS c
MATCH (n) WHERE elementId(n) = c
MERGE (q)-[:RETRIEVED]->(n)
"""
, params=input)
else:
graph.query(
"""MERGE (u:User {id: $user_id})
CREATE (u)-[:HAS_SESSION]->(s1:Session {id:$session_id}),
(s1)-[:LAST_MESSAGE]->(q:Question
{text:$question, rephrased:$rephrased_question, date:datetime()}),
(q)-[:HAS_ANSWER]->(:Answer {text:$output})
WITH q
UNWIND $context AS c
MATCH (n) WHERE elementId(n) = c
MERGE (q)-[:RETRIEVED]->(n)
"""
, params=input)
return input['output'] | null |
clear | return self.store.clear() | def clear(self) ->None:
return self.store.clear() | null |
update | """Update the deanonymizer mapping with new values
Duplicated values will not be added
If there are multiple entities of the same type, the mapping will
include a count to differentiate them. For example, if there are
two names in the input text, the mapping will include NAME_1 and NAME_2.
"""
seen_values = set()
for entity_type, values in new_mapping.items():
count = len(self.mapping[entity_type]) + 1
for key, value in values.items():
if value not in seen_values and value not in self.mapping[entity_type].values():
new_key = format_duplicated_operator(key, count) if key in self.mapping[entity_type] else key
self.mapping[entity_type][new_key] = value
seen_values.add(value)
count += 1 | def update(self, new_mapping: MappingDataType) ->None:
"""Update the deanonymizer mapping with new values
Duplicated values will not be added
If there are multiple entities of the same type, the mapping will
include a count to differentiate them. For example, if there are
two names in the input text, the mapping will include NAME_1 and NAME_2.
"""
seen_values = set()
for entity_type, values in new_mapping.items():
count = len(self.mapping[entity_type]) + 1
for key, value in values.items():
if value not in seen_values and value not in self.mapping[entity_type].values():
new_key = format_duplicated_operator(key, count) if key in self.mapping[entity_type] else key
self.mapping[entity_type][new_key] = value
seen_values.add(value)
count += 1 | Update the deanonymizer mapping with new values
Duplicated values will not be added
If there are multiple entities of the same type, the mapping will
include a count to differentiate them. For example, if there are
two names in the input text, the mapping will include NAME_1 and NAME_2. |
_lazy_load_arthur | """Lazy load Arthur."""
try:
import arthurai
except ImportError as e:
raise ImportError(
'To use the ArthurCallbackHandler you need the `arthurai` package. Please install it with `pip install arthurai`.'
, e)
return arthurai | def _lazy_load_arthur() ->arthurai:
"""Lazy load Arthur."""
try:
import arthurai
except ImportError as e:
raise ImportError(
'To use the ArthurCallbackHandler you need the `arthurai` package. Please install it with `pip install arthurai`.'
, e)
return arthurai | Lazy load Arthur. |
test_exact_matching_strategy | """
Test exact matching strategy for deanonymization.
"""
from langchain_experimental.data_anonymizer import deanonymizer_matching_strategies as dms
deanonymizer_mapping = {'PERSON': {'Maria Lynch': 'Slim Shady'},
'PHONE_NUMBER': {'7344131647': '313-666-7440'}, 'EMAIL_ADDRESS': {
'[email protected]': '[email protected]'}, 'CREDIT_CARD': {
'213186379402654': '4916 0387 9536 0861'}}
text = (
'Are you Maria Lynch? I found your card with number 213186379402654. Is this your phone number: 7344131647? Is this your email address: [email protected]'
)
deanonymized_text = dms.exact_matching_strategy(text, deanonymizer_mapping)
for original_value in ['Slim Shady', '313-666-7440',
'[email protected]', '4916 0387 9536 0861']:
assert original_value in deanonymized_text | @pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
def test_exact_matching_strategy() ->None:
"""
Test exact matching strategy for deanonymization.
"""
from langchain_experimental.data_anonymizer import deanonymizer_matching_strategies as dms
deanonymizer_mapping = {'PERSON': {'Maria Lynch': 'Slim Shady'},
'PHONE_NUMBER': {'7344131647': '313-666-7440'}, 'EMAIL_ADDRESS': {
'[email protected]': '[email protected]'}, 'CREDIT_CARD':
{'213186379402654': '4916 0387 9536 0861'}}
text = (
'Are you Maria Lynch? I found your card with number 213186379402654. Is this your phone number: 7344131647? Is this your email address: [email protected]'
)
deanonymized_text = dms.exact_matching_strategy(text, deanonymizer_mapping)
for original_value in ['Slim Shady', '313-666-7440',
'[email protected]', '4916 0387 9536 0861']:
assert original_value in deanonymized_text | Test exact matching strategy for deanonymization. |
add | """
Adds the specified template to the current LangServe app.
e.g.:
langchain app add extraction-openai-functions
langchain app add git+ssh://[email protected]/efriis/simple-pirate.git
"""
parsed_deps = parse_dependencies(dependencies, repo, branch, api_path)
project_root = get_package_root(project_dir)
package_dir = project_root / 'packages'
create_events([{'event': 'serve add', 'properties': dict(parsed_dep=d)} for
d in parsed_deps])
grouped: Dict[Tuple[str, Optional[str]], List[DependencySource]] = {}
for dep in parsed_deps:
key_tup = dep['git'], dep['ref']
lst = grouped.get(key_tup, [])
lst.append(dep)
grouped[key_tup] = lst
installed_destination_paths: List[Path] = []
installed_destination_names: List[str] = []
installed_exports: List[LangServeExport] = []
for (git, ref), group_deps in grouped.items():
if len(group_deps) == 1:
typer.echo(f'Adding {git}@{ref}...')
else:
typer.echo(f'Adding {len(group_deps)} templates from {git}@{ref}')
source_repo_path = update_repo(git, ref, REPO_DIR)
for dep in group_deps:
source_path = source_repo_path / dep['subdirectory'] if dep['subdirectory'] else source_repo_path
pyproject_path = source_path / 'pyproject.toml'
if not pyproject_path.exists():
typer.echo(f'Could not find {pyproject_path}')
continue
langserve_export = get_langserve_export(pyproject_path)
inner_api_path = dep['api_path'] or langserve_export['package_name']
destination_path = package_dir / inner_api_path
if destination_path.exists():
typer.echo(
f'Folder {str(inner_api_path)} already exists. Skipping...')
continue
copy_repo(source_path, destination_path)
typer.echo(f" - Downloaded {dep['subdirectory']} to {inner_api_path}")
installed_destination_paths.append(destination_path)
installed_destination_names.append(inner_api_path)
installed_exports.append(langserve_export)
if len(installed_destination_paths) == 0:
typer.echo('No packages installed. Exiting.')
return
try:
add_dependencies_to_pyproject_toml(project_root / 'pyproject.toml',
    zip(installed_destination_names, installed_destination_paths))
except Exception:
typer.echo('Failed to add dependencies to pyproject.toml, continuing...')
try:
cwd = Path.cwd()
installed_destination_strs = [str(p.relative_to(cwd)) for p in
installed_destination_paths]
except ValueError:
typer.echo('Failed to print install command, continuing...')
else:
if pip:
cmd = ['pip', 'install', '-e'] + installed_destination_strs
cmd_str = ' \\\n '.join(installed_destination_strs)
typer.echo(f'Running: pip install -e \\\n {cmd_str}')
subprocess.run(cmd, cwd=cwd)
chain_names = []
for e in installed_exports:
original_candidate = f"{e['package_name'].replace('-', '_')}_chain"
candidate = original_candidate
i = 2
while candidate in chain_names:
candidate = original_candidate + '_' + str(i)
i += 1
chain_names.append(candidate)
api_paths = [str(Path('/') / path.relative_to(package_dir)) for path in
installed_destination_paths]
imports = [f"from {e['module']} import {e['attr']} as {name}" for e, name in
zip(installed_exports, chain_names)]
routes = [f'add_routes(app, {name}, path="{path}")' for name, path in zip(
chain_names, api_paths)]
t = 'this template' if len(chain_names
) == 1 else f'these {len(chain_names)} templates'
lines = ['', f"""To use {t}, add the following to your app:
```""", ''
] + imports + [''] + routes + ['```']
typer.echo('\n'.join(lines)) | @app_cli.command()
def add(dependencies: Annotated[Optional[List[str]], typer.Argument(help=
'The dependency to add')]=None, *, api_path: Annotated[List[str], typer
.Option(help='API paths to add')]=[], project_dir: Annotated[Optional[
Path], typer.Option(help='The project directory')]=None, repo:
Annotated[List[str], typer.Option(help=
'Install templates from a specific github repo instead')]=[], branch:
Annotated[List[str], typer.Option(help=
'Install templates from a specific branch')]=[], pip: Annotated[bool,
typer.Option('--pip/--no-pip', help=
'Pip install the template(s) as editable dependencies', is_flag=True,
prompt='Would you like to `pip install -e` the template(s)?')]):
"""
Adds the specified template to the current LangServe app.
e.g.:
langchain app add extraction-openai-functions
langchain app add git+ssh://[email protected]/efriis/simple-pirate.git
"""
parsed_deps = parse_dependencies(dependencies, repo, branch, api_path)
project_root = get_package_root(project_dir)
package_dir = project_root / 'packages'
create_events([{'event': 'serve add', 'properties': dict(parsed_dep=d)} for
d in parsed_deps])
grouped: Dict[Tuple[str, Optional[str]], List[DependencySource]] = {}
for dep in parsed_deps:
key_tup = dep['git'], dep['ref']
lst = grouped.get(key_tup, [])
lst.append(dep)
grouped[key_tup] = lst
installed_destination_paths: List[Path] = []
installed_destination_names: List[str] = []
installed_exports: List[LangServeExport] = []
for (git, ref), group_deps in grouped.items():
if len(group_deps) == 1:
typer.echo(f'Adding {git}@{ref}...')
else:
typer.echo(f'Adding {len(group_deps)} templates from {git}@{ref}')
source_repo_path = update_repo(git, ref, REPO_DIR)
for dep in group_deps:
source_path = source_repo_path / dep['subdirectory'] if dep['subdirectory'] else source_repo_path
pyproject_path = source_path / 'pyproject.toml'
if not pyproject_path.exists():
typer.echo(f'Could not find {pyproject_path}')
continue
langserve_export = get_langserve_export(pyproject_path)
inner_api_path = dep['api_path'] or langserve_export['package_name']
destination_path = package_dir / inner_api_path
if destination_path.exists():
typer.echo(
f'Folder {str(inner_api_path)} already exists. Skipping...'
)
continue
copy_repo(source_path, destination_path)
typer.echo(
f" - Downloaded {dep['subdirectory']} to {inner_api_path}")
installed_destination_paths.append(destination_path)
installed_destination_names.append(inner_api_path)
installed_exports.append(langserve_export)
if len(installed_destination_paths) == 0:
typer.echo('No packages installed. Exiting.')
return
try:
add_dependencies_to_pyproject_toml(project_root / 'pyproject.toml',
zip(installed_destination_names, installed_destination_paths))
except Exception:
typer.echo(
'Failed to add dependencies to pyproject.toml, continuing...')
try:
cwd = Path.cwd()
installed_destination_strs = [str(p.relative_to(cwd)) for p in
installed_destination_paths]
except ValueError:
typer.echo('Failed to print install command, continuing...')
else:
if pip:
cmd = ['pip', 'install', '-e'] + installed_destination_strs
cmd_str = ' \\\n '.join(installed_destination_strs)
typer.echo(f'Running: pip install -e \\\n {cmd_str}')
subprocess.run(cmd, cwd=cwd)
chain_names = []
for e in installed_exports:
original_candidate = f"{e['package_name'].replace('-', '_')}_chain"
candidate = original_candidate
i = 2
while candidate in chain_names:
candidate = original_candidate + '_' + str(i)
i += 1
chain_names.append(candidate)
api_paths = [str(Path('/') / path.relative_to(package_dir)) for path in
installed_destination_paths]
imports = [f"from {e['module']} import {e['attr']} as {name}" for e,
name in zip(installed_exports, chain_names)]
routes = [f'add_routes(app, {name}, path="{path}")' for name, path in
zip(chain_names, api_paths)]
t = 'this template' if len(chain_names
) == 1 else f'these {len(chain_names)} templates'
lines = ['', f'To use {t}, add the following to your app:\n\n```', ''
] + imports + [''] + routes + ['```']
typer.echo('\n'.join(lines)) | Adds the specified template to the current LangServe app.
e.g.:
langchain app add extraction-openai-functions
langchain app add git+ssh://[email protected]/efriis/simple-pirate.git |
_import_dingo | from langchain_community.vectorstores.dingo import Dingo
return Dingo | def _import_dingo() ->Any:
from langchain_community.vectorstores.dingo import Dingo
return Dingo | null |
_call_after_scoring_before_learning | if event.selected:
event.selected.score = score
return event | def _call_after_scoring_before_learning(self, event: PickBestEvent, score:
Optional[float]) ->PickBestEvent:
if event.selected:
event.selected.score = score
return event | null |
get_kwargs | kwargs = super().get_kwargs()
for key in ['reference_key', 'prediction_key', 'input_key']:
kwargs.pop(key, None)
return kwargs | def get_kwargs(self) ->Dict[str, Any]:
kwargs = super().get_kwargs()
for key in ['reference_key', 'prediction_key', 'input_key']:
kwargs.pop(key, None)
return kwargs | null |
from_documents | """
Return VectorStore initialized from documents and embeddings.
Postgres Connection string is required
Either pass it as a parameter
or set the PG_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs['connection_string'] = connection_string
return cls.from_texts(texts=texts, pre_delete_collection=
pre_delete_collection, embedding=embedding, embedding_dimension=
embedding_dimension, metadatas=metadatas, ids=ids, collection_name=
collection_name, engine_args=engine_args, **kwargs) | @classmethod
def from_documents(cls: Type[AnalyticDB], documents: List[Document],
embedding: Embeddings, embedding_dimension: int=
_LANGCHAIN_DEFAULT_EMBEDDING_DIM, collection_name: str=
_LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]]=None,
pre_delete_collection: bool=False, engine_args: Optional[dict]=None, **
kwargs: Any) ->AnalyticDB:
"""
Return VectorStore initialized from documents and embeddings.
Postgres Connection string is required
Either pass it as a parameter
or set the PG_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs['connection_string'] = connection_string
return cls.from_texts(texts=texts, pre_delete_collection=
pre_delete_collection, embedding=embedding, embedding_dimension=
embedding_dimension, metadatas=metadatas, ids=ids, collection_name=
collection_name, engine_args=engine_args, **kwargs) | Return VectorStore initialized from documents and embeddings.
Postgres Connection string is required
Either pass it as a parameter
or set the PG_CONNECTION_STRING environment variable. |
_call | """First try to lookup in queries, else return 'foo' or 'bar'."""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response | def _call(self, messages: List[BaseMessage], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""First try to lookup in queries, else return 'foo' or 'bar'."""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response | First try to lookup in queries, else return 'foo' or 'bar'. |
convert_messages_for_finetuning | """Convert messages to a list of lists of dictionaries for fine-tuning.
Args:
sessions: The chat sessions.
Returns:
The list of lists of dictionaries.
"""
return [[convert_message_to_dict(s) for s in session['messages']] for
session in sessions if _has_assistant_message(session)] | def convert_messages_for_finetuning(sessions: Iterable[ChatSession]) ->List[
List[dict]]:
"""Convert messages to a list of lists of dictionaries for fine-tuning.
Args:
sessions: The chat sessions.
Returns:
The list of lists of dictionaries.
"""
return [[convert_message_to_dict(s) for s in session['messages']] for
session in sessions if _has_assistant_message(session)] | Convert messages to a list of lists of dictionaries for fine-tuning.
Args:
sessions: The chat sessions.
Returns:
The list of lists of dictionaries. |
_llm_type | return 'hunyuan-chat' | @property
def _llm_type(self) ->str:
return 'hunyuan-chat' | null |
embeddings | return self._embeddings | @property
def embeddings(self) ->Embeddings:
return self._embeddings | null |
_format_nested_properties | """Format nested properties."""
formatted_props = []
for prop in properties:
prop_name = prop.name
prop_type = self.ts_type_from_python(prop.type)
prop_required = '' if prop.required else '?'
prop_desc = f'/* {prop.description} */' if prop.description else ''
if prop.properties:
nested_props = self._format_nested_properties(prop.properties,
indent + 2)
prop_type = f"{{\n{nested_props}\n{' ' * indent}}}"
formatted_props.append(
f"{prop_desc}\n{' ' * indent}{prop_name}{prop_required}: {prop_type},")
return '\n'.join(formatted_props) | def _format_nested_properties(self, properties: List[APIRequestBodyProperty
], indent: int=2) ->str:
"""Format nested properties."""
formatted_props = []
for prop in properties:
prop_name = prop.name
prop_type = self.ts_type_from_python(prop.type)
prop_required = '' if prop.required else '?'
prop_desc = f'/* {prop.description} */' if prop.description else ''
if prop.properties:
nested_props = self._format_nested_properties(prop.properties,
indent + 2)
prop_type = f"{{\n{nested_props}\n{' ' * indent}}}"
formatted_props.append(
f"""{prop_desc}
{' ' * indent}{prop_name}{prop_required}: {prop_type},"""
)
return '\n'.join(formatted_props) | Format nested properties. |
_type | """Snake-case string identifier for an output parser type."""
return 'boolean_output_parser' | @property
def _type(self) ->str:
"""Snake-case string identifier for an output parser type."""
return 'boolean_output_parser' | Snake-case string identifier for an output parser type. |
_evaluate_strings | """
Evaluate the regex match between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference regex pattern.
Returns:
dict: The evaluation results containing the score.
"""
match = re.match(reference, prediction, flags=self.flags)
return {'score': int(bool(match))} | def _evaluate_strings(self, *, prediction: str, reference: str, **kwargs: Any
) ->dict:
"""
Evaluate the regex match between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference regex pattern.
Returns:
dict: The evaluation results containing the score.
"""
match = re.match(reference, prediction, flags=self.flags)
return {'score': int(bool(match))} | Evaluate the regex match between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference regex pattern.
Returns:
dict: The evaluation results containing the score. |
from_texts | """Create a Redis vectorstore from a list of texts.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new Redis index if it doesn't already exist
3. Adds the documents to the newly created Redis index.
This method will generate schema based on the metadata passed in
if the `index_schema` is not defined. If the `index_schema` is defined,
it will compare against the generated schema and warn if there are
differences. If you are purposefully defining the schema for the
metadata, then you can ignore that warning.
To examine the schema options, initialize an instance of this class
and print out the schema using the `Redis.schema` property. This
will include the content and content_vector classes which are
always present in the langchain schema.
Example:
.. code-block:: python
from langchain_community.vectorstores import Redis
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = RediSearch.from_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
Args:
texts (List[str]): List of texts to add to the vectorstore.
embedding (Embeddings): Embedding model class (i.e. OpenAIEmbeddings)
for embedding queries.
metadatas (Optional[List[dict]], optional): Optional list of metadata dicts
to add to the vectorstore. Defaults to None.
index_name (Optional[str], optional): Optional name of the index to create
or add to. Defaults to None.
index_schema (Optional[Union[Dict[str, str], str, os.PathLike]], optional):
Optional fields to index within the metadata. Overrides generated
schema. Defaults to None.
vector_schema (Optional[Dict[str, Union[str, int]]], optional): Optional
vector schema to use. Defaults to None.
**kwargs (Any): Additional keyword arguments to pass to the Redis client.
Returns:
Redis: Redis VectorStore instance.
Raises:
ValueError: If the number of metadatas does not match the number of texts.
ImportError: If the redis python package is not installed.
"""
instance, _ = cls.from_texts_return_keys(texts, embedding, metadatas=
metadatas, index_name=index_name, index_schema=index_schema,
vector_schema=vector_schema, **kwargs)
return instance | @classmethod
def from_texts(cls: Type[Redis], texts: List[str], embedding: Embeddings,
metadatas: Optional[List[dict]]=None, index_name: Optional[str]=None,
index_schema: Optional[Union[Dict[str, str], str, os.PathLike]]=None,
vector_schema: Optional[Dict[str, Union[str, int]]]=None, **kwargs: Any
) ->Redis:
"""Create a Redis vectorstore from a list of texts.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new Redis index if it doesn't already exist
3. Adds the documents to the newly created Redis index.
This method will generate schema based on the metadata passed in
if the `index_schema` is not defined. If the `index_schema` is defined,
it will compare against the generated schema and warn if there are
differences. If you are purposefully defining the schema for the
metadata, then you can ignore that warning.
To examine the schema options, initialize an instance of this class
and print out the schema using the `Redis.schema` property. This
will include the content and content_vector classes which are
always present in the langchain schema.
Example:
.. code-block:: python
from langchain_community.vectorstores import Redis
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = RediSearch.from_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
Args:
texts (List[str]): List of texts to add to the vectorstore.
embedding (Embeddings): Embedding model class (i.e. OpenAIEmbeddings)
for embedding queries.
metadatas (Optional[List[dict]], optional): Optional list of metadata dicts
to add to the vectorstore. Defaults to None.
index_name (Optional[str], optional): Optional name of the index to create
or add to. Defaults to None.
index_schema (Optional[Union[Dict[str, str], str, os.PathLike]], optional):
Optional fields to index within the metadata. Overrides generated
schema. Defaults to None.
vector_schema (Optional[Dict[str, Union[str, int]]], optional): Optional
vector schema to use. Defaults to None.
**kwargs (Any): Additional keyword arguments to pass to the Redis client.
Returns:
Redis: Redis VectorStore instance.
Raises:
ValueError: If the number of metadatas does not match the number of texts.
ImportError: If the redis python package is not installed.
"""
instance, _ = cls.from_texts_return_keys(texts, embedding, metadatas=
metadatas, index_name=index_name, index_schema=index_schema,
vector_schema=vector_schema, **kwargs)
return instance | Create a Redis vectorstore from a list of texts.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new Redis index if it doesn't already exist
3. Adds the documents to the newly created Redis index.
This method will generate schema based on the metadata passed in
if the `index_schema` is not defined. If the `index_schema` is defined,
it will compare against the generated schema and warn if there are
differences. If you are purposefully defining the schema for the
metadata, then you can ignore that warning.
To examine the schema options, initialize an instance of this class
and print out the schema using the `Redis.schema` property. This
will include the content and content_vector classes which are
always present in the langchain schema.
Example:
.. code-block:: python
from langchain_community.vectorstores import Redis
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = RediSearch.from_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
Args:
texts (List[str]): List of texts to add to the vectorstore.
embedding (Embeddings): Embedding model class (i.e. OpenAIEmbeddings)
for embedding queries.
metadatas (Optional[List[dict]], optional): Optional list of metadata dicts
to add to the vectorstore. Defaults to None.
index_name (Optional[str], optional): Optional name of the index to create
or add to. Defaults to None.
index_schema (Optional[Union[Dict[str, str], str, os.PathLike]], optional):
Optional fields to index within the metadata. Overrides generated
schema. Defaults to None.
vector_schema (Optional[Dict[str, Union[str, int]]], optional): Optional
vector schema to use. Defaults to None.
**kwargs (Any): Additional keyword arguments to pass to the Redis client.
Returns:
Redis: Redis VectorStore instance.
Raises:
ValueError: If the number of metadatas does not match the number of texts.
ImportError: If the redis python package is not installed. |
_prepare_vector_query | """Prepare query for vector search.
Args:
k: Number of results to return.
filter: Optional metadata filter.
Returns:
query: Query object.
"""
try:
from redis.commands.search.query import Query
except ImportError as e:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis`.'
) from e
return_fields = return_fields or []
query_prefix = '*'
if filter:
query_prefix = f'{str(filter)}'
vector_key = self._schema.content_vector_key
base_query = f'({query_prefix})=>[KNN {k} @{vector_key} $vector AS distance]'
query = Query(base_query).return_fields(*return_fields).sort_by('distance'
).paging(0, k).dialect(2)
return query | def _prepare_vector_query(self, k: int, filter: Optional[
RedisFilterExpression]=None, return_fields: Optional[List[str]]=None
) ->'Query':
"""Prepare query for vector search.
Args:
k: Number of results to return.
filter: Optional metadata filter.
Returns:
query: Query object.
"""
try:
from redis.commands.search.query import Query
except ImportError as e:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis`.'
) from e
return_fields = return_fields or []
query_prefix = '*'
if filter:
query_prefix = f'{str(filter)}'
vector_key = self._schema.content_vector_key
base_query = (
f'({query_prefix})=>[KNN {k} @{vector_key} $vector AS distance]')
query = Query(base_query).return_fields(*return_fields).sort_by('distance'
).paging(0, k).dialect(2)
return query | Prepare query for vector search.
Args:
k: Number of results to return.
filter: Optional metadata filter.
Returns:
query: Query object. |
requires_input | return True | @property
def requires_input(self) ->bool:
return True | null |
test_parse_examples_correct | from vertexai.language_models import InputOutputTextPair
text_question = (
'Hello, could you recommend a good movie for me to watch this evening, please?'
)
question = HumanMessage(content=text_question)
text_answer = (
'Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring (2001): This is the first movie in the Lord of the Rings trilogy.'
)
answer = AIMessage(content=text_answer)
examples = _parse_examples([question, answer, question, answer])
assert len(examples) == 2
assert examples == [InputOutputTextPair(input_text=text_question,
output_text=text_answer), InputOutputTextPair(input_text=text_question,
output_text=text_answer)] | def test_parse_examples_correct() ->None:
from vertexai.language_models import InputOutputTextPair
text_question = (
'Hello, could you recommend a good movie for me to watch this evening, please?'
)
question = HumanMessage(content=text_question)
text_answer = (
'Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring (2001): This is the first movie in the Lord of the Rings trilogy.'
)
answer = AIMessage(content=text_answer)
examples = _parse_examples([question, answer, question, answer])
assert len(examples) == 2
assert examples == [InputOutputTextPair(input_text=text_question,
output_text=text_answer), InputOutputTextPair(input_text=
text_question, output_text=text_answer)] | null |
test_astradb_vectorstore_from_x | """from_texts and from_documents methods."""
emb = SomeEmbeddings(dimension=2)
v_store = AstraDB.from_texts(texts=['Hi', 'Ho'], embedding=emb,
collection_name='lc_test_ft', token=os.environ[
'ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[
'ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE'))
try:
assert v_store.similarity_search('Ho', k=1)[0].page_content == 'Ho'
finally:
v_store.delete_collection()
v_store_2 = AstraDB.from_documents([Document(page_content='Hee'), Document(
page_content='Hoi')], embedding=emb, collection_name='lc_test_fd',
token=os.environ['ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ
['ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE'))
try:
assert v_store_2.similarity_search('Hoi', k=1)[0].page_content == 'Hoi'
finally:
v_store_2.delete_collection() | def test_astradb_vectorstore_from_x(self) ->None:
"""from_texts and from_documents methods."""
emb = SomeEmbeddings(dimension=2)
v_store = AstraDB.from_texts(texts=['Hi', 'Ho'], embedding=emb,
collection_name='lc_test_ft', token=os.environ[
'ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[
'ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE')
)
try:
assert v_store.similarity_search('Ho', k=1)[0].page_content == 'Ho'
finally:
v_store.delete_collection()
v_store_2 = AstraDB.from_documents([Document(page_content='Hee'),
Document(page_content='Hoi')], embedding=emb, collection_name=
'lc_test_fd', token=os.environ['ASTRA_DB_APPLICATION_TOKEN'],
api_endpoint=os.environ['ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE'))
try:
assert v_store_2.similarity_search('Hoi', k=1)[0].page_content == 'Hoi'
finally:
v_store_2.delete_collection() | from_texts and from_documents methods. |
on_agent_action_common | self.agent_actions += 1
self.starts += 1 | def on_agent_action_common(self) ->None:
self.agent_actions += 1
self.starts += 1 | null |
get_vector_index_uri_from_group | """Get the URI of the vector index."""
return group[VECTOR_INDEX_NAME].uri | def get_vector_index_uri_from_group(group: Any) ->str:
"""Get the URI of the vector index."""
return group[VECTOR_INDEX_NAME].uri | Get the URI of the vector index. |
_ExtSlice | if len(t.dims) == 1:
elt = t.dims[0]
self.dispatch(elt)
self.write(',')
else:
interleave(lambda : self.write(', '), self.dispatch, t.dims) | def _ExtSlice(self, t):
if len(t.dims) == 1:
elt = t.dims[0]
self.dispatch(elt)
self.write(',')
else:
interleave(lambda : self.write(', '), self.dispatch, t.dims) | null |
_llm_type | return 'azure-openai-chat' | @property
def _llm_type(self) ->str:
return 'azure-openai-chat' | null |
_identifying_params | """Identifying parameters"""
return {'account_id': self.account_id, 'api_token': self.api_token, 'model':
self.model, 'base_url': self.base_url} | @property
def _identifying_params(self) ->Dict[str, Any]:
"""Identifying parameters"""
return {'account_id': self.account_id, 'api_token': self.api_token,
'model': self.model, 'base_url': self.base_url} | Identifying parameters |
_identifying_params | """Get the identifying parameters."""
return {'model': self.model, 'model_kwargs': self.model_kwargs} | @property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
return {'model': self.model, 'model_kwargs': self.model_kwargs} | Get the identifying parameters. |
__init__ | self.client = client
self.config = config
self.moderation_callback = moderation_callback
self.unique_id = unique_id
self.chat_message_index = 0
self.run_manager = run_manager
self.chain_id = str(uuid.uuid4()) | def __init__(self, client: Any, config: Optional[Any]=None,
moderation_callback: Optional[Any]=None, unique_id: Optional[str]=None,
run_manager: Optional[CallbackManagerForChainRun]=None):
self.client = client
self.config = config
self.moderation_callback = moderation_callback
self.unique_id = unique_id
self.chat_message_index = 0
self.run_manager = run_manager
self.chain_id = str(uuid.uuid4()) | null |
get_lambda_source | """Get the source code of a lambda function.
Args:
func: a callable that can be a lambda function
Returns:
str: the source code of the lambda function
"""
try:
name = func.__name__ if func.__name__ != '<lambda>' else None
except AttributeError:
name = None
try:
code = inspect.getsource(func)
tree = ast.parse(textwrap.dedent(code))
visitor = GetLambdaSource()
visitor.visit(tree)
return visitor.source if visitor.count == 1 else name
except (SyntaxError, TypeError, OSError):
return name | def get_lambda_source(func: Callable) ->Optional[str]:
"""Get the source code of a lambda function.
Args:
func: a callable that can be a lambda function
Returns:
str: the source code of the lambda function
"""
try:
name = func.__name__ if func.__name__ != '<lambda>' else None
except AttributeError:
name = None
try:
code = inspect.getsource(func)
tree = ast.parse(textwrap.dedent(code))
visitor = GetLambdaSource()
visitor.visit(tree)
return visitor.source if visitor.count == 1 else name
except (SyntaxError, TypeError, OSError):
return name | Get the source code of a lambda function.
Args:
func: a callable that can be a lambda function
Returns:
str: the source code of the lambda function |
test_old_api_works | llm = _FakeTrajectoryChatModel(queries={'a': """Trajectory good
Score: 5""",
'b': """Trajectory not good
Score: 1"""}, sequential_responses=True)
chain = TrajectoryEvalChain.from_llm(llm=llm)
res = chain({'question': 'What is your favorite food?', 'agent_trajectory':
intermediate_steps, 'answer': 'I like pie.'})
assert res['score'] == 1.0
res = chain({'question': 'What is your favorite food?', 'agent_trajectory':
intermediate_steps, 'answer': 'I like pie.', 'reference': 'Paris'})
assert res['score'] == 0.0 | def test_old_api_works(intermediate_steps: List[Tuple[AgentAction, str]]
) ->None:
llm = _FakeTrajectoryChatModel(queries={'a':
'Trajectory good\nScore: 5', 'b':
"""Trajectory not good
Score: 1"""}, sequential_responses=True)
chain = TrajectoryEvalChain.from_llm(llm=llm)
res = chain({'question': 'What is your favorite food?',
'agent_trajectory': intermediate_steps, 'answer': 'I like pie.'})
assert res['score'] == 1.0
res = chain({'question': 'What is your favorite food?',
'agent_trajectory': intermediate_steps, 'answer': 'I like pie.',
'reference': 'Paris'})
assert res['score'] == 0.0 | null |
test_div_role_main | loader = ReadTheDocsLoader(PARENT_DIR / 'div_role_main')
documents = loader.load()
assert len(documents[0].page_content) != 0 | @pytest.mark.requires('bs4')
def test_div_role_main() ->None:
loader = ReadTheDocsLoader(PARENT_DIR / 'div_role_main')
documents = loader.load()
assert len(documents[0].page_content) != 0 | null |
similarity_search_with_relevance_scores | score_threshold = kwargs.pop('score_threshold', None)
result = self.vector_search_with_score(query, k=k, **kwargs)
return result if score_threshold is None else [r for r in result if r[1] >= score_threshold] | def similarity_search_with_relevance_scores(self, query: str, k: int=4, **
kwargs: Any) ->List[Tuple[Document, float]]:
score_threshold = kwargs.pop('score_threshold', None)
result = self.vector_search_with_score(query, k=k, **kwargs)
return result if score_threshold is None else [r for r in result if r[1] >= score_threshold] | null |
test_visit_comparison | comparator, value, expected = triplet
comp = Comparison(comparator=comparator, attribute='foo', value=value)
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual | @pytest.mark.parametrize('triplet', [(Comparator.LT, 2, 'metadata.foo < 2'),
(Comparator.LTE, 2, 'metadata.foo <= 2'), (Comparator.GT, 2,
'metadata.foo > 2'), (Comparator.GTE, 2, 'metadata.foo >= 2'), (
Comparator.CONTAIN, 2, 'has(metadata.foo,2)'), (Comparator.LIKE, 'bar',
"metadata.foo ILIKE '%bar%'")])
def test_visit_comparison(triplet: Tuple[Comparator, Any, str]) ->None:
comparator, value, expected = triplet
comp = Comparison(comparator=comparator, attribute='foo', value=value)
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual | null |
embed_query | """
Generates an embedding for a single piece of text.
Args:
text (str): The text to generate an embedding for.
Returns:
The embedding for the text.
"""
return self.nlp(text).vector.tolist() | def embed_query(self, text: str) ->List[float]:
"""
Generates an embedding for a single piece of text.
Args:
text (str): The text to generate an embedding for.
Returns:
The embedding for the text.
"""
return self.nlp(text).vector.tolist() | Generates an embedding for a single piece of text.
Args:
text (str): The text to generate an embedding for.
Returns:
The embedding for the text. |
test_awadb | """Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = AwaDB.from_texts(table_name='test_awadb', texts=texts,
embedding=FakeEmbeddings())
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')] | def test_awadb() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = AwaDB.from_texts(table_name='test_awadb', texts=texts,
embedding=FakeEmbeddings())
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')] | Test end to end construction and search. |
test_prompt_missing_input_variables | """Test error is raised when input variables are not provided."""
template = 'This is a {foo} test.'
input_variables: list = []
with pytest.raises(ValueError):
PromptTemplate(input_variables=input_variables, template=template,
validate_template=True)
assert PromptTemplate(input_variables=input_variables, template=template
).input_variables == ['foo'] | def test_prompt_missing_input_variables() ->None:
"""Test error is raised when input variables are not provided."""
template = 'This is a {foo} test.'
input_variables: list = []
with pytest.raises(ValueError):
PromptTemplate(input_variables=input_variables, template=template,
validate_template=True)
assert PromptTemplate(input_variables=input_variables, template=template
).input_variables == ['foo'] | Test error is raised when input variables are not provided. |
_import_gmail_GmailGetThread | from langchain_community.tools.gmail import GmailGetThread
return GmailGetThread | def _import_gmail_GmailGetThread() ->Any:
from langchain_community.tools.gmail import GmailGetThread
return GmailGetThread | null |
_ImportFrom | self.fill('from ')
self.write('.' * t.level)
if t.module:
self.write(t.module)
self.write(' import ')
interleave(lambda : self.write(', '), self.dispatch, t.names) | def _ImportFrom(self, t):
self.fill('from ')
self.write('.' * t.level)
if t.module:
self.write(t.module)
self.write(' import ')
interleave(lambda : self.write(', '), self.dispatch, t.names) | null |
embed_documents | """Call out to OpenAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
engine = cast(str, self.deployment)
return self._get_len_safe_embeddings(texts, engine=engine) | def embed_documents(self, texts: List[str], chunk_size: Optional[int]=0
) ->List[List[float]]:
"""Call out to OpenAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
engine = cast(str, self.deployment)
return self._get_len_safe_embeddings(texts, engine=engine) | Call out to OpenAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text. |
_get_parameters | """
Performs sanity check, preparing parameters in format needed by llama_cpp.
Args:
stop (Optional[List[str]]): List of stop sequences for llama_cpp.
Returns:
Dictionary containing the combined parameters.
"""
if self.stop and stop is not None:
raise ValueError('`stop` found in both the input and default params.')
params = self._default_params
params.pop('stop_sequences')
params['stop'] = self.stop or stop or []
return params | def _get_parameters(self, stop: Optional[List[str]]=None) ->Dict[str, Any]:
"""
Performs sanity check, preparing parameters in format needed by llama_cpp.
Args:
stop (Optional[List[str]]): List of stop sequences for llama_cpp.
Returns:
Dictionary containing the combined parameters.
"""
if self.stop and stop is not None:
raise ValueError('`stop` found in both the input and default params.')
params = self._default_params
params.pop('stop_sequences')
params['stop'] = self.stop or stop or []
return params | Performs sanity check, preparing parameters in format needed by llama_cpp.
Args:
stop (Optional[List[str]]): List of stop sequences for llama_cpp.
Returns:
Dictionary containing the combined parameters. |
test_quip_loader_initialization | QuipLoader(self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60)
mock_quip.assert_called_once_with(access_token=self.ACCESS_TOKEN, base_url=
self.API_URL, request_timeout=60) | def test_quip_loader_initialization(self, mock_quip: MagicMock) ->None:
QuipLoader(self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60
)
mock_quip.assert_called_once_with(access_token=self.ACCESS_TOKEN,
base_url=self.API_URL, request_timeout=60) | null |
test_chat_fireworks_model | """Test ChatFireworks wrapper handles model_name."""
chat = ChatFireworks(model='foo')
assert chat.model == 'foo' | @pytest.mark.scheduled
def test_chat_fireworks_model() ->None:
"""Test ChatFireworks wrapper handles model_name."""
chat = ChatFireworks(model='foo')
assert chat.model == 'foo' | Test ChatFireworks wrapper handles model_name. |
from_bytes_store | """On-ramp that adds the necessary serialization and encoding to the store.
Args:
underlying_embeddings: The embedder to use for embedding.
document_embedding_cache: The cache to use for storing document embeddings.
*,
namespace: The namespace to use for document cache.
This namespace is used to avoid collisions with other caches.
For example, set it to the name of the embedding model used.
"""
namespace = namespace
key_encoder = _create_key_encoder(namespace)
encoder_backed_store = EncoderBackedStore[str, List[float]](
document_embedding_cache, key_encoder, _value_serializer,
_value_deserializer)
return cls(underlying_embeddings, encoder_backed_store) | @classmethod
def from_bytes_store(cls, underlying_embeddings: Embeddings,
document_embedding_cache: ByteStore, *, namespace: str=''
) ->CacheBackedEmbeddings:
"""On-ramp that adds the necessary serialization and encoding to the store.
Args:
underlying_embeddings: The embedder to use for embedding.
document_embedding_cache: The cache to use for storing document embeddings.
*,
namespace: The namespace to use for document cache.
This namespace is used to avoid collisions with other caches.
For example, set it to the name of the embedding model used.
"""
namespace = namespace
key_encoder = _create_key_encoder(namespace)
encoder_backed_store = EncoderBackedStore[str, List[float]](
document_embedding_cache, key_encoder, _value_serializer,
_value_deserializer)
return cls(underlying_embeddings, encoder_backed_store) | On-ramp that adds the necessary serialization and encoding to the store.
Args:
underlying_embeddings: The embedder to use for embedding.
document_embedding_cache: The cache to use for storing document embeddings.
*,
namespace: The namespace to use for document cache.
This namespace is used to avoid collisions with other caches.
For example, set it to the name of the embedding model used. |
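A minimal usage sketch for this constructor, assuming a local file-backed byte store and the langchain_community OpenAIEmbeddings wrapper as the underlying embedder; the cache path is a placeholder.

from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_community.embeddings import OpenAIEmbeddings

underlying = OpenAIEmbeddings()
store = LocalFileStore("./embedding_cache/")
cached = CacheBackedEmbeddings.from_bytes_store(
    underlying, store, namespace=underlying.model
)
# the second occurrence of the same text is served from the byte store
vectors = cached.embed_documents(["hello world", "hello world"])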
patch | """PATCH the URL and return the text."""
return requests.patch(url, json=data, headers=self.headers, auth=self.auth,
**kwargs) | def patch(self, url: str, data: Dict[str, Any], **kwargs: Any
) ->requests.Response:
"""PATCH the URL and return the text."""
return requests.patch(url, json=data, headers=self.headers, auth=self.
auth, **kwargs) | PATCH the URL and return the text. |
test_llamacpp_embedding_documents | """Test llamacpp embeddings."""
documents = ['foo bar']
model_path = get_model()
embedding = LlamaCppEmbeddings(model_path=model_path)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 512 | def test_llamacpp_embedding_documents() ->None:
"""Test llamacpp embeddings."""
documents = ['foo bar']
model_path = get_model()
embedding = LlamaCppEmbeddings(model_path=model_path)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 512 | Test llamacpp embeddings. |
_validate_uri | if self.target_uri == 'databricks':
return
allowed = ['http', 'https', 'databricks']
if urlparse(self.target_uri).scheme not in allowed:
raise ValueError(
f'Invalid target URI: {self.target_uri}. The scheme must be one of {allowed}.'
) | def _validate_uri(self) ->None:
if self.target_uri == 'databricks':
return
allowed = ['http', 'https', 'databricks']
if urlparse(self.target_uri).scheme not in allowed:
raise ValueError(
f'Invalid target URI: {self.target_uri}. The scheme must be one of {allowed}.'
) | null |
__init__ | """Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import('source_typeform', pip_name=
'airbyte-source-typeform').SourceTypeform
super().__init__(config=config, source_class=source_class, stream_name=
stream_name, record_handler=record_handler, state=state) | def __init__(self, config: Mapping[str, Any], stream_name: str,
record_handler: Optional[RecordHandler]=None, state: Optional[Any]=None
) ->None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import('source_typeform', pip_name=
'airbyte-source-typeform').SourceTypeform
super().__init__(config=config, source_class=source_class, stream_name=
stream_name, record_handler=record_handler, state=state) | Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None. |
raise_deprecation | """Raise deprecation warning if callback_manager is used."""
if values.get('callback_manager') is not None:
warnings.warn(
'callback_manager is deprecated. Please use callbacks instead.',
DeprecationWarning)
values['callbacks'] = values.pop('callback_manager', None)
return values | @root_validator()
def raise_deprecation(cls, values: Dict) ->Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get('callback_manager') is not None:
warnings.warn(
'callback_manager is deprecated. Please use callbacks instead.',
DeprecationWarning)
values['callbacks'] = values.pop('callback_manager', None)
return values | Raise deprecation warning if callback_manager is used. |
test_similarity_search | """Test similarity search."""
output = deeplake_datastore.similarity_search('foo', k=1, distance_metric=
distance_metric)
assert output == [Document(page_content='foo', metadata={'page': '0'})]
tql_query = (
f"SELECT * WHERE id=='{deeplake_datastore.vectorstore.dataset.id[0].numpy()[0]}'"
)
output = deeplake_datastore.similarity_search(query='foo', tql_query=
tql_query, k=1, distance_metric=distance_metric)
assert len(output) == 1
deeplake_datastore.delete_dataset() | def test_similarity_search(deeplake_datastore: DeepLake, distance_metric: str
) ->None:
"""Test similarity search."""
output = deeplake_datastore.similarity_search('foo', k=1,
distance_metric=distance_metric)
assert output == [Document(page_content='foo', metadata={'page': '0'})]
tql_query = (
f"SELECT * WHERE id=='{deeplake_datastore.vectorstore.dataset.id[0].numpy()[0]}'"
)
output = deeplake_datastore.similarity_search(query='foo', tql_query=
tql_query, k=1, distance_metric=distance_metric)
assert len(output) == 1
deeplake_datastore.delete_dataset() | Test similarity search. |
from_existing_index | """Connect to an existing Tair index."""
url = get_from_dict_or_env(kwargs, 'tair_url', 'TAIR_URL')
search_params = {}
if 'search_params' in kwargs:
search_params = kwargs.pop('search_params')
return cls(embedding, url, index_name, content_key=content_key,
metadata_key=metadata_key, search_params=search_params, **kwargs) | @classmethod
def from_existing_index(cls, embedding: Embeddings, index_name: str=
'langchain', content_key: str='content', metadata_key: str='metadata',
**kwargs: Any) ->Tair:
"""Connect to an existing Tair index."""
url = get_from_dict_or_env(kwargs, 'tair_url', 'TAIR_URL')
search_params = {}
if 'search_params' in kwargs:
search_params = kwargs.pop('search_params')
return cls(embedding, url, index_name, content_key=content_key,
metadata_key=metadata_key, search_params=search_params, **kwargs) | Connect to an existing Tair index. |
test_all_steps | joke = 'Why did the chicken cross the Mobius strip?'
response = 'Resolution response'
ideation_llm = FakeListLLM(responses=['Ideation response' for _ in range(20)])
critique_llm = FakeListLLM(responses=['Critique response' for _ in range(20)])
resolver_llm = FakeListLLM(responses=[response for _ in range(20)])
prompt = PromptTemplate(input_variables=['joke'], template=
'Explain this joke to me: {joke}?')
chain = SmartLLMChain(ideation_llm=ideation_llm, critique_llm=critique_llm,
resolver_llm=resolver_llm, prompt=prompt)
result = chain(joke)
assert result['joke'] == joke
assert result['resolution'] == response | def test_all_steps() ->None:
joke = 'Why did the chicken cross the Mobius strip?'
response = 'Resolution response'
ideation_llm = FakeListLLM(responses=['Ideation response' for _ in
range(20)])
critique_llm = FakeListLLM(responses=['Critique response' for _ in
range(20)])
resolver_llm = FakeListLLM(responses=[response for _ in range(20)])
prompt = PromptTemplate(input_variables=['joke'], template=
'Explain this joke to me: {joke}?')
chain = SmartLLMChain(ideation_llm=ideation_llm, critique_llm=
critique_llm, resolver_llm=resolver_llm, prompt=prompt)
result = chain(joke)
assert result['joke'] == joke
assert result['resolution'] == response | null |
debug_output | """
Print a debug message if DEBUG is True.
Args:
s: The message to print
"""
if DEBUG:
print(s) | def debug_output(s: Any) ->None:
"""
Print a debug message if DEBUG is True.
Args:
s: The message to print
"""
if DEBUG:
print(s) | Print a debug message if DEBUG is True.
Args:
s: The message to print |
invoke | config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(dumpd(self), input, name=
config.get('run_name'))
first_error = None
for runnable in self.runnables:
try:
output = runnable.invoke(input, patch_config(config, callbacks=
run_manager.get_child()), **kwargs)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
run_manager.on_chain_error(e)
raise e
else:
run_manager.on_chain_end(output)
return output
if first_error is None:
raise ValueError('No error stored at end of fallbacks.')
run_manager.on_chain_error(first_error)
raise first_error | def invoke(self, input: Input, config: Optional[RunnableConfig]=None, **
kwargs: Any) ->Output:
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(dumpd(self), input, name=
config.get('run_name'))
first_error = None
for runnable in self.runnables:
try:
output = runnable.invoke(input, patch_config(config, callbacks=
run_manager.get_child()), **kwargs)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
run_manager.on_chain_error(e)
raise e
else:
run_manager.on_chain_end(output)
return output
if first_error is None:
raise ValueError('No error stored at end of fallbacks.')
run_manager.on_chain_error(first_error)
raise first_error | null |
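The runnable whose invoke is shown above is normally built with with_fallbacks; a minimal sketch, assuming two chat model wrappers from langchain_community (the model choices and API keys are assumptions).

from langchain_community.chat_models import ChatAnthropic, ChatOpenAI

primary = ChatOpenAI(max_retries=0)
backup = ChatAnthropic()
model = primary.with_fallbacks([backup])
# invoke() tries primary first; a handled exception falls through to backup
result = model.invoke("Tell me a short joke")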
test_repl_print_python_backticks | program = "`print('`python` is a great language.')`"
tool = PythonAstREPLTool()
assert tool.run(program) == '`python` is a great language.\n' | @pytest.mark.skipif(sys.version_info < (3, 9), reason=
'Requires python version >= 3.9 to run.')
def test_repl_print_python_backticks() ->None:
program = "`print('`python` is a great language.')`"
tool = PythonAstREPLTool()
assert tool.run(program) == '`python` is a great language.\n' | null |
lazy_parse | """Lazy parsing interface."""
yield Document(page_content='foo') | def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Lazy parsing interface."""
yield Document(page_content='foo') | Lazy parsing interface. |
ignore_agent | """Whether to ignore agent callbacks."""
return self.ignore_agent_ | @property
def ignore_agent(self) ->bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_ | Whether to ignore agent callbacks. |
_get_message_source | """
Get the message source as a string.
Args:
channel_name (str): The name of the channel the message belongs to.
user (str): The user ID who sent the message.
timestamp (str): The timestamp of the message.
Returns:
str: The message source.
"""
if self.workspace_url:
channel_id = self.channel_id_map.get(channel_name, '')
return (f'{self.workspace_url}/archives/{channel_id}' +
f"/p{timestamp.replace('.', '')}")
else:
return f'{channel_name} - {user} - {timestamp}' | def _get_message_source(self, channel_name: str, user: str, timestamp: str
) ->str:
"""
Get the message source as a string.
Args:
channel_name (str): The name of the channel the message belongs to.
user (str): The user ID who sent the message.
timestamp (str): The timestamp of the message.
Returns:
str: The message source.
"""
if self.workspace_url:
channel_id = self.channel_id_map.get(channel_name, '')
return (f'{self.workspace_url}/archives/{channel_id}' +
f"/p{timestamp.replace('.', '')}")
else:
return f'{channel_name} - {user} - {timestamp}' | Get the message source as a string.
Args:
channel_name (str): The name of the channel the message belongs to.
user (str): The user ID who sent the message.
timestamp (str): The timestamp of the message.
Returns:
str: The message source. |
_get_edge_properties | edge_properties_query = """
MATCH ()-[e:`{e_label}`]->()
RETURN properties(e) AS props
LIMIT 100
"""
edge_properties = []
for label in e_labels:
q = edge_properties_query.format(e_label=label)
data = {'label': label, 'properties': self.query(q)['results']}
s = set({})
for p in data['properties']:
for k, v in p['props'].items():
s.add((k, types[type(v).__name__]))
ep = {'type': label, 'properties': [{'property': k, 'type': v} for k, v in
s]}
edge_properties.append(ep)
return edge_properties | def _get_edge_properties(self, e_labels: List[str], types: Dict[str, Any]
) ->List:
edge_properties_query = """
MATCH ()-[e:`{e_label}`]->()
RETURN properties(e) AS props
LIMIT 100
"""
edge_properties = []
for label in e_labels:
q = edge_properties_query.format(e_label=label)
data = {'label': label, 'properties': self.query(q)['results']}
s = set({})
for p in data['properties']:
for k, v in p['props'].items():
s.add((k, types[type(v).__name__]))
ep = {'type': label, 'properties': [{'property': k, 'type': v} for
k, v in s]}
edge_properties.append(ep)
return edge_properties | null |
_import_jira_tool | from langchain_community.tools.jira.tool import JiraAction
return JiraAction | def _import_jira_tool() ->Any:
from langchain_community.tools.jira.tool import JiraAction
return JiraAction | null |
test_selector_threshold_more_than_one | """Tests NGramOverlapExampleSelector threshold greater than 1.0."""
selector.threshold = 1.0 + 1e-09
sentence = 'Spot can run.'
output = selector.select_examples({'input': sentence})
assert output == [] | def test_selector_threshold_more_than_one(selector: NGramOverlapExampleSelector
) ->None:
"""Tests NGramOverlapExampleSelector threshold greater than 1.0."""
selector.threshold = 1.0 + 1e-09
sentence = 'Spot can run.'
output = selector.select_examples({'input': sentence})
assert output == [] | Tests NGramOverlapExampleSelector threshold greater than 1.0. |
_get_python_function_name | """Get the name of a Python function."""
return function.__name__ | def _get_python_function_name(function: Callable) ->str:
"""Get the name of a Python function."""
return function.__name__ | Get the name of a Python function. |
get_lc_namespace | """Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages'] | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages'] | Get the namespace of the langchain object. |
output_keys | """Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, 'intermediate_steps'] | @property
def output_keys(self) ->List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, 'intermediate_steps'] | Return the singular output key.
:meta private: |
is_lc_serializable | return True | @classmethod
def is_lc_serializable(cls) ->bool:
return True | null |
test_tool_from_function_with_run_manager | """Test run of tool when using run_manager."""
def foo(bar: str, callbacks: Optional[CallbackManagerForToolRun]=None) ->str:
"""Docstring
Args:
bar: str
"""
assert callbacks is not None
return 'foo' + bar
handler = FakeCallbackHandler()
tool = Tool.from_function(foo, name='foo', description='Docstring')
assert tool.run(tool_input={'bar': 'bar'}, run_manager=[handler]) == 'foobar'
assert tool.run('baz', run_manager=[handler]) == 'foobaz' | def test_tool_from_function_with_run_manager() ->None:
"""Test run of tool when using run_manager."""
def foo(bar: str, callbacks: Optional[CallbackManagerForToolRun]=None
) ->str:
"""Docstring
Args:
bar: str
"""
assert callbacks is not None
return 'foo' + bar
handler = FakeCallbackHandler()
tool = Tool.from_function(foo, name='foo', description='Docstring')
assert tool.run(tool_input={'bar': 'bar'}, run_manager=[handler]
) == 'foobar'
assert tool.run('baz', run_manager=[handler]) == 'foobaz' | Test run of tool when using run_manager. |
update_documents | """Update a document in the collection.
Args:
ids (List[str]): List of ids of the document to update.
documents (List[Document]): List of documents to update.
"""
text = [document.page_content for document in documents]
metadata = [document.metadata for document in documents]
if self._embedding_function is None:
raise ValueError(
'For update, you must specify an embedding function on creation.')
embeddings = self._embedding_function.embed_documents(text)
if hasattr(self._collection._client, 'max_batch_size'):
from chromadb.utils.batch_utils import create_batches
for batch in create_batches(api=self._collection._client, ids=ids,
metadatas=metadata, documents=text, embeddings=embeddings):
self._collection.update(ids=batch[0], embeddings=batch[1],
documents=batch[3], metadatas=batch[2])
else:
self._collection.update(ids=ids, embeddings=embeddings, documents=text,
metadatas=metadata) | def update_documents(self, ids: List[str], documents: List[Document]) ->None:
"""Update a document in the collection.
Args:
ids (List[str]): List of ids of the document to update.
documents (List[Document]): List of documents to update.
"""
text = [document.page_content for document in documents]
metadata = [document.metadata for document in documents]
if self._embedding_function is None:
raise ValueError(
'For update, you must specify an embedding function on creation.')
embeddings = self._embedding_function.embed_documents(text)
if hasattr(self._collection._client, 'max_batch_size'):
from chromadb.utils.batch_utils import create_batches
for batch in create_batches(api=self._collection._client, ids=ids,
metadatas=metadata, documents=text, embeddings=embeddings):
self._collection.update(ids=batch[0], embeddings=batch[1],
documents=batch[3], metadatas=batch[2])
else:
self._collection.update(ids=ids, embeddings=embeddings, documents=
text, metadatas=metadata) | Update a document in the collection.
Args:
ids (List[str]): List of ids of the document to update.
documents (List[Document]): List of documents to update. |
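A minimal sketch of updating a stored document, assuming the in-memory Chroma client and the FakeEmbeddings helper; the id and texts are placeholders.

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document

db = Chroma.from_texts(["old text"], FakeEmbeddings(size=4), ids=["doc-1"])
# re-embeds the new page_content and overwrites the record stored under "doc-1"
db.update_documents(["doc-1"], [Document(page_content="new text")])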
InputType | return Union[str, AnyMessage] | @property
def InputType(self) ->Any:
return Union[str, AnyMessage] | null |
_get_metadata | return {'source': f's3://{self.bucket}/{self.key}'} | def _get_metadata(self) ->dict:
return {'source': f's3://{self.bucket}/{self.key}'} | null |
from_url | """Get an OpenAPI spec from a URL."""
response = requests.get(url)
return cls.from_text(response.text) | @classmethod
def from_url(cls, url: str) ->OpenAPISpec:
"""Get an OpenAPI spec from a URL."""
response = requests.get(url)
return cls.from_text(response.text) | Get an OpenAPI spec from a URL. |
get_default_document_variable_name | """Get default document variable name, if not provided.
If only one variable is present in the llm_chain.prompt,
we can infer that the formatted documents should be passed in
with this variable name.
"""
llm_chain_variables = values['llm_chain'].prompt.input_variables
if 'document_variable_name' not in values:
if len(llm_chain_variables) == 1:
values['document_variable_name'] = llm_chain_variables[0]
else:
raise ValueError(
'document_variable_name must be provided if there are multiple llm_chain_variables'
)
elif values['document_variable_name'] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was not found in llm_chain input_variables: {llm_chain_variables}"
)
return values | @root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) ->Dict:
"""Get default document variable name, if not provided.
If only one variable is present in the llm_chain.prompt,
we can infer that the formatted documents should be passed in
with this variable name.
"""
llm_chain_variables = values['llm_chain'].prompt.input_variables
if 'document_variable_name' not in values:
if len(llm_chain_variables) == 1:
values['document_variable_name'] = llm_chain_variables[0]
else:
raise ValueError(
'document_variable_name must be provided if there are multiple llm_chain_variables'
)
elif values['document_variable_name'] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was not found in llm_chain input_variables: {llm_chain_variables}"
)
return values | Get default document variable name, if not provided.
If only one variable is present in the llm_chain.prompt,
we can infer that the formatted documents should be passed in
with this variable name. |
_partial | return _make_with_name(func.__name__)(func) | def _partial(func: Callable[[str], str]) ->BaseTool:
return _make_with_name(func.__name__)(func) | null |
is_valid | import esprima
try:
esprima.parseScript(self.code)
return True
except esprima.Error:
return False | def is_valid(self) ->bool:
import esprima
try:
esprima.parseScript(self.code)
return True
except esprima.Error:
return False | null |
fetch_data | """Fetch data from a URL."""
headers = {'Authorization': access_token}
response = requests.get(url, headers=headers, params=query)
response.raise_for_status()
return response.json() | def fetch_data(url: str, access_token: str, query: Optional[dict]=None) ->dict:
"""Fetch data from a URL."""
headers = {'Authorization': access_token}
response = requests.get(url, headers=headers, params=query)
response.raise_for_status()
return response.json() | Fetch data from a URL. |
run | """Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
session = self._get_session()
request = self._create_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()['result'] | def run(self, action_id: str, instructions: str, params: Optional[Dict]=None
) ->Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
session = self._get_session()
request = self._create_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()['result'] | Executes an action that is identified by action_id and must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call. |
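A minimal usage sketch, assuming a Zapier NLA key in the ZAPIER_NLA_API_KEY environment variable and at least one exposed action; the instruction text is a placeholder.

from langchain_community.utilities.zapier import ZapierNLAWrapper

zapier = ZapierNLAWrapper()
actions = zapier.list()  # each entry carries the id of an exposed action
result = zapier.run(actions[0]["id"], "Send a hello message to #general")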
clear | """Removes all messages from the chat history"""
self._create_empty_doc()
if self.sync:
self._wait_until(lambda : not self.messages, RocksetChatMessageHistory.
ADD_TIMEOUT_MS) | def clear(self) ->None:
"""Removes all messages from the chat history"""
self._create_empty_doc()
if self.sync:
self._wait_until(lambda : not self.messages,
RocksetChatMessageHistory.ADD_TIMEOUT_MS) | Removes all messages from the chat history |
as_dict | schemas: Dict[str, List[Any]] = {'text': [], 'tag': [], 'numeric': []}
for attr, attr_value in self.__dict__.items():
if isinstance(attr_value, list) and len(attr_value) > 0:
field_values: List[Dict[str, Any]] = []
for val in attr_value:
value: Dict[str, Any] = {}
for field, field_value in val.__dict__.items():
if isinstance(field_value, Enum):
value[field] = field_value.value
elif field_value is not None:
value[field] = field_value
field_values.append(value)
schemas[attr] = field_values
schema: Dict[str, List[Any]] = {}
for k, v in schemas.items():
if len(v) > 0:
schema[k] = v
return schema | def as_dict(self) ->Dict[str, List[Any]]:
schemas: Dict[str, List[Any]] = {'text': [], 'tag': [], 'numeric': []}
for attr, attr_value in self.__dict__.items():
if isinstance(attr_value, list) and len(attr_value) > 0:
field_values: List[Dict[str, Any]] = []
for val in attr_value:
value: Dict[str, Any] = {}
for field, field_value in val.__dict__.items():
if isinstance(field_value, Enum):
value[field] = field_value.value
elif field_value is not None:
value[field] = field_value
field_values.append(value)
schemas[attr] = field_values
schema: Dict[str, List[Any]] = {}
for k, v in schemas.items():
if len(v) > 0:
schema[k] = v
return schema | null |
test_indent_lines_after_first | """Test indent_lines_after_first function"""
indented_text = indent_lines_after_first(text, prefix)
assert indented_text == expected_output | @pytest.mark.parametrize('text,prefix,expected_output', [(
"""line 1
line 2
line 3""", '1', """line 1
line 2
line 3"""), (
'line 1\nline 2\nline 3', 'ax', """line 1
line 2
line 3""")])
def test_indent_lines_after_first(text: str, prefix: str, expected_output: str
) ->None:
"""Test indent_lines_after_first function"""
indented_text = indent_lines_after_first(text, prefix)
assert indented_text == expected_output | Test indent_lines_after_first function |
from_params | """Convenience constructor that builds the odsp.ODPS MaxCompute client from
given parameters.
Args:
endpoint: MaxCompute endpoint.
project: A project is a basic organizational unit of MaxCompute, which is
similar to a database.
access_id: MaxCompute access ID. Should be passed in directly or set as the
environment variable `MAX_COMPUTE_ACCESS_ID`.
secret_access_key: MaxCompute secret access key. Should be passed in
directly or set as the environment variable
`MAX_COMPUTE_SECRET_ACCESS_KEY`.
"""
try:
from odps import ODPS
except ImportError as ex:
raise ImportError(
'Could not import pyodps python package. Please install it with `pip install pyodps` or refer to https://pyodps.readthedocs.io/.'
) from ex
access_id = access_id or get_from_env('access_id', 'MAX_COMPUTE_ACCESS_ID')
secret_access_key = secret_access_key or get_from_env('secret_access_key',
'MAX_COMPUTE_SECRET_ACCESS_KEY')
client = ODPS(access_id=access_id, secret_access_key=secret_access_key,
project=project, endpoint=endpoint)
if not client.exist_project(project):
raise ValueError(f'The project "{project}" does not exist.')
return cls(client) | @classmethod
def from_params(cls, endpoint: str, project: str, *, access_id: Optional[
str]=None, secret_access_key: Optional[str]=None) ->MaxComputeAPIWrapper:
"""Convenience constructor that builds the odsp.ODPS MaxCompute client from
given parameters.
Args:
endpoint: MaxCompute endpoint.
project: A project is a basic organizational unit of MaxCompute, which is
similar to a database.
access_id: MaxCompute access ID. Should be passed in directly or set as the
environment variable `MAX_COMPUTE_ACCESS_ID`.
secret_access_key: MaxCompute secret access key. Should be passed in
directly or set as the environment variable
`MAX_COMPUTE_SECRET_ACCESS_KEY`.
"""
try:
from odps import ODPS
except ImportError as ex:
raise ImportError(
'Could not import pyodps python package. Please install it with `pip install pyodps` or refer to https://pyodps.readthedocs.io/.'
) from ex
access_id = access_id or get_from_env('access_id', 'MAX_COMPUTE_ACCESS_ID')
secret_access_key = secret_access_key or get_from_env('secret_access_key',
'MAX_COMPUTE_SECRET_ACCESS_KEY')
client = ODPS(access_id=access_id, secret_access_key=secret_access_key,
project=project, endpoint=endpoint)
if not client.exist_project(project):
raise ValueError(f'The project "{project}" does not exist.')
return cls(client) | Convenience constructor that builds the odps.ODPS MaxCompute client from
given parameters.
Args:
endpoint: MaxCompute endpoint.
project: A project is a basic organizational unit of MaxCompute, which is
similar to a database.
access_id: MaxCompute access ID. Should be passed in directly or set as the
environment variable `MAX_COMPUTE_ACCESS_ID`.
secret_access_key: MaxCompute secret access key. Should be passed in
directly or set as the environment variable
`MAX_COMPUTE_SECRET_ACCESS_KEY`. |
_get_multi_prompt | num_dfs = len(dfs)
if suffix is not None:
suffix_to_use = suffix
include_dfs_head = True
elif include_df_in_prompt:
suffix_to_use = SUFFIX_WITH_MULTI_DF
include_dfs_head = True
else:
suffix_to_use = SUFFIX_NO_DF
include_dfs_head = False
if input_variables is None:
input_variables = ['input', 'agent_scratchpad', 'num_dfs']
if include_dfs_head:
input_variables += ['dfs_head']
if prefix is None:
prefix = MULTI_DF_PREFIX
df_locals = {}
for i, dataframe in enumerate(dfs):
df_locals[f'df{i + 1}'] = dataframe
tools = [PythonAstREPLTool(locals=df_locals)] + list(extra_tools)
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix=
suffix_to_use, input_variables=input_variables)
partial_prompt = prompt.partial()
if 'dfs_head' in input_variables:
dfs_head = '\n\n'.join([d.head(number_of_head_rows).to_markdown() for d in
dfs])
partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs), dfs_head=
dfs_head)
if 'num_dfs' in input_variables:
partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs))
return partial_prompt, tools | def _get_multi_prompt(dfs: List[Any], prefix: Optional[str]=None, suffix:
Optional[str]=None, input_variables: Optional[List[str]]=None,
include_df_in_prompt: Optional[bool]=True, number_of_head_rows: int=5,
extra_tools: Sequence[BaseTool]=()) ->Tuple[BasePromptTemplate, List[
BaseTool]]:
num_dfs = len(dfs)
if suffix is not None:
suffix_to_use = suffix
include_dfs_head = True
elif include_df_in_prompt:
suffix_to_use = SUFFIX_WITH_MULTI_DF
include_dfs_head = True
else:
suffix_to_use = SUFFIX_NO_DF
include_dfs_head = False
if input_variables is None:
input_variables = ['input', 'agent_scratchpad', 'num_dfs']
if include_dfs_head:
input_variables += ['dfs_head']
if prefix is None:
prefix = MULTI_DF_PREFIX
df_locals = {}
for i, dataframe in enumerate(dfs):
df_locals[f'df{i + 1}'] = dataframe
tools = [PythonAstREPLTool(locals=df_locals)] + list(extra_tools)
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix=
suffix_to_use, input_variables=input_variables)
partial_prompt = prompt.partial()
if 'dfs_head' in input_variables:
dfs_head = '\n\n'.join([d.head(number_of_head_rows).to_markdown() for
d in dfs])
partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs),
dfs_head=dfs_head)
if 'num_dfs' in input_variables:
partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs))
return partial_prompt, tools | null |
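A minimal sketch of the multi-dataframe agent that consumes this prompt, assuming the langchain_experimental constructor and an OpenAI chat model; the dataframes are placeholders.

import pandas as pd

from langchain_community.chat_models import ChatOpenAI
from langchain_experimental.agents import create_pandas_dataframe_agent

df1 = pd.DataFrame({"a": [1, 2]})
df2 = pd.DataFrame({"a": [3, 4]})
# passing a list of dataframes routes through the multi-df prompt built above
agent = create_pandas_dataframe_agent(ChatOpenAI(temperature=0), [df1, df2])
agent.invoke("How many rows are there across both dataframes?")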
run | """
Run commands in either an existing persistent
subprocess or in a new subprocess environment.
Args:
commands(List[str]): a list of commands to
execute in the session
"""
if isinstance(commands, str):
commands = [commands]
commands = ';'.join(commands)
if self.process is not None:
return self._run_persistent(commands)
else:
return self._run(commands) | def run(self, commands: Union[str, List[str]]) ->str:
"""
Run commands in either an existing persistent
subprocess or in a new subprocess environment.
Args:
commands(List[str]): a list of commands to
execute in the session
"""
if isinstance(commands, str):
commands = [commands]
commands = ';'.join(commands)
if self.process is not None:
return self._run_persistent(commands)
else:
return self._run(commands) | Run commands in either an existing persistent
subprocess or in a new subprocess environment.
Args:
commands(List[str]): a list of commands to
execute in the session |
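A minimal usage sketch; the import path for the BashProcess helper is an assumption based on where the experimental bash utilities live.

from langchain_experimental.llm_bash.bash import BashProcess

bash = BashProcess(persistent=False)
# the list of commands is joined with ';' and run in a single subprocess
output = bash.run(["echo hello", "pwd"])
print(output)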