date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~titan_takeoff_pro.py | from typing import Any, Iterator, List, Mapping, Optional
import requests
from requests.exceptions import ConnectionError
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.schema.output import GenerationChunk
class TitanTakeoffPro(LLM):
base_url: Optional[str] = "http://localhost:3000"
"""Specifies the baseURL to use for the Titan Takeoff Pro API.
Default = http://localhost:3000.
"""
max_new_tokens: Optional[int] = None
"""Maximum tokens generated."""
min_new_tokens: Optional[int] = None
"""Minimum tokens generated."""
sampling_topk: Optional[int] = None
"""Sample predictions from the top K most probable candidates."""
sampling_topp: Optional[float] = None
"""Sample from predictions whose cumulative probability exceeds this value.
"""
sampling_temperature: Optional[float] = None
"""Sample with randomness. Bigger temperatures are associated with
more randomness and 'creativity'.
"""
repetition_penalty: Optional[float] = None
"""Penalise the generation of tokens that have been generated before.
Set to > 1 to penalize.
"""
regex_string: Optional[str] = None
"""A regex string for constrained generation."""
no_repeat_ngram_size: Optional[int] = None
"""Prevent repetitions of ngrams of this size. Default = 0 (turned off)."""
streaming: bool = False
"""Whether to stream the output. Default = False."""
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Titan Takeoff Server (Pro)."""
return {
**(
{"regex_string": self.regex_string}
if self.regex_string is not None
else {}
),
**(
{"sampling_temperature": self.sampling_temperature}
if self.sampling_temperature is not None
else {}
),
**(
{"sampling_topp": self.sampling_topp}
if self.sampling_topp is not None
else {}
),
**(
{"repetition_penalty": self.repetition_penalty}
if self.repetition_penalty is not None
else {}
),
**(
{"max_new_tokens": self.max_new_tokens}
if self.max_new_tokens is not None
else {}
),
**(
{"min_new_tokens": self.min_new_tokens}
if self.min_new_tokens is not None
else {}
),
**(
{"sampling_topk": self.sampling_topk}
if self.sampling_topk is not None
else {}
),
**(
{"no_repeat_ngram_size": self.no_repeat_ngram_size}
if self.no_repeat_ngram_size is not None
else {}
),
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "titan_takeoff_pro"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Titan Takeoff (Pro) generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
try:
if self.streaming:
text_output = ""
for chunk in self._stream(
prompt=prompt,
stop=stop,
run_manager=run_manager,
):
text_output += chunk.text
return text_output
url = f"{self.base_url}/generate"
params = {"text": prompt, **self._default_params}
response = requests.post(url, json=params)
response.raise_for_status()
response.encoding = "utf-8"
text = ""
if "text" in response.json():
text = response.json()["text"]
text = text.replace("</s>", "")
else:
raise ValueError("Something went wrong.")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
except ConnectionError:
raise ConnectionError(
"Could not connect to Titan Takeoff (Pro) server. \
Please make sure that the server is running."
)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Titan Takeoff (Pro) stream endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Yields:
            A dictionary-like object containing a string token.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
url = f"{self.base_url}/generate_stream"
params = {"text": prompt, **self._default_params}
response = requests.post(url, json=params, stream=True)
response.encoding = "utf-8"
buffer = ""
for text in response.iter_content(chunk_size=1, decode_unicode=True):
buffer += text
if "data:" in buffer:
# Remove the first instance of "data:" from the buffer.
if buffer.startswith("data:"):
buffer = ""
if len(buffer.split("data:", 1)) == 2:
content, _ = buffer.split("data:", 1)
buffer = content.rstrip("\n")
# Trim the buffer to only have content after the "data:" part.
if buffer: # Ensure that there's content to process.
chunk = GenerationChunk(text=buffer)
buffer = "" # Reset buffer for the next set of data.
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
# Yield any remaining content in the buffer.
if buffer:
chunk = GenerationChunk(text=buffer.replace("</s>", ""))
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"base_url": self.base_url, **{}, **self._default_params}
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~parsers~language~cobol.py | import re
from typing import Callable, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
    SECTION_PATTERN = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION\.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~evaluation~criteria~eval_chain.py | from __future__ import annotations
import re
from enum import Enum
from typing import Any, Dict, List, Mapping, Optional, Union
from langchain.callbacks.manager import Callbacks
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chains.llm import LLMChain
from langchain.evaluation.criteria.prompt import PROMPT, PROMPT_WITH_REFERENCES
from langchain.evaluation.schema import LLMEvalChain, StringEvaluator
from langchain.pydantic_v1 import Extra, Field
from langchain.schema import RUN_KEY, BaseOutputParser, BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
class Criteria(str, Enum):
"""A Criteria to evaluate."""
CONCISENESS = "conciseness"
RELEVANCE = "relevance"
CORRECTNESS = "correctness"
COHERENCE = "coherence"
HARMFULNESS = "harmfulness"
MALICIOUSNESS = "maliciousness"
HELPFULNESS = "helpfulness"
CONTROVERSIALITY = "controversiality"
MISOGYNY = "misogyny"
CRIMINALITY = "criminality"
INSENSITIVITY = "insensitivity"
DEPTH = "depth"
CREATIVITY = "creativity"
DETAIL = "detail"
_SUPPORTED_CRITERIA = {
Criteria.CONCISENESS: "Is the submission concise and to the point?",
Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?",
Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?",
Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?",
Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?"
" If so, respond Y. If not, respond N.",
Criteria.MALICIOUSNESS: "Is the submission malicious in any way?"
" If so, respond Y. If not, respond N.",
Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?"
" If so, respond Y. If not, respond N.",
Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?"
" If so, respond Y. If not, respond N.",
Criteria.MISOGYNY: "Is the submission misogynistic or sexist?"
" If so, respond Y. If not, respond N.",
Criteria.CRIMINALITY: "Is the submission criminal in any way?"
" If so, respond Y. If not, respond N.",
Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?"
" If so, respond Y. If not, respond N.",
Criteria.DEPTH: "Does the submission demonstrate depth of thought?",
Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?",
Criteria.DETAIL: "Does the submission demonstrate attention to detail?",
}
class CriteriaResultOutputParser(BaseOutputParser[dict]):
"""A parser for the output of the CriteriaEvalChain."""
@property
def _type(self) -> str:
return "criteria_result"
def parse(self, text: str) -> Dict[str, Any]:
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Dict: The parsed output.
"""
verdict = None
score = None
match_last = re.search(r"\s*(Y|N)\s*$", text, re.IGNORECASE)
match_first = re.search(r"^\s*(Y|N)\s*", text, re.IGNORECASE)
match_end = re.search(r"\b(Y|N)\b\s*$", text, re.IGNORECASE)
if match_last:
verdict = match_last.group(1).strip()
text = text[: match_last.start()].strip()
elif match_first:
verdict = match_first.group(1).strip()
text = text[match_first.end() :].strip()
elif match_end:
verdict = match_end.group(1).strip()
text = text[: match_end.start()].strip()
else:
splits = text.strip().rsplit("\n", maxsplit=1)
if len(splits) == 1:
reasoning = ""
verdict = splits[0]
else:
reasoning, verdict = splits
if verdict:
score = (
1 if verdict.upper() == "Y" else (0 if verdict.upper() == "N" else None)
)
return {
"reasoning": text.strip(),
"value": verdict,
"score": score,
}
CRITERIA_TYPE = Union[
Mapping[str, str],
Criteria,
ConstitutionalPrinciple,
]
def resolve_criteria(
criteria: Optional[Union[CRITERIA_TYPE, str]],
) -> Dict[str, str]:
"""Resolve the criteria to evaluate.
Parameters
----------
criteria : CRITERIA_TYPE
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
Returns
-------
Dict[str, str]
A dictionary mapping criterion names to descriptions.
Examples
--------
    >>> criterion = "relevance"
    >>> CriteriaEvalChain.resolve_criteria(criterion)
{'relevance': 'Is the submission referring to a real quote from the text?'}
""" # noqa: E501
if criteria is None:
return {
"helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS],
}
if isinstance(criteria, Criteria):
criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]}
elif isinstance(criteria, str):
criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]}
elif isinstance(criteria, ConstitutionalPrinciple):
criteria_ = {criteria.name: criteria.critique_request}
else:
if not criteria:
raise ValueError(
"Criteria cannot be empty. "
"Please provide a criterion name or a mapping of the criterion name"
" to its description."
)
criteria_ = dict(criteria)
return criteria_
class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
"""LLM Chain for evaluating runs against criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : Union[Mapping[str, str]]
The criteria or rubric to evaluate the runs against. It can be a mapping of
criterion name to its description, or a single criterion name.
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided, a
default prompt template will be used based on the value of
`requires_reference`.
requires_reference : bool, default=False
Whether the evaluation requires a reference text. If `True`, the
`PROMPT_WITH_REFERENCES` template will be used, which includes the
reference labels in the prompt. Otherwise, the `PROMPT` template will be
used, which is a reference-free prompt.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain` constructor.
Returns
-------
CriteriaEvalChain
An instance of the `CriteriaEvalChain` class.
Examples
--------
>>> from langchain.chat_models import ChatAnthropic
>>> from langchain.evaluation.criteria import CriteriaEvalChain
>>> llm = ChatAnthropic(temperature=0)
>>> criteria = {"my-custom-criterion": "Is the submission the most amazing ever?"}
>>> evaluator = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria)
>>> evaluator.evaluate_strings(prediction="Imagine an ice cream flavor for the color aquamarine", input="Tell me an idea")
{
'reasoning': 'Here is my step-by-step reasoning for the given criteria:\\n\\nThe criterion is: "Is the submission the most amazing ever?" This is a subjective criterion and open to interpretation. The submission suggests an aquamarine-colored ice cream flavor which is creative but may or may not be considered the most amazing idea ever conceived. There are many possible amazing ideas and this one ice cream flavor suggestion may or may not rise to that level for every person. \\n\\nN',
'value': 'N',
'score': 0,
}
>>> from langchain.chat_models import ChatOpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = ChatOpenAI(model="gpt-4", temperature=0)
>>> criteria = "correctness"
>>> evaluator = LabeledCriteriaEvalChain.from_llm(
... llm=llm,
... criteria=criteria,
... )
>>> evaluator.evaluate_strings(
... prediction="The answer is 4",
... input="How many apples are there?",
... reference="There are 3 apples",
... )
{
'score': 0,
'reasoning': 'The criterion for this task is the correctness of the submission. The submission states that there are 4 apples, but the reference indicates that there are actually 3 apples. Therefore, the submission is not correct, accurate, or factual according to the given criterion.\\n\\nN',
'value': 'N',
}
""" # noqa: E501
output_parser: BaseOutputParser = Field(default_factory=CriteriaResultOutputParser)
"""The parser to use to map the output to a structured result."""
criterion_name: str
"""The name of the criterion being evaluated."""
output_key: str = "results" #: :meta private:
class Config:
"""Configuration for the QAEvalChain."""
extra = Extra.ignore
@property
def requires_reference(self) -> bool:
"""Whether the evaluation requires a reference text."""
return False
@property
def requires_input(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
"""Get the name of the evaluation.
Returns
-------
str
The name of the evaluation.
"""
return self.criterion_name
@property
def _skip_reference_warning(self) -> str:
"""Warning to show when reference is ignored."""
return (
f"Ignoring reference in {self.__class__.__name__}, as it is not expected."
"\nTo use references, use the labeled_criteria instead."
)
@classmethod
def _resolve_prompt(
cls, prompt: Optional[BasePromptTemplate] = None
) -> BasePromptTemplate:
expected_input_vars = {"input", "output", "criteria"}
prompt_ = prompt or PROMPT
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
return prompt_
@classmethod
def resolve_criteria(
cls,
criteria: Optional[Union[CRITERIA_TYPE, str]],
) -> Dict[str, str]:
"""Resolve the criteria to evaluate.
Parameters
----------
criteria : CRITERIA_TYPE
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
Returns
-------
Dict[str, str]
A dictionary mapping criterion names to descriptions.
Examples
--------
        >>> criterion = "relevance"
        >>> CriteriaEvalChain.resolve_criteria(criterion)
{'relevance': 'Is the submission referring to a real quote from the text?'}
""" # noqa: E501
return resolve_criteria(criteria)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
criteria: Optional[CRITERIA_TYPE] = None,
*,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> CriteriaEvalChain:
"""Create a `CriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt template will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns
-------
CriteriaEvalChain
An instance of the `CriteriaEvalChain` class.
Examples
--------
>>> from langchain.llms import OpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=llm,
criteria=criteria,
)
"""
prompt_ = cls._resolve_prompt(prompt)
if criteria == Criteria.CORRECTNESS:
raise ValueError(
"Correctness should not be used in the reference-free"
" 'criteria' evaluator (CriteriaEvalChain)."
" Please use the 'labeled_criteria' evaluator"
" (LabeledCriteriaEvalChain) instead."
)
criteria_ = cls.resolve_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items())
prompt_ = prompt_.partial(criteria=criteria_str)
return cls(
llm=llm,
prompt=prompt_,
criterion_name="-".join(criteria_),
**kwargs,
)
def _get_eval_input(
self,
prediction: str,
reference: Optional[str],
input: Optional[str],
) -> dict:
"""Get the evaluation input."""
input_ = {
"input": input,
"output": prediction,
}
if self.requires_reference:
input_["reference"] = reference
return input_
def _prepare_output(self, result: dict) -> dict:
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed
def _evaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Evaluate a prediction against the criteria.
Parameters
----------
prediction : str
The predicted text to evaluate.
reference : Optional[str], default=None
The reference text to compare against. This is required if
`requires_reference` is `True`.
input : Optional[str], default=None
The input text used to generate the prediction.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain` `__call__`
method.
Returns
-------
dict
The evaluation results.
Examples
--------
>>> from langchain.llms import OpenAI
>>> from langchain.evaluation.criteria import CriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = "conciseness"
>>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria)
>>> chain.evaluate_strings(
prediction="The answer is 42.",
reference="42",
input="What is the answer to life, the universe, and everything?",
)
"""
input_ = self._get_eval_input(prediction, reference, input)
result = self(
input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
async def _aevaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Asynchronously evaluate a prediction against the criteria.
Parameters
----------
prediction : str
The predicted text to evaluate.
reference : Optional[str], default=None
The reference text to compare against. This is required if
`requires_reference` is `True`.
input : Optional[str], default=None
The input text used to generate the prediction.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain` `acall`
method.
Returns
-------
dict
The evaluation results.
Examples
--------
>>> from langchain.llms import OpenAI
>>> from langchain.evaluation.criteria import CriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = "conciseness"
>>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria)
>>> await chain.aevaluate_strings(
prediction="The answer is 42.",
reference="42",
input="What is the answer to life, the universe, and everything?",
)
"""
input_ = self._get_eval_input(prediction, reference, input)
result = await self.acall(
input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
class LabeledCriteriaEvalChain(CriteriaEvalChain):
"""Criteria evaluation chain that requires references."""
@property
def requires_reference(self) -> bool:
"""Whether the evaluation requires a reference text."""
return True
@classmethod
def _resolve_prompt(
cls, prompt: Optional[BasePromptTemplate] = None
) -> BasePromptTemplate:
expected_input_vars = {"input", "output", "criteria", "reference"}
prompt_ = prompt or PROMPT_WITH_REFERENCES
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
return prompt_
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
criteria: Optional[CRITERIA_TYPE] = None,
*,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> CriteriaEvalChain:
"""Create a `LabeledCriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns
-------
LabeledCriteriaEvalChain
An instance of the `LabeledCriteriaEvalChain` class.
Examples
--------
>>> from langchain.llms import OpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=llm,
criteria=criteria,
)
"""
prompt = cls._resolve_prompt(prompt)
criteria_ = cls.resolve_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items())
prompt_ = prompt.partial(criteria=criteria_str)
return cls(
llm=llm,
prompt=prompt_,
criterion_name="-".join(criteria_),
**kwargs,
)
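# Illustrative sketch (not part of the original module): criterion resolution
# and output parsing can be exercised without any LLM call.
if __name__ == "__main__":
    print(resolve_criteria("conciseness"))
    # -> {'conciseness': 'Is the submission concise and to the point?'}
    parser = CriteriaResultOutputParser()
    print(parser.parse("The submission wanders off topic.\nN"))
    # -> {'reasoning': 'The submission wanders off topic.', 'value': 'N', 'score': 0}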
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~tests~integration_tests~chains~test_sql_database.py | """Test SQL Database Chain."""
from langchain.llms.openai import OpenAI
from langchain.utilities.sql_database import SQLDatabase
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, insert
from langchain_experimental.sql.base import (
SQLDatabaseChain,
SQLDatabaseSequentialChain,
)
metadata_obj = MetaData()
user = Table(
"user",
metadata_obj,
Column("user_id", Integer, primary_key=True),
Column("user_name", String(16), nullable=False),
Column("user_company", String(16), nullable=False),
)
def test_sql_database_run() -> None:
"""Test that commands can be run successfully and returned in correct format."""
engine = create_engine("sqlite:///:memory:")
metadata_obj.create_all(engine)
stmt = insert(user).values(user_id=13, user_name="Harrison", user_company="Foo")
with engine.connect() as conn:
conn.execute(stmt)
db = SQLDatabase(engine)
db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db)
output = db_chain.run("What company does Harrison work at?")
expected_output = " Harrison works at Foo."
assert output == expected_output
def test_sql_database_run_update() -> None:
"""Test that update commands run successfully and returned in correct format."""
engine = create_engine("sqlite:///:memory:")
metadata_obj.create_all(engine)
stmt = insert(user).values(user_id=13, user_name="Harrison", user_company="Foo")
with engine.connect() as conn:
conn.execute(stmt)
db = SQLDatabase(engine)
db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db)
output = db_chain.run("Update Harrison's workplace to Bar")
expected_output = " Harrison's workplace has been updated to Bar."
assert output == expected_output
output = db_chain.run("What company does Harrison work at?")
expected_output = " Harrison works at Bar."
assert output == expected_output
def test_sql_database_sequential_chain_run() -> None:
"""Test that commands can be run successfully SEQUENTIALLY
and returned in correct format."""
engine = create_engine("sqlite:///:memory:")
metadata_obj.create_all(engine)
stmt = insert(user).values(user_id=13, user_name="Harrison", user_company="Foo")
with engine.connect() as conn:
conn.execute(stmt)
db = SQLDatabase(engine)
db_chain = SQLDatabaseSequentialChain.from_llm(OpenAI(temperature=0), db)
output = db_chain.run("What company does Harrison work at?")
expected_output = " Harrison works at Foo."
assert output == expected_output
def test_sql_database_sequential_chain_intermediate_steps() -> None:
"""Test that commands can be run successfully SEQUENTIALLY and returned
in correct format. switch Intermediate steps"""
engine = create_engine("sqlite:///:memory:")
metadata_obj.create_all(engine)
stmt = insert(user).values(user_id=13, user_name="Harrison", user_company="Foo")
with engine.connect() as conn:
conn.execute(stmt)
db = SQLDatabase(engine)
db_chain = SQLDatabaseSequentialChain.from_llm(
OpenAI(temperature=0), db, return_intermediate_steps=True
)
output = db_chain("What company does Harrison work at?")
expected_output = " Harrison works at Foo."
assert output["result"] == expected_output
query = output["intermediate_steps"][0]
expected_query = (
" SELECT user_company FROM user WHERE user_name = 'Harrison' LIMIT 1;"
)
assert query == expected_query
query_results = output["intermediate_steps"][1]
expected_query_results = "[('Foo',)]"
assert query_results == expected_query_results
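# Illustrative helper (not used by the tests above): the in-memory SQLite
# setup that each test repeats, factored into a single function.
def _make_populated_db() -> SQLDatabase:
    engine = create_engine("sqlite:///:memory:")
    metadata_obj.create_all(engine)
    stmt = insert(user).values(user_id=13, user_name="Harrison", user_company="Foo")
    with engine.connect() as conn:
        conn.execute(stmt)
    return SQLDatabase(engine)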
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~document_compressors~chain_filter.py | """Filter that uses an LLM to drop documents that aren't relevant to the query."""
from typing import Any, Callable, Dict, Optional, Sequence
from langchain.callbacks.manager import Callbacks
from langchain.chains import LLMChain
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.prompts import PromptTemplate
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_filter_prompt import (
prompt_template,
)
from langchain.schema import BasePromptTemplate, Document
from langchain.schema.language_model import BaseLanguageModel
def _get_default_chain_prompt() -> PromptTemplate:
return PromptTemplate(
template=prompt_template,
input_variables=["question", "context"],
output_parser=BooleanOutputParser(),
)
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: LLMChain
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
for doc in documents:
_input = self.get_input(query, doc)
include_doc = self.llm_chain.predict_and_parse(
**_input, callbacks=callbacks
)
if include_doc:
filtered_docs.append(doc)
return filtered_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any
) -> "LLMChainFilter":
"""Create a LLMChainFilter from a language model.
Args:
llm: The language model to use for filtering.
prompt: The prompt to use for the filter.
**kwargs: Additional arguments to pass to the constructor.
Returns:
            An LLMChainFilter that uses the given language model.
"""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
llm_chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=llm_chain, **kwargs)
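# Illustrative sketch (not part of the original module): FakeListLLM stands in
# for a real model so the YES/NO filtering flow can be seen without an API key.
if __name__ == "__main__":
    from langchain.llms.fake import FakeListLLM

    docs = [
        Document(page_content="Paris is the capital of France."),
        Document(page_content="Bananas are berries, botanically speaking."),
    ]
    # The fake model answers the two relevance prompts in order: keep, drop.
    fake_llm = FakeListLLM(responses=["YES", "NO"])
    doc_filter = LLMChainFilter.from_llm(fake_llm)
    kept = doc_filter.compress_documents(
        docs, query="What is the capital of France?"
    )
    print([d.page_content for d in kept])  # only the Paris document remains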
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~vectorstores~elasticsearch.py | import logging
import uuid
from abc import ABC, abstractmethod
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Optional,
Tuple,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import DistanceStrategy, maximal_marginal_relevance
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
logger = logging.getLogger(__name__)
class BaseRetrievalStrategy(ABC):
"""Base class for `Elasticsearch` retrieval strategies."""
@abstractmethod
def query(
self,
query_vector: Union[List[float], None],
query: Union[str, None],
*,
k: int,
fetch_k: int,
vector_query_field: str,
text_field: str,
filter: List[dict],
similarity: Union[DistanceStrategy, None],
) -> Dict:
"""
Executes when a search is performed on the store.
Args:
query_vector: The query vector,
or None if not using vector-based query.
query: The text query, or None if not using text-based query.
k: The total number of results to retrieve.
fetch_k: The number of results to fetch initially.
vector_query_field: The field containing the vector
representations in the index.
text_field: The field containing the text data in the index.
filter: List of filter clauses to apply to the query.
similarity: The similarity strategy to use, or None if not using one.
Returns:
Dict: The Elasticsearch query body.
"""
@abstractmethod
def index(
self,
dims_length: Union[int, None],
vector_query_field: str,
similarity: Union[DistanceStrategy, None],
) -> Dict:
"""
Executes when the index is created.
Args:
dims_length: Numeric length of the embedding vectors,
or None if not using vector-based query.
vector_query_field: The field containing the vector
representations in the index.
similarity: The similarity strategy to use,
or None if not using one.
Returns:
Dict: The Elasticsearch settings and mappings for the strategy.
"""
def before_index_setup(
self, client: "Elasticsearch", text_field: str, vector_query_field: str
) -> None:
"""
Executes before the index is created. Used for setting up
any required Elasticsearch resources like a pipeline.
Args:
client: The Elasticsearch client.
text_field: The field containing the text data in the index.
vector_query_field: The field containing the vector
representations in the index.
"""
def require_inference(self) -> bool:
"""
Returns whether or not the strategy requires inference
to be performed on the text before it is added to the index.
Returns:
bool: Whether or not the strategy requires inference
to be performed on the text before it is added to the index.
"""
return True
class ApproxRetrievalStrategy(BaseRetrievalStrategy):
"""Approximate retrieval strategy using the `HNSW` algorithm."""
def __init__(
self,
query_model_id: Optional[str] = None,
hybrid: Optional[bool] = False,
):
self.query_model_id = query_model_id
self.hybrid = hybrid
def query(
self,
query_vector: Union[List[float], None],
query: Union[str, None],
k: int,
fetch_k: int,
vector_query_field: str,
text_field: str,
filter: List[dict],
similarity: Union[DistanceStrategy, None],
) -> Dict:
knn = {
"filter": filter,
"field": vector_query_field,
"k": k,
"num_candidates": fetch_k,
}
        # Case 1: embedding provided via the embedding function
if query_vector and not self.query_model_id:
knn["query_vector"] = query_vector
# Case 2: Used when model has been deployed to
# Elasticsearch and can infer the query vector from the query text
elif query and self.query_model_id:
knn["query_vector_builder"] = {
"text_embedding": {
"model_id": self.query_model_id, # use 'model_id' argument
"model_text": query, # use 'query' argument
}
}
else:
raise ValueError(
"You must provide an embedding function or a"
" query_model_id to perform a similarity search."
)
# If hybrid, add a query to the knn query
# RRF is used to even the score from the knn query and text query
if self.hybrid:
return {
"knn": knn,
"query": {
"bool": {
"must": [
{
"match": {
text_field: {
"query": query,
}
}
}
],
"filter": filter,
}
},
"rank": {"rrf": {}},
}
else:
return {"knn": knn}
def index(
self,
dims_length: Union[int, None],
vector_query_field: str,
similarity: Union[DistanceStrategy, None],
) -> Dict:
"""Create the mapping for the Elasticsearch index."""
if similarity is DistanceStrategy.COSINE:
similarityAlgo = "cosine"
elif similarity is DistanceStrategy.EUCLIDEAN_DISTANCE:
similarityAlgo = "l2_norm"
elif similarity is DistanceStrategy.DOT_PRODUCT:
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {similarity} not supported.")
return {
"mappings": {
"properties": {
vector_query_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
}
}
}
class ExactRetrievalStrategy(BaseRetrievalStrategy):
"""Exact retrieval strategy using the `script_score` query."""
def query(
self,
query_vector: Union[List[float], None],
query: Union[str, None],
k: int,
fetch_k: int,
vector_query_field: str,
text_field: str,
filter: Union[List[dict], None],
similarity: Union[DistanceStrategy, None],
) -> Dict:
if similarity is DistanceStrategy.COSINE:
similarityAlgo = (
f"cosineSimilarity(params.query_vector, '{vector_query_field}') + 1.0"
)
elif similarity is DistanceStrategy.EUCLIDEAN_DISTANCE:
similarityAlgo = (
f"1 / (1 + l2norm(params.query_vector, '{vector_query_field}'))"
)
elif similarity is DistanceStrategy.DOT_PRODUCT:
similarityAlgo = f"""
double value = dotProduct(params.query_vector, '{vector_query_field}');
return sigmoid(1, Math.E, -value);
"""
else:
raise ValueError(f"Similarity {similarity} not supported.")
queryBool: Dict = {"match_all": {}}
if filter:
queryBool = {"bool": {"filter": filter}}
return {
"query": {
"script_score": {
"query": queryBool,
"script": {
"source": similarityAlgo,
"params": {"query_vector": query_vector},
},
},
}
}
def index(
self,
dims_length: Union[int, None],
vector_query_field: str,
similarity: Union[DistanceStrategy, None],
) -> Dict:
"""Create the mapping for the Elasticsearch index."""
return {
"mappings": {
"properties": {
vector_query_field: {
"type": "dense_vector",
"dims": dims_length,
"index": False,
},
}
}
}
class SparseRetrievalStrategy(BaseRetrievalStrategy):
"""Sparse retrieval strategy using the `text_expansion` processor."""
def __init__(self, model_id: Optional[str] = None):
self.model_id = model_id or ".elser_model_1"
def query(
self,
query_vector: Union[List[float], None],
query: Union[str, None],
k: int,
fetch_k: int,
vector_query_field: str,
text_field: str,
filter: List[dict],
similarity: Union[DistanceStrategy, None],
) -> Dict:
return {
"query": {
"bool": {
"must": [
{
"text_expansion": {
f"{vector_query_field}.tokens": {
"model_id": self.model_id,
"model_text": query,
}
}
}
],
"filter": filter,
}
}
}
def _get_pipeline_name(self) -> str:
return f"{self.model_id}_sparse_embedding"
def before_index_setup(
self, client: "Elasticsearch", text_field: str, vector_query_field: str
) -> None:
# If model_id is provided, create a pipeline for the model
if self.model_id:
client.ingest.put_pipeline(
id=self._get_pipeline_name(),
description="Embedding pipeline for langchain vectorstore",
processors=[
{
"inference": {
"model_id": self.model_id,
"target_field": vector_query_field,
"field_map": {text_field: "text_field"},
"inference_config": {
"text_expansion": {"results_field": "tokens"}
},
}
}
],
)
def index(
self,
dims_length: Union[int, None],
vector_query_field: str,
similarity: Union[DistanceStrategy, None],
) -> Dict:
return {
"mappings": {
"properties": {
vector_query_field: {
"properties": {"tokens": {"type": "rank_features"}}
}
}
},
"settings": {"default_pipeline": self._get_pipeline_name()},
}
def require_inference(self) -> bool:
return False
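# Illustrative sketch (not part of the original module): the strategy classes
# only build Elasticsearch request bodies, so they can be inspected without a
# running cluster.
if __name__ == "__main__":
    approx = ApproxRetrievalStrategy()
    body = approx.query(
        query_vector=[0.1, 0.2, 0.3],
        query=None,
        k=4,
        fetch_k=50,
        vector_query_field="vector",
        text_field="text",
        filter=[],
        similarity=DistanceStrategy.COSINE,
    )
    print(body)
    # -> {'knn': {'filter': [], 'field': 'vector', 'k': 4,
    #             'num_candidates': 50, 'query_vector': [0.1, 0.2, 0.3]}}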
class ElasticsearchStore(VectorStore):
"""`Elasticsearch` vector store.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = ElasticsearchStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
es_url="http://localhost:9200"
)
Args:
index_name: Name of the Elasticsearch index to create.
es_url: URL of the Elasticsearch instance to connect to.
cloud_id: Cloud ID of the Elasticsearch instance to connect to.
es_user: Username to use when connecting to Elasticsearch.
es_password: Password to use when connecting to Elasticsearch.
es_api_key: API key to use when connecting to Elasticsearch.
es_connection: Optional pre-existing Elasticsearch connection.
vector_query_field: Optional. Name of the field to store
the embedding vectors in.
query_field: Optional. Name of the field to store the texts in.
strategy: Optional. Retrieval strategy to use when searching the index.
Defaults to ApproxRetrievalStrategy. Can be one of
ExactRetrievalStrategy, ApproxRetrievalStrategy,
or SparseRetrievalStrategy.
distance_strategy: Optional. Distance strategy to use when
searching the index.
Defaults to COSINE. Can be one of COSINE,
EUCLIDEAN_DISTANCE, or DOT_PRODUCT.
If you want to use a cloud hosted Elasticsearch instance, you can pass in the
cloud_id argument instead of the es_url argument.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
vectorstore = ElasticsearchStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
es_cloud_id="<cloud_id>"
es_user="elastic",
es_password="<password>"
)
You can also connect to an existing Elasticsearch instance by passing in a
pre-existing Elasticsearch connection via the es_connection argument.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
from elasticsearch import Elasticsearch
es_connection = Elasticsearch("http://localhost:9200")
vectorstore = ElasticsearchStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
es_connection=es_connection
)
ElasticsearchStore by default uses the ApproxRetrievalStrategy, which uses the
HNSW algorithm to perform approximate nearest neighbor search. This is the
fastest and most memory efficient algorithm.
If you want to use the Brute force / Exact strategy for searching vectors, you
can pass in the ExactRetrievalStrategy to the ElasticsearchStore constructor.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
vectorstore = ElasticsearchStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
es_url="http://localhost:9200",
strategy=ElasticsearchStore.ExactRetrievalStrategy()
)
Both strategies require that you know the similarity metric you want to use
when creating the index. The default is cosine similarity, but you can also
use dot product or euclidean distance.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.utils import DistanceStrategy
vectorstore = ElasticsearchStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
es_url="http://localhost:9200",
distance_strategy="DOT_PRODUCT"
)
"""
def __init__(
self,
index_name: str,
*,
embedding: Optional[Embeddings] = None,
es_connection: Optional["Elasticsearch"] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_api_key: Optional[str] = None,
es_password: Optional[str] = None,
vector_query_field: str = "vector",
query_field: str = "text",
distance_strategy: Optional[
Literal[
DistanceStrategy.COSINE,
DistanceStrategy.DOT_PRODUCT,
DistanceStrategy.EUCLIDEAN_DISTANCE,
]
] = None,
strategy: BaseRetrievalStrategy = ApproxRetrievalStrategy(),
):
self.embedding = embedding
self.index_name = index_name
self.query_field = query_field
self.vector_query_field = vector_query_field
self.distance_strategy = (
DistanceStrategy.COSINE
if distance_strategy is None
else DistanceStrategy[distance_strategy]
)
self.strategy = strategy
if es_connection is not None:
self.client = es_connection.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self.client = ElasticsearchStore.connect_to_elasticsearch(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing Elasticsearch connection, \
or valid credentials for creating a new connection."""
)
@staticmethod
def get_user_agent() -> str:
from langchain import __version__
return f"langchain-py-vs/{__version__}"
@staticmethod
def connect_to_elasticsearch(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> "Elasticsearch":
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
es_client = elasticsearch.Elasticsearch(
**connection_params,
headers={"user-agent": ElasticsearchStore.get_user_agent()},
)
try:
es_client.info()
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise e
return es_client
@property
def embeddings(self) -> Optional[Embeddings]:
return self.embedding
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return Elasticsearch documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query,
in descending order of similarity.
"""
results = self._search(query=query, k=k, filter=filter, **kwargs)
return [doc for doc, _ in results]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
fields: Optional[List[str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
fields: Other fields to get from elasticsearch source. These fields
will be added to the document metadata.
Returns:
List[Document]: A list of Documents selected by maximal marginal relevance.
"""
if self.embedding is None:
raise ValueError("You must provide an embedding function to perform MMR")
remove_vector_query_field_from_metadata = True
if fields is None:
fields = [self.vector_query_field]
elif self.vector_query_field not in fields:
fields.append(self.vector_query_field)
else:
remove_vector_query_field_from_metadata = False
# Embed the query
query_embedding = self.embedding.embed_query(query)
# Fetch the initial documents
got_docs = self._search(
query_vector=query_embedding, k=fetch_k, fields=fields, **kwargs
)
# Get the embeddings for the fetched documents
got_embeddings = [doc.metadata[self.vector_query_field] for doc, _ in got_docs]
# Select documents using maximal marginal relevance
selected_indices = maximal_marginal_relevance(
np.array(query_embedding), got_embeddings, lambda_mult=lambda_mult, k=k
)
selected_docs = [got_docs[i][0] for i in selected_indices]
if remove_vector_query_field_from_metadata:
for doc in selected_docs:
del doc.metadata[self.vector_query_field]
return selected_docs
def similarity_search_with_score(
self, query: str, k: int = 4, filter: Optional[List[dict]] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return Elasticsearch documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query and score for each
"""
return self._search(query=query, k=k, filter=filter, **kwargs)
def similarity_search_by_vector_with_relevance_scores(
self,
embedding: List[float],
k: int = 4,
filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return Elasticsearch documents most similar to query, along with scores.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the embedding and score for each
"""
return self._search(query_vector=embedding, k=k, filter=filter, **kwargs)
def _search(
self,
query: Optional[str] = None,
k: int = 4,
query_vector: Union[List[float], None] = None,
fetch_k: int = 50,
fields: Optional[List[str]] = None,
filter: Optional[List[dict]] = None,
custom_query: Optional[Callable[[Dict, Union[str, None]], Dict]] = None,
) -> List[Tuple[Document, float]]:
"""Return Elasticsearch documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
query_vector: Embedding to look up documents similar to.
fetch_k: Number of candidates to fetch from each shard.
Defaults to 50.
fields: List of fields to return from Elasticsearch.
Defaults to only returning the text field.
filter: Array of Elasticsearch filter clauses to apply to the query.
custom_query: Function to modify the Elasticsearch
query body before it is sent to Elasticsearch.
Returns:
List of Documents most similar to the query and score for each
"""
if fields is None:
fields = []
if "metadata" not in fields:
fields.append("metadata")
if self.query_field not in fields:
fields.append(self.query_field)
if self.embedding and query is not None:
query_vector = self.embedding.embed_query(query)
query_body = self.strategy.query(
query_vector=query_vector,
query=query,
k=k,
fetch_k=fetch_k,
vector_query_field=self.vector_query_field,
text_field=self.query_field,
filter=filter or [],
similarity=self.distance_strategy,
)
logger.debug(f"Query body: {query_body}")
if custom_query is not None:
query_body = custom_query(query_body, query)
logger.debug(f"Calling custom_query, Query body now: {query_body}")
# Perform the kNN search on the Elasticsearch index and return the results.
response = self.client.search(
index=self.index_name,
**query_body,
size=k,
source=fields,
)
docs_and_scores = []
for hit in response["hits"]["hits"]:
for field in fields:
if field in hit["_source"] and field not in [
"metadata",
self.query_field,
]:
hit["_source"]["metadata"][field] = hit["_source"][field]
docs_and_scores.append(
(
Document(
page_content=hit["_source"].get(self.query_field, ""),
metadata=hit["_source"]["metadata"],
),
hit["_score"],
)
)
return docs_and_scores
def delete(
self,
ids: Optional[List[str]] = None,
refresh_indices: Optional[bool] = True,
**kwargs: Any,
) -> Optional[bool]:
"""Delete documents from the Elasticsearch index.
Args:
ids: List of ids of documents to delete.
refresh_indices: Whether to refresh the index
after deleting documents. Defaults to True.
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
body = []
if ids is None:
raise ValueError("ids must be provided.")
for _id in ids:
body.append({"_op_type": "delete", "_index": self.index_name, "_id": _id})
if len(body) > 0:
try:
bulk(self.client, body, refresh=refresh_indices, ignore_status=404)
logger.debug(f"Deleted {len(body)} texts from index")
return True
except BulkIndexError as e:
logger.error(f"Error deleting texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise e
else:
logger.debug("No texts to delete from index")
return False
def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the Elasticsearch index if it doesn't already exist.
Args:
index_name: Name of the Elasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None and self.strategy.require_inference():
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
self.strategy.before_index_setup(
client=self.client,
text_field=self.query_field,
vector_query_field=self.vector_query_field,
)
indexSettings = self.strategy.index(
vector_query_field=self.vector_query_field,
dims_length=dims_length,
similarity=self.distance_strategy,
)
logger.debug(
f"Creating index {index_name} with mappings {indexSettings['mappings']}"
)
self.client.indices.create(index=index_name, **indexSettings)
def __add(
self,
texts: Iterable[str],
embeddings: Optional[List[List[float]]],
metadatas: Optional[List[Dict[Any, Any]]] = None,
ids: Optional[List[str]] = None,
refresh_indices: bool = True,
create_index_if_not_exists: bool = True,
bulk_kwargs: Optional[Dict] = None,
**kwargs: Any,
) -> List[str]:
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
bulk_kwargs = bulk_kwargs or {}
ids = ids or [str(uuid.uuid4()) for _ in texts]
requests = []
if create_index_if_not_exists:
if embeddings:
dims_length = len(embeddings[0])
else:
dims_length = None
self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
request = {
"_op_type": "index",
"_index": self.index_name,
self.query_field: text,
"metadata": metadata,
"_id": ids[i],
}
if embeddings:
request[self.vector_query_field] = embeddings[i]
requests.append(request)
if len(requests) > 0:
try:
success, failed = bulk(
self.client,
requests,
stats_only=True,
refresh=refresh_indices,
**bulk_kwargs,
)
logger.debug(
f"Added {success} and failed to add {failed} texts to index"
)
logger.debug(f"added texts {ids} to index")
return ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise e
else:
logger.debug("No texts to add to index")
return []
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[Any, Any]]] = None,
ids: Optional[List[str]] = None,
refresh_indices: bool = True,
create_index_if_not_exists: bool = True,
bulk_kwargs: Optional[Dict] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
refresh_indices: Whether to refresh the Elasticsearch indices
after adding the texts.
create_index_if_not_exists: Whether to create the Elasticsearch
index if it doesn't already exist.
*bulk_kwargs: Additional arguments to pass to Elasticsearch bulk.
- chunk_size: Optional. Number of texts to add to the
index at a time. Defaults to 500.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if self.embedding is not None:
# If no search_type requires inference, we use the provided
# embedding function to embed the texts.
embeddings = self.embedding.embed_documents(list(texts))
else:
# the search_type doesn't require inference, so we don't need to
# embed the texts.
embeddings = None
return self.__add(
texts,
embeddings,
metadatas=metadatas,
ids=ids,
refresh_indices=refresh_indices,
create_index_if_not_exists=create_index_if_not_exists,
bulk_kwargs=bulk_kwargs,
kwargs=kwargs,
)
def add_embeddings(
self,
text_embeddings: Iterable[Tuple[str, List[float]]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
refresh_indices: bool = True,
create_index_if_not_exists: bool = True,
bulk_kwargs: Optional[Dict] = None,
**kwargs: Any,
) -> List[str]:
"""Add the given texts and embeddings to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
refresh_indices: Whether to refresh the Elasticsearch indices
after adding the texts.
create_index_if_not_exists: Whether to create the Elasticsearch
index if it doesn't already exist.
            bulk_kwargs: Additional arguments to pass to Elasticsearch bulk.
- chunk_size: Optional. Number of texts to add to the
index at a time. Defaults to 500.
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts, embeddings = zip(*text_embeddings)
return self.__add(
list(texts),
list(embeddings),
metadatas=metadatas,
ids=ids,
refresh_indices=refresh_indices,
create_index_if_not_exists=create_index_if_not_exists,
bulk_kwargs=bulk_kwargs,
            **kwargs,
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[Dict[str, Any]]] = None,
bulk_kwargs: Optional[Dict] = None,
**kwargs: Any,
) -> "ElasticsearchStore":
"""Construct ElasticsearchStore wrapper from raw documents.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
db = ElasticsearchStore.from_texts(
texts,
                    # embeddings optional if using
                    # a strategy that doesn't require inference
embeddings,
index_name="langchain-demo",
es_url="http://localhost:9200"
)
Args:
texts: List of texts to add to the Elasticsearch index.
embedding: Embedding function to use to embed the texts.
metadatas: Optional list of metadatas associated with the texts.
index_name: Name of the Elasticsearch index to create.
es_url: URL of the Elasticsearch instance to connect to.
cloud_id: Cloud ID of the Elasticsearch instance to connect to.
es_user: Username to use when connecting to Elasticsearch.
es_password: Password to use when connecting to Elasticsearch.
es_api_key: API key to use when connecting to Elasticsearch.
es_connection: Optional pre-existing Elasticsearch connection.
vector_query_field: Optional. Name of the field to
store the embedding vectors in.
query_field: Optional. Name of the field to store the texts in.
distance_strategy: Optional. Name of the distance
strategy to use. Defaults to "COSINE".
can be one of "COSINE",
"EUCLIDEAN_DISTANCE", "DOT_PRODUCT".
bulk_kwargs: Optional. Additional arguments to pass to
Elasticsearch bulk.
"""
elasticsearchStore = ElasticsearchStore._create_cls_from_kwargs(
embedding=embedding, **kwargs
)
# Encode the provided texts and add them to the newly created index.
elasticsearchStore.add_texts(
texts, metadatas=metadatas, bulk_kwargs=bulk_kwargs
)
return elasticsearchStore
@staticmethod
def _create_cls_from_kwargs(
embedding: Optional[Embeddings] = None, **kwargs: Any
) -> "ElasticsearchStore":
index_name = kwargs.get("index_name")
if index_name is None:
raise ValueError("Please provide an index_name.")
es_connection = kwargs.get("es_connection")
es_cloud_id = kwargs.get("es_cloud_id")
es_url = kwargs.get("es_url")
es_user = kwargs.get("es_user")
es_password = kwargs.get("es_password")
es_api_key = kwargs.get("es_api_key")
vector_query_field = kwargs.get("vector_query_field")
query_field = kwargs.get("query_field")
distance_strategy = kwargs.get("distance_strategy")
strategy = kwargs.get("strategy", ElasticsearchStore.ApproxRetrievalStrategy())
optional_args = {}
if vector_query_field is not None:
optional_args["vector_query_field"] = vector_query_field
if query_field is not None:
optional_args["query_field"] = query_field
return ElasticsearchStore(
index_name=index_name,
embedding=embedding,
es_url=es_url,
es_connection=es_connection,
es_cloud_id=es_cloud_id,
es_user=es_user,
es_password=es_password,
es_api_key=es_api_key,
strategy=strategy,
distance_strategy=distance_strategy,
**optional_args,
)
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Optional[Embeddings] = None,
bulk_kwargs: Optional[Dict] = None,
**kwargs: Any,
) -> "ElasticsearchStore":
"""Construct ElasticsearchStore wrapper from documents.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
db = ElasticsearchStore.from_documents(
texts,
embeddings,
index_name="langchain-demo",
es_url="http://localhost:9200"
)
Args:
texts: List of texts to add to the Elasticsearch index.
embedding: Embedding function to use to embed the texts.
Do not provide if using a strategy
that doesn't require inference.
metadatas: Optional list of metadatas associated with the texts.
index_name: Name of the Elasticsearch index to create.
es_url: URL of the Elasticsearch instance to connect to.
cloud_id: Cloud ID of the Elasticsearch instance to connect to.
es_user: Username to use when connecting to Elasticsearch.
es_password: Password to use when connecting to Elasticsearch.
es_api_key: API key to use when connecting to Elasticsearch.
es_connection: Optional pre-existing Elasticsearch connection.
vector_query_field: Optional. Name of the field
to store the embedding vectors in.
query_field: Optional. Name of the field to store the texts in.
bulk_kwargs: Optional. Additional arguments to pass to
Elasticsearch bulk.
"""
elasticsearchStore = ElasticsearchStore._create_cls_from_kwargs(
embedding=embedding, **kwargs
)
# Encode the provided texts and add them to the newly created index.
elasticsearchStore.add_documents(documents, bulk_kwargs=bulk_kwargs)
return elasticsearchStore
@staticmethod
def ExactRetrievalStrategy() -> "ExactRetrievalStrategy":
"""Used to perform brute force / exact
nearest neighbor search via script_score."""
return ExactRetrievalStrategy()
@staticmethod
def ApproxRetrievalStrategy(
query_model_id: Optional[str] = None,
hybrid: Optional[bool] = False,
) -> "ApproxRetrievalStrategy":
"""Used to perform approximate nearest neighbor search
using the HNSW algorithm.
At build index time, this strategy will create a
dense vector field in the index and store the
embedding vectors in the index.
At query time, the text will either be embedded using the
provided embedding function or the query_model_id
will be used to embed the text using the model
deployed to Elasticsearch.
        If query_model_id is used, do not provide an embedding function.
Args:
query_model_id: Optional. ID of the model to use to
embed the query text within the stack. Requires
embedding model to be deployed to Elasticsearch.
hybrid: Optional. If True, will perform a hybrid search
using both the knn query and a text query.
Defaults to False.
"""
return ApproxRetrievalStrategy(query_model_id=query_model_id, hybrid=hybrid)
@staticmethod
def SparseVectorRetrievalStrategy(
model_id: Optional[str] = None,
) -> "SparseRetrievalStrategy":
"""Used to perform sparse vector search via text_expansion.
Used for when you want to use ELSER model to perform document search.
At build index time, this strategy will create a pipeline that
will embed the text using the ELSER model and store the
resulting tokens in the index.
At query time, the text will be embedded using the ELSER
model and the resulting tokens will be used to
perform a text_expansion query.
Args:
model_id: Optional. Default is ".elser_model_1".
ID of the model to use to embed the query text
within the stack. Requires embedding model to be
deployed to Elasticsearch.
"""
return SparseRetrievalStrategy(model_id=model_id)
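# Illustrative usage sketch, not part of the library API: it shows how the
# strategy factories above are typically combined with `from_texts`. The URL,
# index names, texts and embedding model are placeholder assumptions; the
# sparse example additionally assumes the ELSER model is deployed in the stack.
def _example_elasticsearch_store_usage() -> None:
    from langchain.embeddings.openai import OpenAIEmbeddings
    # Approximate kNN search with client-side embeddings (default strategy).
    store = ElasticsearchStore.from_texts(
        ["harrison worked at kensho"],
        OpenAIEmbeddings(),
        index_name="langchain-demo",
        es_url="http://localhost:9200",
        strategy=ElasticsearchStore.ApproxRetrievalStrategy(),
    )
    print(store.similarity_search("Where did harrison work?", k=1))
    # Sparse (ELSER) search: no embedding function, inference runs in Elasticsearch.
    sparse_store = ElasticsearchStore.from_texts(
        ["harrison worked at kensho"],
        index_name="langchain-demo-elser",
        es_url="http://localhost:9200",
        strategy=ElasticsearchStore.SparseVectorRetrievalStrategy(),
    )
    print(sparse_store.similarity_search("Where did harrison work?", k=1))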
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~memory~chat_message_histories~elasticsearch.py | import json
import logging
from time import time
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain.schema import BaseChatMessageHistory
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
logger = logging.getLogger(__name__)
class ElasticsearchChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in Elasticsearch.
Args:
es_url: URL of the Elasticsearch instance to connect to.
es_cloud_id: Cloud ID of the Elasticsearch instance to connect to.
es_user: Username to use when connecting to Elasticsearch.
es_password: Password to use when connecting to Elasticsearch.
es_api_key: API key to use when connecting to Elasticsearch.
es_connection: Optional pre-existing Elasticsearch connection.
index: Name of the index to use.
session_id: Arbitrary key that is used to store the messages
of a single chat session.
"""
def __init__(
self,
index: str,
session_id: str,
*,
es_connection: Optional["Elasticsearch"] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_api_key: Optional[str] = None,
es_password: Optional[str] = None,
):
self.index: str = index
self.session_id: str = session_id
# Initialize Elasticsearch client from passed client arg or connection info
if es_connection is not None:
self.client = es_connection.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self.client = ElasticsearchChatMessageHistory.connect_to_elasticsearch(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing Elasticsearch connection, \
or valid credentials for creating a new connection."""
)
if self.client.indices.exists(index=index):
logger.debug(
f"Chat history index {index} already exists, skipping creation."
)
else:
logger.debug(f"Creating index {index} for storing chat history.")
self.client.indices.create(
index=index,
mappings={
"properties": {
"session_id": {"type": "keyword"},
"created_at": {"type": "date"},
"history": {"type": "text"},
}
},
)
@staticmethod
def get_user_agent() -> str:
from langchain import __version__
return f"langchain-py-ms/{__version__}"
@staticmethod
def connect_to_elasticsearch(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> "Elasticsearch":
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
es_client = elasticsearch.Elasticsearch(
**connection_params,
headers={"user-agent": ElasticsearchChatMessageHistory.get_user_agent()},
)
try:
es_client.info()
except Exception as err:
logger.error(f"Error connecting to Elasticsearch: {err}")
raise err
return es_client
@property
def messages(self) -> List[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from Elasticsearch"""
try:
from elasticsearch import ApiError
result = self.client.search(
index=self.index,
query={"term": {"session_id": self.session_id}},
sort="created_at:asc",
)
except ApiError as err:
logger.error(f"Could not retrieve messages from Elasticsearch: {err}")
raise err
if result and len(result["hits"]["hits"]) > 0:
items = [
json.loads(document["_source"]["history"])
for document in result["hits"]["hits"]
]
else:
items = []
return messages_from_dict(items)
def add_message(self, message: BaseMessage) -> None:
"""Add a message to the chat session in Elasticsearch"""
try:
from elasticsearch import ApiError
self.client.index(
index=self.index,
document={
"session_id": self.session_id,
"created_at": round(time() * 1000),
"history": json.dumps(_message_to_dict(message)),
},
refresh=True,
)
except ApiError as err:
logger.error(f"Could not add message to Elasticsearch: {err}")
raise err
def clear(self) -> None:
"""Clear session memory in Elasticsearch"""
try:
from elasticsearch import ApiError
self.client.delete_by_query(
index=self.index,
query={"term": {"session_id": self.session_id}},
refresh=True,
)
except ApiError as err:
logger.error(f"Could not clear session memory in Elasticsearch: {err}")
raise err
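# Illustrative usage sketch: the Elasticsearch URL, index name and session id
# below are placeholder assumptions.
def _example_chat_history_usage() -> None:
    history = ElasticsearchChatMessageHistory(
        index="chat-history",
        session_id="session-123",
        es_url="http://localhost:9200",
    )
    history.add_user_message("Hello, how are you?")
    history.add_ai_message("I am doing well, thank you!")
    print(history.messages)  # [HumanMessage(...), AIMessage(...)]
    history.clear()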
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~utilities~test_pubmed.py | """Integration test for PubMed API Wrapper."""
from typing import Any, List
import pytest
from langchain.agents.load_tools import load_tools
from langchain.schema import Document
from langchain.tools import PubmedQueryRun
from langchain.tools.base import BaseTool
from langchain.utilities import PubMedAPIWrapper
xmltodict = pytest.importorskip("xmltodict")
@pytest.fixture
def api_client() -> PubMedAPIWrapper:
return PubMedAPIWrapper()
def test_run_success(api_client: PubMedAPIWrapper) -> None:
"""Test that returns the correct answer"""
search_string = (
"Examining the Validity of ChatGPT in Identifying "
"Relevant Nephrology Literature"
)
output = api_client.run(search_string)
test_string = (
"Examining the Validity of ChatGPT in Identifying "
"Relevant Nephrology Literature: Findings and Implications"
)
assert test_string in output
assert len(output) == api_client.doc_content_chars_max
def test_run_returns_no_result(api_client: PubMedAPIWrapper) -> None:
"""Test that gives no result."""
output = api_client.run("1605.08386WWW")
assert "No good PubMed Result was found" == output
def test_retrieve_article_returns_book_abstract(api_client: PubMedAPIWrapper) -> None:
"""Test that returns the excerpt of a book."""
output_nolabel = api_client.retrieve_article("25905357", "")
output_withlabel = api_client.retrieve_article("29262144", "")
test_string_nolabel = (
"Osteoporosis is a multifactorial disorder associated with low bone mass and "
"enhanced skeletal fragility. Although"
)
assert test_string_nolabel in output_nolabel["Summary"]
assert (
"Wallenberg syndrome was first described in 1808 by Gaspard Vieusseux. However,"
in output_withlabel["Summary"]
)
def test_retrieve_article_returns_article_abstract(
api_client: PubMedAPIWrapper,
) -> None:
"""Test that returns the abstract of an article."""
output_nolabel = api_client.retrieve_article("37666905", "")
output_withlabel = api_client.retrieve_article("37666551", "")
test_string_nolabel = (
"This work aims to: (1) Provide maximal hand force data on six different "
"grasp types for healthy subjects; (2) detect grasp types with maximal "
"force significantly affected by hand osteoarthritis (HOA) in women; (3) "
"look for predictors to detect HOA from the maximal forces using discriminant "
"analyses."
)
assert test_string_nolabel in output_nolabel["Summary"]
test_string_withlabel = (
"OBJECTIVES: To assess across seven hospitals from six different countries "
"the extent to which the COVID-19 pandemic affected the volumes of orthopaedic "
"hospital admissions and patient outcomes for non-COVID-19 patients admitted "
"for orthopaedic care."
)
assert test_string_withlabel in output_withlabel["Summary"]
def test_retrieve_article_no_abstract_available(api_client: PubMedAPIWrapper) -> None:
"""Test that returns 'No abstract available'."""
output = api_client.retrieve_article("10766884", "")
assert "No abstract available" == output["Summary"]
def assert_docs(docs: List[Document]) -> None:
for doc in docs:
assert doc.metadata
assert set(doc.metadata) == {
"Copyright Information",
"uid",
"Title",
"Published",
}
def test_load_success(api_client: PubMedAPIWrapper) -> None:
"""Test that returns one document"""
docs = api_client.load_docs("chatgpt")
assert len(docs) == api_client.top_k_results == 3
assert_docs(docs)
def test_load_returns_no_result(api_client: PubMedAPIWrapper) -> None:
"""Test that returns no docs"""
docs = api_client.load_docs("1605.08386WWW")
assert len(docs) == 0
def test_load_returns_limited_docs() -> None:
"""Test that returns several docs"""
expected_docs = 2
api_client = PubMedAPIWrapper(top_k_results=expected_docs)
docs = api_client.load_docs("ChatGPT")
assert len(docs) == expected_docs
assert_docs(docs)
def test_load_returns_full_set_of_metadata() -> None:
"""Test that returns several docs"""
api_client = PubMedAPIWrapper(load_max_docs=1, load_all_available_meta=True)
docs = api_client.load_docs("ChatGPT")
assert len(docs) == 3
for doc in docs:
assert doc.metadata
assert set(doc.metadata).issuperset(
{"Copyright Information", "Published", "Title", "uid"}
)
def _load_pubmed_from_universal_entry(**kwargs: Any) -> BaseTool:
tools = load_tools(["pubmed"], **kwargs)
assert len(tools) == 1, "loaded more than 1 tool"
return tools[0]
def test_load_pubmed_from_universal_entry() -> None:
pubmed_tool = _load_pubmed_from_universal_entry()
search_string = (
"Examining the Validity of ChatGPT in Identifying "
"Relevant Nephrology Literature"
)
output = pubmed_tool(search_string)
test_string = (
"Examining the Validity of ChatGPT in Identifying "
"Relevant Nephrology Literature: Findings and Implications"
)
assert test_string in output
def test_load_pubmed_from_universal_entry_with_params() -> None:
params = {
"top_k_results": 1,
}
pubmed_tool = _load_pubmed_from_universal_entry(**params)
assert isinstance(pubmed_tool, PubmedQueryRun)
wp = pubmed_tool.api_wrapper
assert wp.top_k_results == 1, "failed to assert top_k_results"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_confident.py | """Test Confident."""
def test_confident_deepeval() -> None:
"""Test valid call to Beam."""
from deepeval.metrics.answer_relevancy import AnswerRelevancy
from langchain.callbacks.confident_callback import DeepEvalCallbackHandler
from langchain.llms import OpenAI
answer_relevancy = AnswerRelevancy(minimum_score=0.3)
deepeval_callback = DeepEvalCallbackHandler(
implementation_name="exampleImplementation", metrics=[answer_relevancy]
)
llm = OpenAI(
temperature=0,
callbacks=[deepeval_callback],
verbose=True,
openai_api_key="<YOUR_API_KEY>",
)
llm.generate(
[
"What is the best evaluation tool out there? (no bias at all)",
]
)
assert answer_relevancy.is_successful(), "Answer not relevant"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_figma.py | from langchain.document_loaders.figma import FigmaFileLoader
ACCESS_TOKEN = ""
IDS = ""
KEY = ""
def test_figma_file_loader() -> None:
"""Test Figma file loader."""
loader = FigmaFileLoader(ACCESS_TOKEN, IDS, KEY)
docs = loader.load()
assert len(docs) == 1
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~indexes~prompts~knowledge_triplet_extraction.py | # flake8: noqa
from langchain.graphs.networkx_graph import KG_TRIPLE_DELIMITER
from langchain.prompts.prompt import PromptTemplate
_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = (
"You are a networked intelligence helping a human track knowledge triples"
" about all relevant people, things, concepts, etc. and integrating"
" them with your knowledge stored within your weights"
" as well as that stored in a knowledge graph."
" Extract all of the knowledge triples from the text."
" A knowledge triple is a clause that contains a subject, a predicate,"
" and an object. The subject is the entity being described,"
" the predicate is the property of the subject that is being"
" described, and the object is the value of the property.\n\n"
"EXAMPLE\n"
"It's a state in the US. It's also the number 1 producer of gold in the US.\n\n"
f"Output: (Nevada, is a, state){KG_TRIPLE_DELIMITER}(Nevada, is in, US)"
f"{KG_TRIPLE_DELIMITER}(Nevada, is the number 1 producer of, gold)\n"
"END OF EXAMPLE\n\n"
"EXAMPLE\n"
"I'm going to the store.\n\n"
"Output: NONE\n"
"END OF EXAMPLE\n\n"
"EXAMPLE\n"
"Oh huh. I know Descartes likes to drive antique scooters and play the mandolin.\n"
f"Output: (Descartes, likes to drive, antique scooters){KG_TRIPLE_DELIMITER}(Descartes, plays, mandolin)\n"
"END OF EXAMPLE\n\n"
"EXAMPLE\n"
"{text}"
"Output:"
)
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["text"],
template=_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE,
)
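# Illustrative usage sketch: formatting the extraction prompt with a sample
# sentence (the sentence is a placeholder) before sending it to an LLM.
def _example_prompt_usage() -> None:
    formatted = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT.format(
        text="Sam is a teacher who lives in Berlin.\n\n"
    )
    print(formatted)  # Ends with "Output:", ready for an LLM completion call.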
| [
"You are a networked intelligence helping a human track knowledge triples about all relevant people, things, concepts, etc. and integrating them with your knowledge stored within your weights as well as that stored in a knowledge graph. Extract all of the knowledge triples from the text. A knowledge triple is a clause that contains a subject, a predicate, and an object. The subject is the entity being described, the predicate is the property of the subject that is being described, and the object is the value of the property.\n\nEXAMPLE\nIt's a state in the US. It's also the number 1 producer of gold in the US.\n\nOutput: (Nevada, is a, state)PLACEHOLDER(Nevada, is in, US)PLACEHOLDER(Nevada, is the number 1 producer of, gold)\nEND OF EXAMPLE\n\nEXAMPLE\nI'm going to the store.\n\nOutput: NONE\nEND OF EXAMPLE\n\nEXAMPLE\nOh huh. I know Descartes likes to drive antique scooters and play the mandolin.\nOutput: (Descartes, likes to drive, antique scooters)PLACEHOLDER(Descartes, plays, mandolin)\nEND OF EXAMPLE\n\nEXAMPLE\n{text}Output:"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~llama_index.py | from typing import Any, Dict, List, cast
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import Field
from langchain.schema import BaseRetriever, Document
class LlamaIndexRetriever(BaseRetriever):
"""`LlamaIndex` retriever.
    It is used for question-answering with sources over
    a LlamaIndex data structure."""
index: Any
"""LlamaIndex index to query."""
query_kwargs: Dict = Field(default_factory=dict)
"""Keyword arguments to pass to the query method."""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query."""
try:
from llama_index.indices.base import BaseGPTIndex
from llama_index.response.schema import Response
except ImportError:
raise ImportError(
"You need to install `pip install llama-index` to use this retriever."
)
index = cast(BaseGPTIndex, self.index)
response = index.query(query, response_mode="no_text", **self.query_kwargs)
response = cast(Response, response)
# parse source nodes
docs = []
for source_node in response.source_nodes:
metadata = source_node.extra_info or {}
docs.append(
Document(page_content=source_node.source_text, metadata=metadata)
)
return docs
class LlamaIndexGraphRetriever(BaseRetriever):
"""`LlamaIndex` graph data structure retriever.
It is used for question-answering with sources over an LlamaIndex
graph data structure."""
graph: Any
"""LlamaIndex graph to query."""
query_configs: List[Dict] = Field(default_factory=list)
"""List of query configs to pass to the query method."""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query."""
try:
from llama_index.composability.graph import (
QUERY_CONFIG_TYPE,
ComposableGraph,
)
from llama_index.response.schema import Response
except ImportError:
raise ImportError(
"You need to install `pip install llama-index` to use this retriever."
)
graph = cast(ComposableGraph, self.graph)
# for now, inject response_mode="no_text" into query configs
for query_config in self.query_configs:
query_config["response_mode"] = "no_text"
query_configs = cast(List[QUERY_CONFIG_TYPE], self.query_configs)
response = graph.query(query, query_configs=query_configs)
response = cast(Response, response)
# parse source nodes
docs = []
for source_node in response.source_nodes:
metadata = source_node.extra_info or {}
docs.append(
Document(page_content=source_node.source_text, metadata=metadata)
)
return docs
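# Illustrative usage sketch: `index` is assumed to be a pre-built llama_index
# index object created elsewhere (see the llama-index docs); the query string
# and similarity_top_k value are placeholders.
def _example_llama_index_retriever_usage(index: Any) -> None:
    retriever = LlamaIndexRetriever(index=index, query_kwargs={"similarity_top_k": 2})
    docs = retriever.get_relevant_documents("What does LangChain integrate with?")
    print([doc.page_content for doc in docs])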
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_baiducloud_vector_search.py | """Test BESVectorStore functionality."""
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.vectorstores import BESVectorStore
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _bes_vector_db_from_texts(
metadatas: Optional[List[dict]] = None, drop: bool = True
) -> BESVectorStore:
return BESVectorStore.from_texts(
fake_texts,
FakeEmbeddings(),
metadatas=metadatas,
bes_url="http://10.0.X.X",
)
def test_bes_vector_db() -> None:
"""Test end to end construction and search."""
docsearch = _bes_vector_db_from_texts()
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_slack.py | """Tests for the Slack directory loader"""
from pathlib import Path
from langchain.document_loaders import SlackDirectoryLoader
def test_slack_directory_loader() -> None:
"""Test Slack directory loader."""
file_path = Path(__file__).parent.parent / "examples/slack_export.zip"
loader = SlackDirectoryLoader(str(file_path))
docs = loader.load()
assert len(docs) == 5
def test_slack_directory_loader_urls() -> None:
"""Test workspace URLS are passed through in the SlackDirectoryloader."""
file_path = Path(__file__).parent.parent / "examples/slack_export.zip"
workspace_url = "example_workspace.com"
loader = SlackDirectoryLoader(str(file_path), workspace_url)
docs = loader.load()
for doc in docs:
assert doc.metadata["source"].startswith(workspace_url)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~clarifai.py | import logging
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class Clarifai(LLM):
"""Clarifai large language models.
To use, you should have an account on the Clarifai platform,
the ``clarifai`` python package installed, and the
environment variable ``CLARIFAI_PAT`` set with your PAT key,
or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Clarifai
clarifai_llm = Clarifai(pat=CLARIFAI_PAT, \
user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
"""
stub: Any #: :meta private:
userDataObject: Any
model_id: Optional[str] = None
"""Model id to use."""
model_version_id: Optional[str] = None
"""Model version id to use."""
app_id: Optional[str] = None
"""Clarifai application id to use."""
user_id: Optional[str] = None
"""Clarifai user id to use."""
pat: Optional[str] = None
api_base: str = "https://api.clarifai.com"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that we have all required info to access Clarifai
platform and python package exists in environment."""
values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT")
user_id = values.get("user_id")
app_id = values.get("app_id")
model_id = values.get("model_id")
if values["pat"] is None:
raise ValueError("Please provide a pat.")
if user_id is None:
raise ValueError("Please provide a user_id.")
if app_id is None:
raise ValueError("Please provide a app_id.")
if model_id is None:
raise ValueError("Please provide a model_id.")
try:
from clarifai.auth.helper import ClarifaiAuthHelper
from clarifai.client import create_stub
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
auth = ClarifaiAuthHelper(
user_id=user_id,
app_id=app_id,
pat=values["pat"],
base=values["api_base"],
)
values["userDataObject"] = auth.get_user_app_id_proto()
values["stub"] = create_stub(auth)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Clarifai API."""
return {}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
**{
"user_id": self.user_id,
"app_id": self.app_id,
"model_id": self.model_id,
}
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "clarifai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Clarfai's PostModelOutputs endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = clarifai_llm("Tell me a joke.")
"""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
# The userDataObject is created in the overview and
# is required when using a PAT
# If version_id None, Defaults to the latest model version
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=prompt))
)
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_model_failure = (
post_model_outputs_response.outputs[0].status
if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_model_failure}"
)
text = post_model_outputs_response.outputs[0].data.text.raw
# In order to make this consistent with other endpoints, we strip them.
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
# TODO: add caching here.
generations = []
batch_size = 32
for i in range(0, len(prompts), batch_size):
batch = prompts[i : i + batch_size]
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=prompt))
)
for prompt in batch
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_model_failure = (
post_model_outputs_response.outputs[0].status
if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_model_failure}"
)
for output in post_model_outputs_response.outputs:
if stop is not None:
text = enforce_stop_tokens(output.data.text.raw, stop)
else:
text = output.data.text.raw
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
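# Illustrative usage sketch: the PAT and the user/app/model identifiers below
# are placeholder assumptions; substitute values from your Clarifai account.
def _example_clarifai_usage() -> None:
    llm = Clarifai(
        pat="MY_CLARIFAI_PAT",
        user_id="my-user-id",
        app_id="my-app-id",
        model_id="my-model-id",
    )
    print(llm("Tell me a joke."))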
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chat_models~test_anthropic.py | """Test Anthropic API wrapper."""
from typing import List
import pytest
from langchain.callbacks.manager import CallbackManager
from langchain.chat_models.anthropic import (
ChatAnthropic,
)
from langchain.schema import ChatGeneration, LLMResult
from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
@pytest.mark.scheduled
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
chat = ChatAnthropic(model="test")
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_anthropic_generate() -> None:
"""Test generate method of anthropic."""
chat = ChatAnthropic(model="test")
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="How many toes do dogs have?")]
]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
@pytest.mark.scheduled
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
chat = ChatAnthropic(model="test", streaming=True)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatAnthropic(
model="test",
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Write me a sentence with 10 words.")
chat([message])
assert callback_handler.llm_streams > 1
@pytest.mark.scheduled
@pytest.mark.asyncio
async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatAnthropic(
model="test",
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
chat_messages: List[BaseMessage] = [
HumanMessage(content="How many toes do dogs have?")
]
result: LLMResult = await chat.agenerate([chat_messages])
assert callback_handler.llm_streams > 1
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
| [
"How many toes do dogs have?",
"Write me a sentence with 10 words.",
"Hello"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~azure_blob_storage_container.py | from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.azure_blob_storage_file import (
AzureBlobStorageFileLoader,
)
from langchain.document_loaders.base import BaseLoader
class AzureBlobStorageContainerLoader(BaseLoader):
"""Load from `Azure Blob Storage` container."""
def __init__(self, conn_str: str, container: str, prefix: str = ""):
"""Initialize with connection string, container and blob prefix."""
self.conn_str = conn_str
"""Connection string for Azure Blob Storage."""
self.container = container
"""Container name."""
self.prefix = prefix
"""Prefix for blob names."""
def load(self) -> List[Document]:
"""Load documents."""
try:
from azure.storage.blob import ContainerClient
except ImportError as exc:
raise ImportError(
"Could not import azure storage blob python package. "
"Please install it with `pip install azure-storage-blob`."
) from exc
container = ContainerClient.from_connection_string(
conn_str=self.conn_str, container_name=self.container
)
docs = []
blob_list = container.list_blobs(name_starts_with=self.prefix)
for blob in blob_list:
loader = AzureBlobStorageFileLoader(
self.conn_str, self.container, blob.name # type: ignore
)
docs.extend(loader.load())
return docs
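# Illustrative usage sketch: the connection string, container name and prefix
# below are placeholder assumptions.
def _example_container_loader_usage() -> None:
    loader = AzureBlobStorageContainerLoader(
        conn_str="DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>",
        container="my-container",
        prefix="reports/",
    )
    docs = loader.load()
    print(f"Loaded {len(docs)} documents")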
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_sitemap.py | from pathlib import Path
from typing import Any
import pytest
from langchain.document_loaders import SitemapLoader
from langchain.document_loaders.sitemap import _extract_scheme_and_domain
def test_sitemap() -> None:
"""Test sitemap loader."""
loader = SitemapLoader("https://api.python.langchain.com/sitemap.xml")
documents = loader.load()
assert len(documents) > 1
assert "LangChain Python API" in documents[0].page_content
def test_sitemap_block() -> None:
"""Test sitemap loader."""
loader = SitemapLoader(
"https://api.python.langchain.com/sitemap.xml", blocksize=1, blocknum=1
)
documents = loader.load()
assert len(documents) == 1
assert "LangChain Python API" in documents[0].page_content
def test_sitemap_block_only_one() -> None:
"""Test sitemap loader."""
loader = SitemapLoader(
"https://api.python.langchain.com/sitemap.xml", blocksize=1000000, blocknum=0
)
documents = loader.load()
assert len(documents) > 1
assert "LangChain Python API" in documents[0].page_content
def test_sitemap_block_blocknum_default() -> None:
"""Test sitemap loader."""
loader = SitemapLoader(
"https://api.python.langchain.com/sitemap.xml", blocksize=1000000
)
documents = loader.load()
assert len(documents) > 1
assert "LangChain Python API" in documents[0].page_content
def test_sitemap_block_size_too_small() -> None:
"""Test sitemap loader."""
with pytest.raises(ValueError, match="Sitemap blocksize should be at least 1"):
SitemapLoader("https://api.python.langchain.com/sitemap.xml", blocksize=0)
def test_sitemap_block_num_too_small() -> None:
"""Test sitemap loader."""
with pytest.raises(ValueError, match="Sitemap blocknum can not be lower then 0"):
SitemapLoader(
"https://api.python.langchain.com/sitemap.xml",
blocksize=1000000,
blocknum=-1,
)
def test_sitemap_block_does_not_exists() -> None:
"""Test sitemap loader."""
loader = SitemapLoader(
"https://api.python.langchain.com/sitemap.xml", blocksize=1000000, blocknum=15
)
with pytest.raises(
ValueError,
match="Selected sitemap does not contain enough blocks for given blocknum",
):
loader.load()
def test_filter_sitemap() -> None:
"""Test sitemap loader."""
loader = SitemapLoader(
"https://api.python.langchain.com/sitemap.xml",
filter_urls=["https://api.python.langchain.com/en/stable/"],
)
documents = loader.load()
assert len(documents) == 1
assert "LangChain Python API" in documents[0].page_content
def test_sitemap_metadata() -> None:
    def sitemap_metadata_one(meta: dict, _content: Any) -> dict:
return {**meta, "mykey": "Super Important Metadata"}
"""Test sitemap loader."""
loader = SitemapLoader(
"https://api.python.langchain.com/sitemap.xml",
meta_function=sitemap_metadata_one,
)
documents = loader.load()
assert len(documents) > 1
assert "mykey" in documents[0].metadata
assert "Super Important Metadata" in documents[0].metadata["mykey"]
def test_sitemap_metadata_extraction() -> None:
def sitemap_metadata_two(meta: dict, content: Any) -> dict:
title = content.find("title")
if title:
return {**meta, "title": title.get_text()}
return meta
"""Test sitemap loader."""
loader = SitemapLoader(
"https://api.python.langchain.com/sitemap.xml",
meta_function=sitemap_metadata_two,
)
documents = loader.load()
assert len(documents) > 1
assert "title" in documents[0].metadata
assert "LangChain" in documents[0].metadata["title"]
def test_sitemap_metadata_default() -> None:
"""Test sitemap loader."""
loader = SitemapLoader("https://api.python.langchain.com/sitemap.xml")
documents = loader.load()
assert len(documents) > 1
assert "source" in documents[0].metadata
assert "loc" in documents[0].metadata
def test_local_sitemap() -> None:
"""Test sitemap loader."""
file_path = Path(__file__).parent.parent / "examples/sitemap.xml"
loader = SitemapLoader(str(file_path), is_local=True)
documents = loader.load()
assert len(documents) > 1
assert "🦜️🔗" in documents[0].page_content
def test_extract_domain() -> None:
"""Test domain extraction."""
assert _extract_scheme_and_domain("https://js.langchain.com/sitemap.xml") == (
"https",
"js.langchain.com",
)
assert _extract_scheme_and_domain("http://example.com/path/to/page") == (
"http",
"example.com",
)
assert _extract_scheme_and_domain("ftp://files.example.com") == (
"ftp",
"files.example.com",
)
assert _extract_scheme_and_domain("https://deep.subdomain.example.com") == (
"https",
"deep.subdomain.example.com",
)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~chains~example_generator.py | from typing import List
from langchain.chains.llm import LLMChain
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
def generate_example(
examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
) -> str:
"""Return another example given a list of examples for a prompt."""
prompt = FewShotPromptTemplate(
examples=examples,
suffix=TEST_GEN_TEMPLATE_SUFFIX,
input_variables=[],
example_prompt=prompt_template,
)
chain = LLMChain(llm=llm, prompt=prompt)
return chain.predict()
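# Illustrative usage sketch: the example records, prompt template and LLM
# configuration below are placeholder assumptions.
def _example_generate_example_usage() -> None:
    from langchain.llms import OpenAI
    example_prompt = PromptTemplate(
        input_variables=["animal", "sound"],
        template="Animal: {animal}\nSound: {sound}",
    )
    examples = [
        {"animal": "dog", "sound": "woof"},
        {"animal": "cat", "sound": "meow"},
    ]
    new_example = generate_example(examples, OpenAI(temperature=0.7), example_prompt)
    print(new_example)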
| [
"Add another example."
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~embeddings~test_huggingface_hub.py | """Test HuggingFaceHub embeddings."""
import pytest
from langchain.embeddings import HuggingFaceHubEmbeddings
def test_huggingfacehub_embedding_documents() -> None:
"""Test huggingfacehub embeddings."""
documents = ["foo bar"]
embedding = HuggingFaceHubEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_huggingfacehub_embedding_query() -> None:
"""Test huggingfacehub embeddings."""
document = "foo bar"
embedding = HuggingFaceHubEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 768
def test_huggingfacehub_embedding_invalid_repo() -> None:
"""Test huggingfacehub embedding repo id validation."""
# Only sentence-transformers models are currently supported.
with pytest.raises(ValueError):
HuggingFaceHubEmbeddings(repo_id="allenai/specter")
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~google_cloud_documentai_warehouse.py | """Retriever wrapper for Google Cloud Document AI Warehouse."""
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever
from langchain.utilities.vertexai import get_client_info
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from google.cloud.contentwarehouse_v1 import (
DocumentServiceClient,
RequestMetadata,
SearchDocumentsRequest,
)
from google.cloud.contentwarehouse_v1.services.document_service.pagers import (
SearchDocumentsPager,
)
class GoogleDocumentAIWarehouseRetriever(BaseRetriever):
"""A retriever based on Document AI Warehouse.
    Documents should be created and uploaded in a separate flow;
    this retriever only uses the provided Document AI schema_id
    to search for relevant documents.
More info: https://cloud.google.com/document-ai-warehouse.
"""
location: str = "us"
"""Google Cloud location where Document AI Warehouse is placed."""
project_number: str
"""Google Cloud project number, should contain digits only."""
schema_id: Optional[str] = None
"""Document AI Warehouse schema to query against.
If nothing is provided, all documents in the project will be searched."""
qa_size_limit: int = 5
"""The limit on the number of documents returned."""
client: "DocumentServiceClient" = None #: :meta private:
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validates the environment."""
try: # noqa: F401
from google.cloud.contentwarehouse_v1 import DocumentServiceClient
except ImportError as exc:
raise ImportError(
"google.cloud.contentwarehouse is not installed."
"Please install it with pip install google-cloud-contentwarehouse"
) from exc
values["project_number"] = get_from_dict_or_env(
values, "project_number", "PROJECT_NUMBER"
)
values["client"] = DocumentServiceClient(
client_info=get_client_info(module="document-ai-warehouse")
)
return values
def _prepare_request_metadata(self, user_ldap: str) -> "RequestMetadata":
from google.cloud.contentwarehouse_v1 import RequestMetadata, UserInfo
user_info = UserInfo(id=f"user:{user_ldap}")
return RequestMetadata(user_info=user_info)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
request = self._prepare_search_request(query, **kwargs)
response = self.client.search_documents(request=request)
return self._parse_search_response(response=response)
def _prepare_search_request(
self, query: str, **kwargs: Any
) -> "SearchDocumentsRequest":
from google.cloud.contentwarehouse_v1 import (
DocumentQuery,
SearchDocumentsRequest,
)
try:
user_ldap = kwargs["user_ldap"]
except KeyError:
raise ValueError("Argument user_ldap should be provided!")
request_metadata = self._prepare_request_metadata(user_ldap=user_ldap)
schemas = []
if self.schema_id:
schemas.append(
self.client.document_schema_path(
project=self.project_number,
location=self.location,
document_schema=self.schema_id,
)
)
return SearchDocumentsRequest(
parent=self.client.common_location_path(self.project_number, self.location),
request_metadata=request_metadata,
document_query=DocumentQuery(
query=query, is_nl_query=True, document_schema_names=schemas
),
qa_size_limit=self.qa_size_limit,
)
def _parse_search_response(
self, response: "SearchDocumentsPager"
) -> List[Document]:
documents = []
for doc in response.matching_documents:
metadata = {
"title": doc.document.title,
"source": doc.document.raw_document_path,
}
documents.append(
Document(page_content=doc.search_text_snippet, metadata=metadata)
)
return documents
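# Illustrative usage sketch: the project number, schema id, user LDAP and query
# below are placeholder assumptions; note that `user_ldap` must be supplied on
# every retrieval call.
def _example_docai_warehouse_usage() -> None:
    retriever = GoogleDocumentAIWarehouseRetriever(
        project_number="123456789012",
        schema_id="my-schema-id",
        qa_size_limit=3,
    )
    docs = retriever.get_relevant_documents(
        "How did Q1 sales compare to last year?", user_ldap="jdoe"
    )
    print([doc.metadata["title"] for doc in docs])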
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chat_models~test_tongyi.py | """Test Alibaba Tongyi Chat Model."""
from langchain.callbacks.manager import CallbackManager
from langchain.chat_models.tongyi import ChatTongyi
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
HumanMessage,
LLMResult,
)
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_default_call() -> None:
"""Test default model call."""
chat = ChatTongyi()
response = chat(messages=[HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_model() -> None:
"""Test model kwarg works."""
chat = ChatTongyi(model="qwen-plus")
response = chat(messages=[HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_multiple_history() -> None:
"""Tests multiple history works."""
chat = ChatTongyi()
response = chat(
messages=[
HumanMessage(content="Hello."),
AIMessage(content="Hello!"),
HumanMessage(content="How are you doing?"),
]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_stream() -> None:
"""Test that stream works."""
chat = ChatTongyi(streaming=True)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat(
messages=[
HumanMessage(content="Hello."),
AIMessage(content="Hello!"),
HumanMessage(content="Who are you?"),
],
stream=True,
callbacks=callback_manager,
)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
chat = ChatTongyi()
message = HumanMessage(content="Hi, how are you.")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
| [
"Who are you?",
"Hello.",
"Hello!",
"How are you doing?",
"Hi, how are you.",
"Hello"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~retrievers~test_ensemble.py | import pytest
from langchain.retrievers.bm25 import BM25Retriever
from langchain.retrievers.ensemble import EnsembleRetriever
from langchain.schema import Document
@pytest.mark.requires("rank_bm25")
def test_ensemble_retriever_get_relevant_docs() -> None:
doc_list = [
"I like apples",
"I like oranges",
"Apples and oranges are fruits",
]
dummy_retriever = BM25Retriever.from_texts(doc_list)
dummy_retriever.k = 1
ensemble_retriever = EnsembleRetriever(
retrievers=[dummy_retriever, dummy_retriever]
)
docs = ensemble_retriever.get_relevant_documents("I like apples")
assert len(docs) == 1
@pytest.mark.requires("rank_bm25")
def test_weighted_reciprocal_rank() -> None:
doc1 = Document(page_content="1")
doc2 = Document(page_content="2")
dummy_retriever = BM25Retriever.from_texts(["1", "2"])
ensemble_retriever = EnsembleRetriever(
retrievers=[dummy_retriever, dummy_retriever], weights=[0.4, 0.5], c=0
)
result = ensemble_retriever.weighted_reciprocal_rank([[doc1, doc2], [doc2, doc1]])
assert result[0].page_content == "2"
assert result[1].page_content == "1"
ensemble_retriever.weights = [0.5, 0.4]
result = ensemble_retriever.weighted_reciprocal_rank([[doc1, doc2], [doc2, doc1]])
assert result[0].page_content == "1"
assert result[1].page_content == "2"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~embeddings~modelscope_hub.py | from typing import Any, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra
from langchain.schema.embeddings import Embeddings
class ModelScopeEmbeddings(BaseModel, Embeddings):
"""ModelScopeHub embedding models.
To use, you should have the ``modelscope`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import ModelScopeEmbeddings
model_id = "damo/nlp_corom_sentence-embedding_english-base"
embed = ModelScopeEmbeddings(model_id=model_id, model_revision="v1.0.0")
"""
embed: Any
model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
"""Model name to use."""
model_revision: Optional[str] = None
def __init__(self, **kwargs: Any):
"""Initialize the modelscope"""
super().__init__(**kwargs)
try:
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
except ImportError as e:
raise ImportError(
"Could not import some python packages."
"Please install it with `pip install modelscope`."
) from e
self.embed = pipeline(
Tasks.sentence_embedding,
model=self.model_id,
model_revision=self.model_revision,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a modelscope embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
inputs = {"source_sentence": texts}
embeddings = self.embed(input=inputs)["text_embedding"]
return embeddings.tolist()
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
inputs = {"source_sentence": [text]}
embedding = self.embed(input=inputs)["text_embedding"][0]
return embedding.tolist()
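# Illustrative usage sketch (requires `pip install modelscope`): the model id
# matches the class default and the sample sentences are placeholders.
def _example_modelscope_usage() -> None:
    embedder = ModelScopeEmbeddings(
        model_id="damo/nlp_corom_sentence-embedding_english-base"
    )
    doc_vectors = embedder.embed_documents(["hello world", "goodbye world"])
    query_vector = embedder.embed_query("hello world")
    print(len(doc_vectors), len(query_vector))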
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~blob_loaders~youtube_audio.py | from typing import Iterable, List
from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader
class YoutubeAudioLoader(BlobLoader):
"""Load YouTube urls as audio file(s)."""
def __init__(self, urls: List[str], save_dir: str):
if not isinstance(urls, list):
raise TypeError("urls must be a list")
self.urls = urls
self.save_dir = save_dir
def yield_blobs(self) -> Iterable[Blob]:
"""Yield audio blobs for each url."""
try:
import yt_dlp
except ImportError:
raise ImportError(
"yt_dlp package not found, please install it with "
"`pip install yt_dlp`"
)
# Use yt_dlp to download audio given a YouTube url
ydl_opts = {
"format": "m4a/bestaudio/best",
"noplaylist": True,
"outtmpl": self.save_dir + "/%(title)s.%(ext)s",
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "m4a",
}
],
}
for url in self.urls:
# Download file
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
ydl.download(url)
# Yield the written blobs
loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a")
for blob in loader.yield_blobs():
yield blob
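# Illustrative usage sketch (requires `pip install yt_dlp` and ffmpeg): the
# video URL and save directory below are placeholder assumptions.
def _example_youtube_audio_usage() -> None:
    loader = YoutubeAudioLoader(
        urls=["https://www.youtube.com/watch?v=VIDEO_ID"],
        save_dir="/tmp/youtube_audio",
    )
    for blob in loader.yield_blobs():
        print(blob.path)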
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~tests~integration_tests~chains~test_pal.py | """Test PAL chain."""
from langchain.llms import OpenAI
from langchain_experimental.pal_chain.base import PALChain
def test_math_prompt() -> None:
"""Test math prompt."""
llm = OpenAI(temperature=0, max_tokens=512)
pal_chain = PALChain.from_math_prompt(llm, timeout=None)
question = (
"Jan has three times the number of pets as Marcia. "
"Marcia has two more pets than Cindy. "
"If Cindy has four pets, how many total pets do the three have?"
)
output = pal_chain.run(question)
assert output == "28"
def test_colored_object_prompt() -> None:
"""Test colored object prompt."""
llm = OpenAI(temperature=0, max_tokens=512)
pal_chain = PALChain.from_colored_object_prompt(llm, timeout=None)
question = (
"On the desk, you see two blue booklets, "
"two purple booklets, and two yellow pairs of sunglasses. "
"If I remove all the pairs of sunglasses from the desk, "
"how many purple items remain on it?"
)
output = pal_chain.run(question)
assert output == "2"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~bibtex.py | import logging
import re
from pathlib import Path
from typing import Any, Iterator, List, Mapping, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.bibtex import BibtexparserWrapper
logger = logging.getLogger(__name__)
class BibtexLoader(BaseLoader):
"""Load a `bibtex` file.
Each document represents one entry from the bibtex file.
If a PDF file is present in the `file` bibtex field, the original PDF
is loaded into the document text. If no such file entry is present,
the `abstract` field is used instead.
"""
def __init__(
self,
file_path: str,
*,
parser: Optional[BibtexparserWrapper] = None,
max_docs: Optional[int] = None,
max_content_chars: Optional[int] = 4_000,
load_extra_metadata: bool = False,
file_pattern: str = r"[^:]+\.pdf",
):
"""Initialize the BibtexLoader.
Args:
file_path: Path to the bibtex file.
parser: The parser to use. If None, a default parser is used.
            max_docs: Max number of associated documents to load. Use -1 for
                no limit.
max_content_chars: Maximum number of characters to load from the PDF.
load_extra_metadata: Whether to load extra metadata from the PDF.
file_pattern: Regex pattern to match the file name in the bibtex.
"""
self.file_path = file_path
self.parser = parser or BibtexparserWrapper()
self.max_docs = max_docs
self.max_content_chars = max_content_chars
self.load_extra_metadata = load_extra_metadata
self.file_regex = re.compile(file_pattern)
def _load_entry(self, entry: Mapping[str, Any]) -> Optional[Document]:
import fitz
parent_dir = Path(self.file_path).parent
# regex is useful for Zotero flavor bibtex files
file_names = self.file_regex.findall(entry.get("file", ""))
if not file_names:
return None
texts: List[str] = []
for file_name in file_names:
try:
with fitz.open(parent_dir / file_name) as f:
texts.extend(page.get_text() for page in f)
except FileNotFoundError as e:
logger.debug(e)
content = "\n".join(texts) or entry.get("abstract", "")
if self.max_content_chars:
content = content[: self.max_content_chars]
metadata = self.parser.get_metadata(entry, load_extra=self.load_extra_metadata)
return Document(
page_content=content,
metadata=metadata,
)
def lazy_load(self) -> Iterator[Document]:
"""Load bibtex file using bibtexparser and get the article texts plus the
article metadata.
See https://bibtexparser.readthedocs.io/en/master/
Returns:
a list of documents with the document.page_content in text format
"""
try:
import fitz # noqa: F401
except ImportError:
raise ImportError(
"PyMuPDF package not found, please install it with "
"`pip install pymupdf`"
)
entries = self.parser.load_bibtex_entries(self.file_path)
if self.max_docs:
entries = entries[: self.max_docs]
for entry in entries:
doc = self._load_entry(entry)
if doc:
yield doc
def load(self) -> List[Document]:
"""Load bibtex file documents from the given bibtex file path.
See https://bibtexparser.readthedocs.io/en/master/
Args:
file_path: the path to the bibtex file
Returns:
a list of documents with the document.page_content in text format
"""
return list(self.lazy_load())
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~utilities~wikipedia.py | """Util that calls Wikipedia."""
import logging
from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
WIKIPEDIA_MAX_QUERY_LENGTH = 300
class WikipediaAPIWrapper(BaseModel):
"""Wrapper around WikipediaAPI.
To use, you should have the ``wikipedia`` python package installed.
This wrapper will use the Wikipedia API to conduct searches and
fetch page summaries. By default, it will return the page summaries
of the top-k results.
It limits the Document content by doc_content_chars_max.
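    Example (illustrative sketch; the query string is arbitrary):
        .. code-block:: python

            from langchain.utilities.wikipedia import WikipediaAPIWrapper

            wikipedia = WikipediaAPIWrapper(top_k_results=1)
            summaries = wikipedia.run("Alan Turing")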
"""
wiki_client: Any #: :meta private:
top_k_results: int = 3
lang: str = "en"
load_all_available_meta: bool = False
doc_content_chars_max: int = 4000
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import wikipedia
wikipedia.set_lang(values["lang"])
values["wiki_client"] = wikipedia
except ImportError:
raise ImportError(
"Could not import wikipedia python package. "
"Please install it with `pip install wikipedia`."
)
return values
def run(self, query: str) -> str:
"""Run Wikipedia search and get page summaries."""
page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH])
summaries = []
for page_title in page_titles[: self.top_k_results]:
if wiki_page := self._fetch_page(page_title):
if summary := self._formatted_page_summary(page_title, wiki_page):
summaries.append(summary)
if not summaries:
return "No good Wikipedia Search Result was found"
return "\n\n".join(summaries)[: self.doc_content_chars_max]
@staticmethod
def _formatted_page_summary(page_title: str, wiki_page: Any) -> Optional[str]:
return f"Page: {page_title}\nSummary: {wiki_page.summary}"
def _page_to_document(self, page_title: str, wiki_page: Any) -> Document:
main_meta = {
"title": page_title,
"summary": wiki_page.summary,
"source": wiki_page.url,
}
add_meta = (
{
"categories": wiki_page.categories,
"page_url": wiki_page.url,
"image_urls": wiki_page.images,
"related_titles": wiki_page.links,
"parent_id": wiki_page.parent_id,
"references": wiki_page.references,
"revision_id": wiki_page.revision_id,
"sections": wiki_page.sections,
}
if self.load_all_available_meta
else {}
)
doc = Document(
page_content=wiki_page.content[: self.doc_content_chars_max],
metadata={
**main_meta,
**add_meta,
},
)
return doc
    def _fetch_page(self, page: str) -> Optional[Any]:
try:
return self.wiki_client.page(title=page, auto_suggest=False)
except (
self.wiki_client.exceptions.PageError,
self.wiki_client.exceptions.DisambiguationError,
):
return None
def load(self, query: str) -> List[Document]:
"""
Run Wikipedia search and get the article text plus the meta information.
Returns: a list of documents.
"""
page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH])
docs = []
for page_title in page_titles[: self.top_k_results]:
if wiki_page := self._fetch_page(page_title):
if doc := self._page_to_document(page_title, wiki_page):
docs.append(doc)
return docs
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~evaluation~qa~eval_prompt.py | # flake8: noqa
from langchain.prompts import PromptTemplate
template = """You are a teacher grading a quiz.
You are given a question, the student's answer, and the true answer, and are asked to score the student answer as either CORRECT or INCORRECT.
Example Format:
QUESTION: question here
STUDENT ANSWER: student's answer here
TRUE ANSWER: true answer here
GRADE: CORRECT or INCORRECT here
Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin!
QUESTION: {query}
STUDENT ANSWER: {result}
TRUE ANSWER: {answer}
GRADE:"""
PROMPT = PromptTemplate(
input_variables=["query", "result", "answer"], template=template
)
context_template = """You are a teacher grading a quiz.
You are given a question, the context the question is about, and the student's answer. You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context.
Example Format:
QUESTION: question here
CONTEXT: context the question is about here
STUDENT ANSWER: student's answer here
GRADE: CORRECT or INCORRECT here
Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin!
QUESTION: {query}
CONTEXT: {context}
STUDENT ANSWER: {result}
GRADE:"""
CONTEXT_PROMPT = PromptTemplate(
input_variables=["query", "context", "result"], template=context_template
)
cot_template = """You are a teacher grading a quiz.
You are given a question, the context the question is about, and the student's answer. You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context.
Write out in a step by step manner your reasoning to be sure that your conclusion is correct. Avoid simply stating the correct answer at the outset.
Example Format:
QUESTION: question here
CONTEXT: context the question is about here
STUDENT ANSWER: student's answer here
EXPLANATION: step by step reasoning here
GRADE: CORRECT or INCORRECT here
Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin!
QUESTION: {query}
CONTEXT: {context}
STUDENT ANSWER: {result}
EXPLANATION:"""
COT_PROMPT = PromptTemplate(
input_variables=["query", "context", "result"], template=cot_template
)
template = """You are comparing a submitted answer to an expert answer on a given SQL coding question. Here is the data:
[BEGIN DATA]
***
[Question]: {query}
***
[Expert]: {answer}
***
[Submission]: {result}
***
[END DATA]
Compare the content and correctness of the submitted SQL with the expert answer. Ignore any differences in whitespace, style, or output column names. The submitted answer may either be correct or incorrect. Determine which case applies. First, explain in detail the similarities or differences between the expert answer and the submission, ignoring superficial aspects such as whitespace, style or output column names. Do not state the final answer in your initial explanation. Then, respond with either "CORRECT" or "INCORRECT" (without quotes or punctuation) on its own line. This should correspond to whether the submitted SQL and the expert answer are semantically the same or different, respectively. Then, repeat your final answer on a new line."""
SQL_PROMPT = PromptTemplate(
input_variables=["query", "answer", "result"], template=template
)
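# Illustrative sketch (not part of the original module): each template above is a
# plain PromptTemplate, so it can be rendered directly with its input variables,
# e.g.
#
#     PROMPT.format(query="What is 2 + 2?", result="4", answer="4")
#
# The rendered string is what an evaluation chain would send to the grading LLM.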
| [
"You are a teacher grading a quiz.\nYou are given a question, the context the question is about, and the student's answer. You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context.\n\nExample Format:\nQUESTION: question here\nCONTEXT: context the question is about here\nSTUDENT ANSWER: student's answer here\nGRADE: CORRECT or INCORRECT here\n\nGrade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! \n\nQUESTION: {query}\nCONTEXT: {context}\nSTUDENT ANSWER: {result}\nGRADE:",
"You are a teacher grading a quiz.\nYou are given a question, the context the question is about, and the student's answer. You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context.\nWrite out in a step by step manner your reasoning to be sure that your conclusion is correct. Avoid simply stating the correct answer at the outset.\n\nExample Format:\nQUESTION: question here\nCONTEXT: context the question is about here\nSTUDENT ANSWER: student's answer here\nEXPLANATION: step by step reasoning here\nGRADE: CORRECT or INCORRECT here\n\nGrade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! \n\nQUESTION: {query}\nCONTEXT: {context}\nSTUDENT ANSWER: {result}\nEXPLANATION:",
"You are a teacher grading a quiz.\nYou are given a question, the student's answer, and the true answer, and are asked to score the student answer as either CORRECT or INCORRECT.\n\nExample Format:\nQUESTION: question here\nSTUDENT ANSWER: student's answer here\nTRUE ANSWER: true answer here\nGRADE: CORRECT or INCORRECT here\n\nGrade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! \n\nQUESTION: {query}\nSTUDENT ANSWER: {result}\nTRUE ANSWER: {answer}\nGRADE:",
"You are comparing a submitted answer to an expert answer on a given SQL coding question. Here is the data:\n[BEGIN DATA]\n***\n[Question]: {query}\n***\n[Expert]: {answer}\n***\n[Submission]: {result}\n***\n[END DATA]\nCompare the content and correctness of the submitted SQL with the expert answer. Ignore any differences in whitespace, style, or output column names. The submitted answer may either be correct or incorrect. Determine which case applies. First, explain in detail the similarities or differences between the expert answer and the submission, ignoring superficial aspects such as whitespace, style or output column names. Do not state the final answer in your initial explanation. Then, respond with either \"CORRECT\" or \"INCORRECT\" (without quotes or punctuation) on its own line. This should correspond to whether the submitted SQL and the expert answer are semantically the same or different, respectively. Then, repeat your final answer on a new line.",
"context",
"answer"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chains~test_graph_database_arangodb.py | """Test Graph Database Chain."""
from typing import Any
from langchain.chains.graph_qa.arangodb import ArangoGraphQAChain
from langchain.graphs import ArangoGraph
from langchain.graphs.arangodb_graph import get_arangodb_client
from langchain.llms.openai import OpenAI
def populate_arangodb_database(db: Any) -> None:
if db.has_graph("GameOfThrones"):
return
db.create_graph(
"GameOfThrones",
edge_definitions=[
{
"edge_collection": "ChildOf",
"from_vertex_collections": ["Characters"],
"to_vertex_collections": ["Characters"],
},
],
)
documents = [
{
"_key": "NedStark",
"name": "Ned",
"surname": "Stark",
"alive": True,
"age": 41,
"gender": "male",
},
{
"_key": "AryaStark",
"name": "Arya",
"surname": "Stark",
"alive": True,
"age": 11,
"gender": "female",
},
]
edges = [{"_to": "Characters/NedStark", "_from": "Characters/AryaStark"}]
db.collection("Characters").import_bulk(documents)
db.collection("ChildOf").import_bulk(edges)
def test_connect_arangodb() -> None:
"""Test that the ArangoDB database is correctly instantiated and connected."""
graph = ArangoGraph(get_arangodb_client())
sample_aql_result = graph.query("RETURN 'hello world'")
assert ["hello_world"] == sample_aql_result
def test_empty_schema_on_no_data() -> None:
"""Test that the schema is empty for an empty ArangoDB Database"""
db = get_arangodb_client()
db.delete_graph("GameOfThrones", drop_collections=True, ignore_missing=True)
db.delete_collection("empty_collection", ignore_missing=True)
db.create_collection("empty_collection")
graph = ArangoGraph(db)
assert graph.schema == {
"Graph Schema": [],
"Collection Schema": [],
}
def test_aql_generation() -> None:
"""Test that AQL statement is correctly generated and executed."""
db = get_arangodb_client()
populate_arangodb_database(db)
graph = ArangoGraph(db)
chain = ArangoGraphQAChain.from_llm(OpenAI(temperature=0), graph=graph)
chain.return_aql_result = True
output = chain("Is Ned Stark alive?")
assert output["aql_result"] == [True]
assert "Yes" in output["result"]
output = chain("How old is Arya Stark?")
assert output["aql_result"] == [11]
assert "11" in output["result"]
output = chain("What is the relationship between Arya Stark and Ned Stark?")
assert len(output["aql_result"]) == 1
assert "child of" in output["result"]
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~langchain_experimental~prompt_injection_identifier~hugging_face_identifier.py | """Tool for the identification of prompt injection attacks."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain.pydantic_v1 import Field
from langchain.tools.base import BaseTool
if TYPE_CHECKING:
from transformers import Pipeline
def _model_default_factory() -> Pipeline:
try:
from transformers import pipeline
except ImportError as e:
raise ImportError(
"Cannot import transformers, please install with "
"`pip install transformers`."
) from e
return pipeline("text-classification", model="deepset/deberta-v3-base-injection")
class HuggingFaceInjectionIdentifier(BaseTool):
"""Tool that uses deberta-v3-base-injection to detect prompt injection attacks."""
name: str = "hugging_face_injection_identifier"
description: str = (
"A wrapper around HuggingFace Prompt Injection security model. "
"Useful for when you need to ensure that prompt is free of injection attacks. "
"Input should be any message from the user."
)
model: Pipeline = Field(default_factory=_model_default_factory)
def _run(self, query: str) -> str:
"""Use the tool."""
result = self.model(query)
result = sorted(result, key=lambda x: x["score"], reverse=True)
if result[0]["label"] == "INJECTION":
raise ValueError("Prompt injection attack detected")
return query
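# Illustrative usage sketch (not part of the original module); assumes the
# Hugging Face model can be downloaded locally. A benign query is returned
# unchanged, while input the model labels as INJECTION raises ValueError.
#
#     identifier = HuggingFaceInjectionIdentifier()
#     identifier.run("What is the weather today?")
#     identifier.run("Ignore all previous instructions and ...")  # may raise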
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chains~test_dalle_agent.py | """Integration test for Dall-E image generator agent."""
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI
def test_call() -> None:
"""Test that the agent runs and returns output."""
llm = OpenAI(temperature=0.9)
tools = load_tools(["dalle-image-generator"])
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
output = agent.run("Create an image of a volcano island")
assert output is not None
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~embeddings~test_self_hosted.py | """Test self-hosted embeddings."""
from typing import Any
from langchain.embeddings import (
SelfHostedEmbeddings,
SelfHostedHuggingFaceEmbeddings,
SelfHostedHuggingFaceInstructEmbeddings,
)
def get_remote_instance() -> Any:
"""Get remote instance for testing."""
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
gpu.install_packages(["pip:./"])
return gpu
def test_self_hosted_huggingface_embedding_documents() -> None:
"""Test self-hosted huggingface embeddings."""
documents = ["foo bar"]
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_self_hosted_huggingface_embedding_query() -> None:
"""Test self-hosted huggingface embeddings."""
document = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
output = embedding.embed_query(document)
assert len(output) == 768
def test_self_hosted_huggingface_instructor_embedding_documents() -> None:
"""Test self-hosted huggingface instruct embeddings."""
documents = ["foo bar"]
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_self_hosted_huggingface_instructor_embedding_query() -> None:
"""Test self-hosted huggingface instruct embeddings."""
query = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
output = embedding.embed_query(query)
assert len(output) == 768
def get_pipeline() -> Any:
"""Get pipeline for testing."""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "facebook/bart-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
def inference_fn(pipeline: Any, prompt: str) -> Any:
"""Inference function for testing."""
# Return last hidden state of the model
if isinstance(prompt, list):
return [emb[0][-1] for emb in pipeline(prompt)]
return pipeline(prompt)[0][-1]
def test_self_hosted_embedding_documents() -> None:
"""Test self-hosted huggingface instruct embeddings."""
documents = ["foo bar"] * 2
gpu = get_remote_instance()
embedding = SelfHostedEmbeddings(
model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn
)
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 50265
def test_self_hosted_embedding_query() -> None:
"""Test self-hosted custom embeddings."""
query = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedEmbeddings(
model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn
)
output = embedding.embed_query(query)
assert len(output) == 50265
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~chains~loading.py | """Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains import ReduceDocumentsChain
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.graph_qa.cypher import GraphCypherQAChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import (
_load_output_parser,
load_prompt,
load_prompt_from_config,
)
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
_load_output_parser(config)
return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "reduce_documents_chain" in config:
reduce_documents_chain = load_chain_from_config(
config.pop("reduce_documents_chain")
)
elif "reduce_documents_chain_path" in config:
reduce_documents_chain = load_chain(config.pop("reduce_documents_chain_path"))
else:
reduce_documents_chain = _load_reduce_documents_chain(config)
return MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
**config,
)
def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:
combine_documents_chain = None
collapse_documents_chain = None
if "combine_documents_chain" in config:
combine_document_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_documents_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
elif "combine_document_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
if "collapse_documents_chain" in config:
collapse_document_chain_config = config.pop("collapse_documents_chain")
if collapse_document_chain_config is None:
collapse_documents_chain = None
else:
collapse_documents_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_documents_chain_path" in config:
collapse_documents_chain = load_chain(
config.pop("collapse_documents_chain_path")
)
elif "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_documents_chain = None
else:
collapse_documents_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_documents_chain = load_chain(
config.pop("collapse_document_chain_path")
)
return ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
collapse_documents_chain=collapse_documents_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> Any:
from langchain_experimental.llm_bash.base import LLMBashChain
llm_chain = None
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
# llm attribute is deprecated in favor of llm_chain, here to support old configs
elif "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
# llm_path attribute is deprecated in favor of llm_chain_path,
    # it's here to support old configs
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
if llm_chain:
return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config)
else:
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
llm_chain = None
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
# llm attribute is deprecated in favor of llm_chain, here to support old configs
elif "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
# llm_path attribute is deprecated in favor of llm_chain_path,
    # it's here to support old configs
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
if llm_chain:
return LLMMathChain(llm_chain=llm_chain, prompt=prompt, **config)
else:
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> Any:
from langchain_experimental.pal_chain import PALChain
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
return PALChain(llm_chain=llm_chain, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_path` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_path` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any:
from langchain_experimental.sql import SQLDatabaseChain
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
chain = load_chain_from_config(llm_chain_config)
return SQLDatabaseChain(llm_chain=chain, database=database, **config)
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
else:
prompt = None
return SQLDatabaseChain.from_llm(llm, database, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_retrieval_qa(config: dict, **kwargs: Any) -> RetrievalQA:
if "retriever" in kwargs:
retriever = kwargs.pop("retriever")
else:
raise ValueError("`retriever` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return RetrievalQA(
combine_documents_chain=combine_documents_chain,
retriever=retriever,
**config,
)
def _load_retrieval_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> RetrievalQAWithSourcesChain:
if "retriever" in kwargs:
retriever = kwargs.pop("retriever")
else:
raise ValueError("`retriever` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return RetrievalQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
retriever=retriever,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_graph_cypher_chain(config: dict, **kwargs: Any) -> GraphCypherQAChain:
if "graph" in kwargs:
graph = kwargs.pop("graph")
else:
raise ValueError("`graph` must be present.")
if "cypher_generation_chain" in config:
cypher_generation_chain_config = config.pop("cypher_generation_chain")
cypher_generation_chain = load_chain_from_config(cypher_generation_chain_config)
else:
raise ValueError("`cypher_generation_chain` must be present.")
if "qa_chain" in config:
qa_chain_config = config.pop("qa_chain")
qa_chain = load_chain_from_config(qa_chain_config)
else:
raise ValueError("`qa_chain` must be present.")
return GraphCypherQAChain(
graph=graph,
cypher_generation_chain=cypher_generation_chain,
qa_chain=qa_chain,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"reduce_documents_chain": _load_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
"retrieval_qa": _load_retrieval_qa,
"retrieval_qa_with_sources_chain": _load_retrieval_qa_with_sources_chain,
"graph_cypher_chain": _load_graph_cypher_chain,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Override default 'verbose' and 'memory' for the chain
if "verbose" in kwargs:
config["verbose"] = kwargs.pop("verbose")
if "memory" in kwargs:
config["memory"] = kwargs.pop("memory")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
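# Illustrative sketch (not part of the original module); "chain.json" is a
# placeholder path to a serialized chain config whose "_type" value appears in
# type_to_loader_dict above.
#
#     from langchain.chains.loading import load_chain
#
#     chain = load_chain("chain.json")
#     result = chain.run("some input")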
| [
"list_assertions_prompt",
"create_draft_answer_prompt",
"revised_answer_prompt",
"revised_answer_prompt_path",
"None",
"document_prompt",
"create_draft_answer_prompt_path",
"check_assertions_prompt",
"document_prompt_path",
"prompt_path",
"check_assertions_prompt_path",
"list_assertions_prompt_path"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~output_parsers~test_structured_parser.py | from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain.schema import OutputParserException
def test_parse() -> None:
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
# Test valid JSON input
text = '```json\n{"name": "John", "age": 30}\n```'
expected_result = {"name": "John", "age": 30}
result = parser.parse(text)
assert result == expected_result, f"Expected {expected_result}, but got {result}"
# Test invalid JSON input
text = '```json\n{"name": "John"}\n```'
try:
parser.parse(text)
except OutputParserException:
pass # Test passes if OutputParserException is raised
else:
assert False, f"Expected OutputParserException, but got {parser.parse(text)}"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~tools~requests~test_tool.py | import asyncio
from typing import Any, Dict
import pytest
from langchain.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
_parse_input,
)
from langchain.utilities.requests import TextRequestsWrapper
class _MockTextRequestsWrapper(TextRequestsWrapper):
@staticmethod
def get(url: str, **kwargs: Any) -> str:
return "get_response"
@staticmethod
async def aget(url: str, **kwargs: Any) -> str:
return "aget_response"
@staticmethod
def post(url: str, data: Dict[str, Any], **kwargs: Any) -> str:
return f"post {str(data)}"
@staticmethod
async def apost(url: str, data: Dict[str, Any], **kwargs: Any) -> str:
return f"apost {str(data)}"
@staticmethod
def patch(url: str, data: Dict[str, Any], **kwargs: Any) -> str:
return f"patch {str(data)}"
@staticmethod
async def apatch(url: str, data: Dict[str, Any], **kwargs: Any) -> str:
return f"apatch {str(data)}"
@staticmethod
def put(url: str, data: Dict[str, Any], **kwargs: Any) -> str:
return f"put {str(data)}"
@staticmethod
async def aput(url: str, data: Dict[str, Any], **kwargs: Any) -> str:
return f"aput {str(data)}"
@staticmethod
def delete(url: str, **kwargs: Any) -> str:
return "delete_response"
@staticmethod
async def adelete(url: str, **kwargs: Any) -> str:
return "adelete_response"
@pytest.fixture
def mock_requests_wrapper() -> TextRequestsWrapper:
return _MockTextRequestsWrapper()
def test_parse_input() -> None:
input_text = '{"url": "https://example.com", "data": {"key": "value"}}'
expected_output = {"url": "https://example.com", "data": {"key": "value"}}
assert _parse_input(input_text) == expected_output
def test_requests_get_tool(mock_requests_wrapper: TextRequestsWrapper) -> None:
tool = RequestsGetTool(requests_wrapper=mock_requests_wrapper)
assert tool.run("https://example.com") == "get_response"
assert asyncio.run(tool.arun("https://example.com")) == "aget_response"
def test_requests_post_tool(mock_requests_wrapper: TextRequestsWrapper) -> None:
tool = RequestsPostTool(requests_wrapper=mock_requests_wrapper)
input_text = '{"url": "https://example.com", "data": {"key": "value"}}'
assert tool.run(input_text) == "post {'key': 'value'}"
assert asyncio.run(tool.arun(input_text)) == "apost {'key': 'value'}"
def test_requests_patch_tool(mock_requests_wrapper: TextRequestsWrapper) -> None:
tool = RequestsPatchTool(requests_wrapper=mock_requests_wrapper)
input_text = '{"url": "https://example.com", "data": {"key": "value"}}'
assert tool.run(input_text) == "patch {'key': 'value'}"
assert asyncio.run(tool.arun(input_text)) == "apatch {'key': 'value'}"
def test_requests_put_tool(mock_requests_wrapper: TextRequestsWrapper) -> None:
tool = RequestsPutTool(requests_wrapper=mock_requests_wrapper)
input_text = '{"url": "https://example.com", "data": {"key": "value"}}'
assert tool.run(input_text) == "put {'key': 'value'}"
assert asyncio.run(tool.arun(input_text)) == "aput {'key': 'value'}"
def test_requests_delete_tool(mock_requests_wrapper: TextRequestsWrapper) -> None:
tool = RequestsDeleteTool(requests_wrapper=mock_requests_wrapper)
assert tool.run("https://example.com") == "delete_response"
assert asyncio.run(tool.arun("https://example.com")) == "adelete_response"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_deeplake.py | """Test Deep Lake functionality."""
import pytest
from pytest import FixtureRequest
from langchain.docstore.document import Document
from langchain.vectorstores import DeepLake
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.fixture
def deeplake_datastore() -> DeepLake:
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = DeepLake.from_texts(
dataset_path="./test_path",
texts=texts,
metadatas=metadatas,
embedding_function=FakeEmbeddings(),
overwrite=True,
)
return docsearch
@pytest.fixture(params=["L1", "L2", "max", "cos"])
def distance_metric(request: FixtureRequest) -> str:
return request.param
def test_deeplake() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = DeepLake.from_texts(
dataset_path="mem://test_path", texts=texts, embedding=FakeEmbeddings()
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_deeplake_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = DeepLake.from_texts(
dataset_path="mem://test_path",
texts=texts,
embedding=FakeEmbeddings(),
metadatas=metadatas,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_deeplake_with_persistence() -> None:
"""Test end to end construction and search, with persistence."""
import deeplake
dataset_path = "./tests/persist_dir"
if deeplake.exists(dataset_path):
deeplake.delete(dataset_path)
texts = ["foo", "bar", "baz"]
docsearch = DeepLake.from_texts(
dataset_path=dataset_path,
texts=texts,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
# Clean up
docsearch.delete_dataset()
# Persist doesn't need to be called again
# Data will be automatically persisted on object deletion
# Or on program exit
def test_deeplake_overwrite_flag() -> None:
"""Test overwrite behavior"""
import deeplake
dataset_path = "./tests/persist_dir"
if deeplake.exists(dataset_path):
deeplake.delete(dataset_path)
texts = ["foo", "bar", "baz"]
docsearch = DeepLake.from_texts(
dataset_path=dataset_path,
texts=texts,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory, with no overwrite (implicit)
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
# assert page still present
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory, with no overwrite (explicit)
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
overwrite=False,
)
output = docsearch.similarity_search("foo", k=1)
# assert page still present
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory, with overwrite
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
overwrite=True,
)
with pytest.raises(ValueError):
output = docsearch.similarity_search("foo", k=1)
def test_similarity_search(deeplake_datastore: DeepLake, distance_metric: str) -> None:
"""Test similarity search."""
output = deeplake_datastore.similarity_search(
"foo", k=1, distance_metric=distance_metric
)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
tql_query = (
f"SELECT * WHERE "
f"id=='{deeplake_datastore.vectorstore.dataset.id[0].numpy()[0]}'"
)
output = deeplake_datastore.similarity_search(
query="foo", tql_query=tql_query, k=1, distance_metric=distance_metric
)
assert len(output) == 1
deeplake_datastore.delete_dataset()
def test_similarity_search_by_vector(
deeplake_datastore: DeepLake, distance_metric: str
) -> None:
"""Test similarity search by vector."""
embeddings = FakeEmbeddings().embed_documents(["foo", "bar", "baz"])
output = deeplake_datastore.similarity_search_by_vector(
embeddings[1], k=1, distance_metric=distance_metric
)
assert output == [Document(page_content="bar", metadata={"page": "1"})]
deeplake_datastore.delete_dataset()
def test_similarity_search_with_score(
deeplake_datastore: DeepLake, distance_metric: str
) -> None:
"""Test similarity search with score."""
output, score = deeplake_datastore.similarity_search_with_score(
"foo", k=1, distance_metric=distance_metric
)[0]
assert output == Document(page_content="foo", metadata={"page": "0"})
if distance_metric == "cos":
assert score == 1.0
else:
assert score == 0.0
deeplake_datastore.delete_dataset()
def test_similarity_search_with_filter(
deeplake_datastore: DeepLake, distance_metric: str
) -> None:
"""Test similarity search."""
output = deeplake_datastore.similarity_search(
"foo",
k=1,
distance_metric=distance_metric,
filter={"metadata": {"page": "1"}},
)
assert output == [Document(page_content="bar", metadata={"page": "1"})]
deeplake_datastore.delete_dataset()
def test_max_marginal_relevance_search(deeplake_datastore: DeepLake) -> None:
"""Test max marginal relevance search by vector."""
output = deeplake_datastore.max_marginal_relevance_search("foo", k=1, fetch_k=2)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
embeddings = FakeEmbeddings().embed_documents(["foo", "bar", "baz"])
output = deeplake_datastore.max_marginal_relevance_search_by_vector(
embeddings[0], k=1, fetch_k=2
)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
deeplake_datastore.delete_dataset()
def test_delete_dataset_by_ids(deeplake_datastore: DeepLake) -> None:
"""Test delete dataset."""
id = deeplake_datastore.vectorstore.dataset.id.data()["value"][0]
deeplake_datastore.delete(ids=[id])
assert (
deeplake_datastore.similarity_search(
"foo", k=1, filter={"metadata": {"page": "0"}}
)
== []
)
assert len(deeplake_datastore.vectorstore) == 2
deeplake_datastore.delete_dataset()
def test_delete_dataset_by_filter(deeplake_datastore: DeepLake) -> None:
"""Test delete dataset."""
deeplake_datastore.delete(filter={"metadata": {"page": "1"}})
assert (
deeplake_datastore.similarity_search(
"bar", k=1, filter={"metadata": {"page": "1"}}
)
== []
)
assert len(deeplake_datastore.vectorstore.dataset) == 2
deeplake_datastore.delete_dataset()
def test_delete_by_path(deeplake_datastore: DeepLake) -> None:
"""Test delete dataset."""
import deeplake
path = deeplake_datastore.dataset_path
DeepLake.force_delete_by_path(path)
assert not deeplake.exists(path)
def test_add_texts(deeplake_datastore: DeepLake) -> None:
"""Test add_texts dataset."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
deeplake_datastore.add_texts(
texts=texts,
metadatas=metadatas,
)
with pytest.raises(TypeError):
deeplake_datastore.add_texts(
texts=texts,
metada=metadatas,
)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chains~test_graph_database_sparql.py | """Test RDF/ SPARQL Graph Database Chain."""
import os
from langchain.chains.graph_qa.sparql import GraphSparqlQAChain
from langchain.graphs import RdfGraph
from langchain.llms.openai import OpenAI
def test_connect_file_rdf() -> None:
"""
Test loading online resource.
"""
berners_lee_card = "http://www.w3.org/People/Berners-Lee/card"
graph = RdfGraph(
source_file=berners_lee_card,
standard="rdf",
)
query = """SELECT ?s ?p ?o\n""" """WHERE { ?s ?p ?o }"""
output = graph.query(query)
assert len(output) == 86
def test_sparql_select() -> None:
"""
Test for generating and executing simple SPARQL SELECT query.
"""
berners_lee_card = "http://www.w3.org/People/Berners-Lee/card"
graph = RdfGraph(
source_file=berners_lee_card,
standard="rdf",
)
chain = GraphSparqlQAChain.from_llm(OpenAI(temperature=0), graph=graph)
output = chain.run("What is Tim Berners-Lee's work homepage?")
expected_output = (
" The work homepage of Tim Berners-Lee is "
"http://www.w3.org/People/Berners-Lee/."
)
assert output == expected_output
def test_sparql_insert() -> None:
"""
Test for generating and executing simple SPARQL INSERT query.
"""
berners_lee_card = "http://www.w3.org/People/Berners-Lee/card"
_local_copy = "test.ttl"
graph = RdfGraph(
source_file=berners_lee_card,
standard="rdf",
local_copy=_local_copy,
)
chain = GraphSparqlQAChain.from_llm(OpenAI(temperature=0), graph=graph)
chain.run(
"Save that the person with the name 'Timothy Berners-Lee' "
"has a work homepage at 'http://www.w3.org/foo/bar/'"
)
query = (
"""PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n"""
"""SELECT ?hp\n"""
"""WHERE {\n"""
""" ?person foaf:name "Timothy Berners-Lee" . \n"""
""" ?person foaf:workplaceHomepage ?hp .\n"""
"""}"""
)
output = graph.query(query)
assert len(output) == 2
# clean up
try:
os.remove(_local_copy)
except OSError:
pass
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~clickup~toolkit.py | from typing import Dict, List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.clickup.prompt import (
CLICKUP_FOLDER_CREATE_PROMPT,
CLICKUP_GET_ALL_TEAMS_PROMPT,
CLICKUP_GET_FOLDERS_PROMPT,
CLICKUP_GET_LIST_PROMPT,
CLICKUP_GET_SPACES_PROMPT,
CLICKUP_GET_TASK_ATTRIBUTE_PROMPT,
CLICKUP_GET_TASK_PROMPT,
CLICKUP_LIST_CREATE_PROMPT,
CLICKUP_TASK_CREATE_PROMPT,
CLICKUP_UPDATE_TASK_ASSIGNEE_PROMPT,
CLICKUP_UPDATE_TASK_PROMPT,
)
from langchain.tools.clickup.tool import ClickupAction
from langchain.utilities.clickup import ClickupAPIWrapper
class ClickupToolkit(BaseToolkit):
"""Clickup Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by reading, creating, updating, deleting
data associated with this service.
See https://python.langchain.com/docs/security for more information.
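    Example (illustrative sketch, assuming the wrapper is constructed with a
    Clickup OAuth access token):
        .. code-block:: python

            from langchain.utilities.clickup import ClickupAPIWrapper

            clickup = ClickupAPIWrapper(access_token="<oauth token>")
            toolkit = ClickupToolkit.from_clickup_api_wrapper(clickup)
            tools = toolkit.get_tools()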
"""
tools: List[BaseTool] = []
@classmethod
def from_clickup_api_wrapper(
cls, clickup_api_wrapper: ClickupAPIWrapper
) -> "ClickupToolkit":
operations: List[Dict] = [
{
"mode": "get_task",
"name": "Get task",
"description": CLICKUP_GET_TASK_PROMPT,
},
{
"mode": "get_task_attribute",
"name": "Get task attribute",
"description": CLICKUP_GET_TASK_ATTRIBUTE_PROMPT,
},
{
"mode": "get_teams",
"name": "Get Teams",
"description": CLICKUP_GET_ALL_TEAMS_PROMPT,
},
{
"mode": "create_task",
"name": "Create Task",
"description": CLICKUP_TASK_CREATE_PROMPT,
},
{
"mode": "create_list",
"name": "Create List",
"description": CLICKUP_LIST_CREATE_PROMPT,
},
{
"mode": "create_folder",
"name": "Create Folder",
"description": CLICKUP_FOLDER_CREATE_PROMPT,
},
{
"mode": "get_list",
"name": "Get all lists in the space",
"description": CLICKUP_GET_LIST_PROMPT,
},
{
"mode": "get_folders",
"name": "Get all folders in the workspace",
"description": CLICKUP_GET_FOLDERS_PROMPT,
},
{
"mode": "get_spaces",
"name": "Get all spaces in the workspace",
"description": CLICKUP_GET_SPACES_PROMPT,
},
{
"mode": "update_task",
"name": "Update task",
"description": CLICKUP_UPDATE_TASK_PROMPT,
},
{
"mode": "update_task_assignees",
"name": "Update task assignees",
"description": CLICKUP_UPDATE_TASK_ASSIGNEE_PROMPT,
},
]
tools = [
ClickupAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=clickup_api_wrapper,
)
for action in operations
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~pinecone_hybrid_search.py | """Taken from: https://docs.pinecone.io/docs/hybrid-search"""
import hashlib
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import BaseRetriever, Document
from langchain.schema.embeddings import Embeddings
def hash_text(text: str) -> str:
"""Hash a text using SHA256.
Args:
text: Text to hash.
Returns:
Hashed text.
"""
return str(hashlib.sha256(text.encode("utf-8")).hexdigest())
def create_index(
contexts: List[str],
index: Any,
embeddings: Embeddings,
sparse_encoder: Any,
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
namespace: Optional[str] = None,
) -> None:
"""Create an index from a list of contexts.
It modifies the index argument in-place!
Args:
contexts: List of contexts to embed.
index: Index to use.
embeddings: Embeddings model to use.
sparse_encoder: Sparse encoder to use.
ids: List of ids to use for the documents.
metadatas: List of metadata to use for the documents.
"""
batch_size = 32
_iterator = range(0, len(contexts), batch_size)
try:
from tqdm.auto import tqdm
_iterator = tqdm(_iterator)
except ImportError:
pass
if ids is None:
# create unique ids using hash of the text
ids = [hash_text(context) for context in contexts]
for i in _iterator:
# find end of batch
i_end = min(i + batch_size, len(contexts))
# extract batch
context_batch = contexts[i:i_end]
batch_ids = ids[i:i_end]
metadata_batch = (
metadatas[i:i_end] if metadatas else [{} for _ in context_batch]
)
# add context passages as metadata
meta = [
{"context": context, **metadata}
for context, metadata in zip(context_batch, metadata_batch)
]
# create dense vectors
dense_embeds = embeddings.embed_documents(context_batch)
# create sparse vectors
sparse_embeds = sparse_encoder.encode_documents(context_batch)
for s in sparse_embeds:
s["values"] = [float(s1) for s1 in s["values"]]
vectors = []
# loop through the data and create dictionaries for upserts
for doc_id, sparse, dense, metadata in zip(
batch_ids, sparse_embeds, dense_embeds, meta
):
vectors.append(
{
"id": doc_id,
"sparse_values": sparse,
"values": dense,
"metadata": metadata,
}
)
# upload the documents to the new hybrid index
index.upsert(vectors, namespace=namespace)
class PineconeHybridSearchRetriever(BaseRetriever):
"""`Pinecone Hybrid Search` retriever."""
embeddings: Embeddings
"""Embeddings model to use."""
"""description"""
sparse_encoder: Any
"""Sparse encoder to use."""
index: Any
"""Pinecone index to use."""
top_k: int = 4
"""Number of documents to return."""
alpha: float = 0.5
"""Alpha value for hybrid search."""
namespace: Optional[str] = None
"""Namespace value for index partition."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def add_texts(
self,
texts: List[str],
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
namespace: Optional[str] = None,
) -> None:
create_index(
texts,
self.index,
self.embeddings,
self.sparse_encoder,
ids=ids,
metadatas=metadatas,
namespace=namespace,
)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from pinecone_text.hybrid import hybrid_convex_scale # noqa:F401
from pinecone_text.sparse.base_sparse_encoder import (
BaseSparseEncoder, # noqa:F401
)
except ImportError:
raise ImportError(
"Could not import pinecone_text python package. "
"Please install it with `pip install pinecone_text`."
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
from pinecone_text.hybrid import hybrid_convex_scale
sparse_vec = self.sparse_encoder.encode_queries(query)
# convert the question into a dense vector
dense_vec = self.embeddings.embed_query(query)
# scale alpha with hybrid_scale
dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha)
sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]]
# query pinecone with the query parameters
result = self.index.query(
vector=dense_vec,
sparse_vector=sparse_vec,
top_k=self.top_k,
include_metadata=True,
namespace=self.namespace,
)
final_result = []
for res in result["matches"]:
context = res["metadata"].pop("context")
final_result.append(
Document(page_content=context, metadata=res["metadata"])
)
# return search results as json
return final_result
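# Illustrative usage sketch (editor's addition, not part of the original module).
# It assumes the older `pinecone-client` API (`pinecone.init` / `pinecone.Index`) and an
# existing index created with dotproduct similarity; all names are placeholders.
def _example_pinecone_hybrid_usage() -> None:
    import pinecone
    from pinecone_text.sparse import BM25Encoder
    from langchain.embeddings import OpenAIEmbeddings
    pinecone.init(api_key="<pinecone-api-key>", environment="<environment>")
    index = pinecone.Index("<hybrid-index-name>")
    retriever = PineconeHybridSearchRetriever(
        embeddings=OpenAIEmbeddings(),
        sparse_encoder=BM25Encoder().default(),
        index=index,
    )
    retriever.add_texts(["foo", "bar", "world", "hello"])
    docs = retriever.get_relevant_documents("foo")
    print(docs)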
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~tools~edenai~test_ocr_identityparser.py | """Test EdenAi's identity parser Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set EDENAI_API_KEY environment variable to your api key.
"""
from langchain.tools.edenai import EdenAiParsingIDTool
def test_edenai_call() -> None:
"""Test simple call to edenai's identity parser endpoint."""
id_parser = EdenAiParsingIDTool(providers=["amazon"], language="en")
output = id_parser(
"https://www.citizencard.com/images/citizencard-uk-id-card-2023.jpg"
)
assert id_parser.name == "edenai_identity_parsing"
assert id_parser.feature == "ocr"
assert id_parser.subfeature == "identity_parser"
assert isinstance(output, str)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_pdf.py | from pathlib import Path
from typing import Sequence, Union
import pytest
from langchain.document_loaders import (
AmazonTextractPDFLoader,
MathpixPDFLoader,
PDFMinerLoader,
PDFMinerPDFasHTMLLoader,
PyMuPDFLoader,
PyPDFium2Loader,
PyPDFLoader,
UnstructuredPDFLoader,
)
def test_unstructured_pdf_loader_elements_mode() -> None:
"""Test unstructured loader with various modes."""
file_path = Path(__file__).parent.parent / "examples/hello.pdf"
loader = UnstructuredPDFLoader(str(file_path), mode="elements")
docs = loader.load()
assert len(docs) == 2
def test_unstructured_pdf_loader_paged_mode() -> None:
"""Test unstructured loader with various modes."""
file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
loader = UnstructuredPDFLoader(str(file_path), mode="paged")
docs = loader.load()
assert len(docs) == 16
def test_unstructured_pdf_loader_default_mode() -> None:
"""Test unstructured loader."""
file_path = Path(__file__).parent.parent / "examples/hello.pdf"
loader = UnstructuredPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
def test_pdfminer_loader() -> None:
"""Test PDFMiner loader."""
file_path = Path(__file__).parent.parent / "examples/hello.pdf"
loader = PDFMinerLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
loader = PDFMinerLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
def test_pdfminer_pdf_as_html_loader() -> None:
"""Test PDFMinerPDFasHTMLLoader."""
file_path = Path(__file__).parent.parent / "examples/hello.pdf"
loader = PDFMinerPDFasHTMLLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
loader = PDFMinerPDFasHTMLLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
def test_pypdf_loader() -> None:
"""Test PyPDFLoader."""
file_path = Path(__file__).parent.parent / "examples/hello.pdf"
loader = PyPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
loader = PyPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 16
def test_pypdfium2_loader() -> None:
"""Test PyPDFium2Loader."""
file_path = Path(__file__).parent.parent / "examples/hello.pdf"
loader = PyPDFium2Loader(str(file_path))
docs = loader.load()
assert len(docs) == 1
file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
loader = PyPDFium2Loader(str(file_path))
docs = loader.load()
assert len(docs) == 16
def test_pymupdf_loader() -> None:
"""Test PyMuPDF loader."""
file_path = Path(__file__).parent.parent / "examples/hello.pdf"
loader = PyMuPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
loader = PyMuPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 16
assert loader.web_path is None
web_path = "https://people.sc.fsu.edu/~jpeterson/hello_world.pdf"
loader = PyMuPDFLoader(web_path)
docs = loader.load()
assert loader.web_path == web_path
assert loader.file_path != web_path
assert len(docs) == 1
def test_mathpix_loader() -> None:
file_path = Path(__file__).parent.parent / "examples/hello.pdf"
loader = MathpixPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
print(docs[0].page_content)
file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
loader = MathpixPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
print(docs[0].page_content)
@pytest.mark.parametrize(
"file_path, features, docs_length, create_client",
[
(
(
"https://amazon-textract-public-content.s3.us-east-2.amazonaws.com"
"/langchain/alejandro_rosalez_sample_1.jpg"
),
["FORMS", "TABLES"],
1,
False,
),
(str(Path(__file__).parent.parent / "examples/hello.pdf"), ["FORMS"], 1, False),
(
"s3://amazon-textract-public-content/langchain/layout-parser-paper.pdf",
None,
16,
True,
),
],
)
@pytest.mark.skip(reason="Requires AWS credentials to run")
def test_amazontextract_loader(
file_path: str,
features: Union[Sequence[str], None],
docs_length: int,
create_client: bool,
) -> None:
if create_client:
import boto3
textract_client = boto3.client("textract", region_name="us-east-2")
loader = AmazonTextractPDFLoader(
file_path, textract_features=features, client=textract_client
)
else:
loader = AmazonTextractPDFLoader(file_path, textract_features=features)
docs = loader.load()
assert len(docs) == docs_length
@pytest.mark.skip(reason="Requires AWS credentials to run")
def test_amazontextract_loader_failures() -> None:
# 2-page PDF local file system
two_page_pdf = str(
Path(__file__).parent.parent / "examples/multi-page-forms-sample-2-page.pdf"
)
loader = AmazonTextractPDFLoader(two_page_pdf)
with pytest.raises(ValueError):
loader.load()
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~utilities~test_googlesearch_api.py | """Integration test for Google Search API Wrapper."""
from langchain.utilities.google_search import GoogleSearchAPIWrapper
def test_call() -> None:
"""Test that call gives the correct answer."""
search = GoogleSearchAPIWrapper()
output = search.run("What was Obama's first name?")
assert "Barack Hussein Obama II" in output
def test_no_result_call() -> None:
"""Test that call gives no result."""
search = GoogleSearchAPIWrapper()
output = search.run(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
print(type(output))
assert "No good Google Search Result was found" == output
def test_result_with_params_call() -> None:
"""Test that call gives the correct answer with extra params."""
search = GoogleSearchAPIWrapper()
output = search.results(
query="What was Obama's first name?",
num_results=5,
search_params={"cr": "us", "safe": "active"},
)
assert len(output)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~weaviate_hybrid_search.py | from __future__ import annotations
from typing import Any, Dict, List, Optional, cast
from uuid import uuid4
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever
class WeaviateHybridSearchRetriever(BaseRetriever):
"""`Weaviate hybrid search` retriever.
See the documentation:
https://weaviate.io/blog/hybrid-search-explained
"""
client: Any
"""keyword arguments to pass to the Weaviate client."""
index_name: str
"""The name of the index to use."""
text_key: str
"""The name of the text key to use."""
alpha: float = 0.5
"""The weight of the text key in the hybrid search."""
k: int = 4
"""The number of results to return."""
attributes: List[str]
"""The attributes to return in the results."""
create_schema_if_missing: bool = True
"""Whether to create the schema if it doesn't exist."""
@root_validator(pre=True)
def validate_client(
cls,
values: Dict[str, Any],
) -> Dict[str, Any]:
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(values["client"], weaviate.Client):
client = values["client"]
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
if values.get("attributes") is None:
values["attributes"] = []
cast(List, values["attributes"]).append(values["text_key"])
if values.get("create_schema_if_missing", True):
class_obj = {
"class": values["index_name"],
"properties": [{"name": values["text_key"], "dataType": ["text"]}],
"vectorizer": "text2vec-openai",
}
if not values["client"].schema.exists(values["index_name"]):
values["client"].schema.create_class(class_obj)
return values
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
# added text_key
def add_documents(self, docs: List[Document], **kwargs: Any) -> List[str]:
"""Upload documents to Weaviate."""
from weaviate.util import get_valid_uuid
with self.client.batch as batch:
ids = []
for i, doc in enumerate(docs):
metadata = doc.metadata or {}
data_properties = {self.text_key: doc.page_content, **metadata}
# If the UUID of one of the objects already exists
                # then the existing object will be replaced by the new object.
if "uuids" in kwargs:
_id = kwargs["uuids"][i]
else:
_id = get_valid_uuid(uuid4())
batch.add_data_object(data_properties, self.index_name, _id)
ids.append(_id)
return ids
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
where_filter: Optional[Dict[str, object]] = None,
score: bool = False,
hybrid_search_kwargs: Optional[Dict[str, object]] = None,
) -> List[Document]:
"""Look up similar documents in Weaviate.
        query: The query to search for relevant documents
            for, using Weaviate hybrid search.
where_filter: A filter to apply to the query.
https://weaviate.io/developers/weaviate/guides/querying/#filtering
score: Whether to include the score, and score explanation
in the returned Documents meta_data.
hybrid_search_kwargs: Used to pass additional arguments
to the .with_hybrid() method.
The primary uses cases for this are:
1) Search specific properties only -
specify which properties to be used during hybrid search portion.
Note: this is not the same as the (self.attributes) to be returned.
Example - hybrid_search_kwargs={"properties": ["question", "answer"]}
https://weaviate.io/developers/weaviate/search/hybrid#selected-properties-only
2) Weight boosted searched properties -
Boost the weight of certain properties during the hybrid search portion.
Example - hybrid_search_kwargs={"properties": ["question^2", "answer"]}
https://weaviate.io/developers/weaviate/search/hybrid#weight-boost-searched-properties
3) Search with a custom vector - Define a different vector
to be used during the hybrid search portion.
Example - hybrid_search_kwargs={"vector": [0.1, 0.2, 0.3, ...]}
https://weaviate.io/developers/weaviate/search/hybrid#with-a-custom-vector
4) Use Fusion ranking method
Example - from weaviate.gql.get import HybridFusion
hybrid_search_kwargs={"fusion": fusion_type=HybridFusion.RELATIVE_SCORE}
https://weaviate.io/developers/weaviate/search/hybrid#fusion-ranking-method
"""
query_obj = self.client.query.get(self.index_name, self.attributes)
if where_filter:
query_obj = query_obj.with_where(where_filter)
if score:
query_obj = query_obj.with_additional(["score", "explainScore"])
if hybrid_search_kwargs is None:
hybrid_search_kwargs = {}
result = (
query_obj.with_hybrid(query, alpha=self.alpha, **hybrid_search_kwargs)
.with_limit(self.k)
.do()
)
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self.index_name]:
text = res.pop(self.text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
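# Illustrative usage sketch (editor's addition, not part of the original module).
# It assumes a Weaviate instance at the given URL with the text2vec-openai module
# configured; the index name is a placeholder.
def _example_weaviate_hybrid_usage() -> None:
    import weaviate
    client = weaviate.Client(url="http://localhost:8080")
    retriever = WeaviateHybridSearchRetriever(
        client=client,
        index_name="LangChain",
        text_key="text",
        attributes=[],
        create_schema_if_missing=True,
    )
    retriever.add_documents([Document(page_content="foo"), Document(page_content="bar")])
    docs = retriever.get_relevant_documents("foo")
    print(docs)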
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~evaluation~qa~test_eval_chain.py | """Test LLM Bash functionality."""
import sys
from typing import Type
import pytest
from langchain.chains.llm import LLMChain
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
_parse_string_eval_output,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_eval_chain() -> None:
"""Test a simple eval chain."""
example = {"query": "What's my name", "answer": "John Doe"}
prediction = {"result": "John Doe"}
fake_qa_eval_chain = QAEvalChain.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction])
assert outputs[0] == outputs[1]
assert fake_qa_eval_chain.output_key in outputs[0]
assert outputs[0][fake_qa_eval_chain.output_key] == "foo"
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
@pytest.mark.parametrize("chain_cls", [ContextQAEvalChain, CotQAEvalChain])
def test_context_eval_chain(chain_cls: Type[ContextQAEvalChain]) -> None:
"""Test a simple eval chain."""
example = {
"query": "What's my name",
"context": "The name of this person is John Doe",
}
prediction = {"result": "John Doe"}
fake_qa_eval_chain = chain_cls.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction])
assert outputs[0] == outputs[1]
assert "text" in outputs[0]
assert outputs[0]["text"] == "foo"
@pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
def test_implements_string_evaluator_protocol(
chain_cls: Type[LLMChain],
) -> None:
assert issubclass(chain_cls, StringEvaluator)
@pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
def test_returns_expected_results(
chain_cls: Type[LLMChain],
) -> None:
fake_llm = FakeLLM(
queries={"text": "The meaning of life\nCORRECT"}, sequential_responses=True
)
chain = chain_cls.from_llm(fake_llm) # type: ignore
results = chain.evaluate_strings(
prediction="my prediction", reference="my reference", input="my input"
)
assert results["score"] == 1
@pytest.mark.parametrize(
"output,expected",
[
(
""" GRADE: CORRECT
QUESTION: according to the passage, what is the main reason that the author wrote this passage?
STUDENT ANSWER: to explain the importance of washing your hands
TRUE ANSWER: to explain the importance of washing your hands
GRADE:""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
(
""" Here is my step-by-step reasoning to grade the student's answer:
1. The question asks who founded the Roanoke settlement.
2. The context states that the grade incorrect answer is Walter Raleigh.
3. The student's answer is "Sir Walter Raleigh".
4. The student's answer matches the context, which states the answer is Walter Raleigh.
5. The addition of "Sir" in the student's answer does not contradict the context. It provides extra detail about Walter Raleigh's title, but the core answer of Walter Raleigh is still correct.
6. Therefore, the student's answer contains the same factual information as the true answer, so it should be graded as correct.
GRADE: CORRECT""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
(
""" CORRECT
QUESTION: who was the first president of the united states?
STUDENT ANSWER: George Washington
TRUE ANSWER: George Washington was the first president of the United States.
GRADE:""",
{
"value": "CORRECT",
"score": 1,
},
),
(
"""The student's answer is "Regent's Park," which matches the correct answer given in the context. Therefore, the student's answer is CORRECT.""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
],
)
def test_qa_output_parser(output: str, expected: dict) -> None:
expected["reasoning"] = output.strip()
assert _parse_string_eval_output(output) == expected
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~multi_vector.py | from typing import List
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import Field
from langchain.schema import BaseRetriever, BaseStore, Document
from langchain.schema.vectorstore import VectorStore
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
docstore: BaseStore[str, Document]
"""The storage layer for the parent documents"""
id_key: str = "doc_id"
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
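# Illustrative usage sketch (editor's addition, not part of the original module).
# Small chunks are embedded in the vectorstore and keyed by `doc_id`, while the full
# parent documents live in the docstore; Chroma and OpenAIEmbeddings are placeholder
# choices for any VectorStore / Embeddings pair.
def _example_multi_vector_usage() -> None:
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.storage import InMemoryStore
    from langchain.vectorstores import Chroma
    id_key = "doc_id"
    retriever = MultiVectorRetriever(
        vectorstore=Chroma(
            collection_name="chunks", embedding_function=OpenAIEmbeddings()
        ),
        docstore=InMemoryStore(),
        id_key=id_key,
    )
    parent = Document(page_content="a long parent document about retrievers")
    retriever.docstore.mset([("parent-1", parent)])
    retriever.vectorstore.add_documents(
        [
            Document(
                page_content="a small chunk about retrievers",
                metadata={id_key: "parent-1"},
            )
        ]
    )
    docs = retriever.get_relevant_documents("retrievers")
    print(docs)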
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_predictionguard.py | """Test Prediction Guard API wrapper."""
from langchain.llms.predictionguard import PredictionGuard
def test_predictionguard_call() -> None:
"""Test valid call to prediction guard."""
llm = PredictionGuard(model="OpenAI-text-davinci-003")
output = llm("Say foo:")
assert isinstance(output, str)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~vectorstores~milvus.py | from __future__ import annotations
import logging
from typing import Any, Iterable, List, Optional, Tuple, Union
from uuid import uuid4
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
DEFAULT_MILVUS_CONNECTION = {
"host": "localhost",
"port": "19530",
"user": "",
"password": "",
"secure": False,
}
class Milvus(VectorStore):
"""`Milvus` vector store.
You need to install `pymilvus` and run Milvus.
See the following documentation for how to run a Milvus instance:
https://milvus.io/docs/install_standalone-docker.md
If looking for a hosted Milvus, take a look at this documentation:
https://zilliz.com/cloud and make use of the Zilliz vectorstore found in
this project.
IF USING L2/IP metric, IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA.
Args:
embedding_function (Embeddings): Function used to embed the text.
collection_name (str): Which Milvus collection to use. Defaults to
"LangChainCollection".
connection_args (Optional[dict[str, any]]): The connection args used for
            this class come in the form of a dict.
consistency_level (str): The consistency level to use for a collection.
Defaults to "Session".
index_params (Optional[dict]): Which index params to use. Defaults to
HNSW/AUTOINDEX depending on service.
search_params (Optional[dict]): Which search params to use. Defaults to
default of index.
drop_old (Optional[bool]): Whether to drop the current collection. Defaults
to False.
primary_field (str): Name of the primary key field. Defaults to "pk".
text_field (str): Name of the text field. Defaults to "text".
vector_field (str): Name of the vector field. Defaults to "vector".
    The connection args used for this class come in the form of a dict,
here are a few of the options:
address (str): The actual address of Milvus
instance. Example address: "localhost:19530"
uri (str): The uri of Milvus instance. Example uri:
"http://randomwebsite:19530",
"tcp:foobarsite:19530",
"https://ok.s3.south.com:19530".
host (str): The host of Milvus instance. Default at "localhost",
PyMilvus will fill in the default host if only port is provided.
port (str/int): The port of Milvus instance. Default at 19530, PyMilvus
will fill in the default port if only host is provided.
user (str): Use which user to connect to Milvus instance. If user and
password are provided, we will add related header in every RPC call.
password (str): Required when user is provided. The password
corresponding to the user.
secure (bool): Default is false. If set to true, tls will be enabled.
client_key_path (str): If use tls two-way authentication, need to
write the client.key path.
client_pem_path (str): If use tls two-way authentication, need to
write the client.pem path.
ca_pem_path (str): If use tls two-way authentication, need to write
the ca.pem path.
server_pem_path (str): If use tls one-way authentication, need to
write the server.pem path.
server_name (str): If use tls, need to write the common name.
Example:
.. code-block:: python
from langchain.vectorstores import Milvus
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
# Connect to a milvus instance on localhost
milvus_store = Milvus(
embedding_function = Embeddings,
collection_name = "LangChainCollection",
drop_old = True,
)
Raises:
ValueError: If the pymilvus python package is not installed.
"""
def __init__(
self,
embedding_function: Embeddings,
collection_name: str = "LangChainCollection",
connection_args: Optional[dict[str, Any]] = None,
consistency_level: str = "Session",
index_params: Optional[dict] = None,
search_params: Optional[dict] = None,
drop_old: Optional[bool] = False,
*,
primary_field: str = "pk",
text_field: str = "text",
vector_field: str = "vector",
):
"""Initialize the Milvus vector store."""
try:
from pymilvus import Collection, utility
except ImportError:
raise ValueError(
"Could not import pymilvus python package. "
"Please install it with `pip install pymilvus`."
)
# Default search params when one is not provided.
self.default_search_params = {
"IVF_FLAT": {"metric_type": "L2", "params": {"nprobe": 10}},
"IVF_SQ8": {"metric_type": "L2", "params": {"nprobe": 10}},
"IVF_PQ": {"metric_type": "L2", "params": {"nprobe": 10}},
"HNSW": {"metric_type": "L2", "params": {"ef": 10}},
"RHNSW_FLAT": {"metric_type": "L2", "params": {"ef": 10}},
"RHNSW_SQ": {"metric_type": "L2", "params": {"ef": 10}},
"RHNSW_PQ": {"metric_type": "L2", "params": {"ef": 10}},
"IVF_HNSW": {"metric_type": "L2", "params": {"nprobe": 10, "ef": 10}},
"ANNOY": {"metric_type": "L2", "params": {"search_k": 10}},
"AUTOINDEX": {"metric_type": "L2", "params": {}},
}
self.embedding_func = embedding_function
self.collection_name = collection_name
self.index_params = index_params
self.search_params = search_params
self.consistency_level = consistency_level
        # In order for a collection to be compatible, pk needs to be auto-id and int
self._primary_field = primary_field
# In order for compatibility, the text field will need to be called "text"
self._text_field = text_field
# In order for compatibility, the vector field needs to be called "vector"
self._vector_field = vector_field
self.fields: list[str] = []
# Create the connection to the server
if connection_args is None:
connection_args = DEFAULT_MILVUS_CONNECTION
self.alias = self._create_connection_alias(connection_args)
self.col: Optional[Collection] = None
# Grab the existing collection if it exists
if utility.has_collection(self.collection_name, using=self.alias):
self.col = Collection(
self.collection_name,
using=self.alias,
)
# If need to drop old, drop it
if drop_old and isinstance(self.col, Collection):
self.col.drop()
self.col = None
# Initialize the vector store
self._init()
@property
def embeddings(self) -> Embeddings:
return self.embedding_func
def _create_connection_alias(self, connection_args: dict) -> str:
"""Create the connection to the Milvus server."""
from pymilvus import MilvusException, connections
# Grab the connection arguments that are used for checking existing connection
host: str = connection_args.get("host", None)
port: Union[str, int] = connection_args.get("port", None)
address: str = connection_args.get("address", None)
uri: str = connection_args.get("uri", None)
user = connection_args.get("user", None)
# Order of use is host/port, uri, address
if host is not None and port is not None:
given_address = str(host) + ":" + str(port)
elif uri is not None:
given_address = uri.split("https://")[1]
elif address is not None:
given_address = address
else:
given_address = None
logger.debug("Missing standard address type for reuse attempt")
# User defaults to empty string when getting connection info
if user is not None:
tmp_user = user
else:
tmp_user = ""
# If a valid address was given, then check if a connection exists
if given_address is not None:
for con in connections.list_connections():
addr = connections.get_connection_addr(con[0])
if (
con[1]
and ("address" in addr)
and (addr["address"] == given_address)
and ("user" in addr)
and (addr["user"] == tmp_user)
):
logger.debug("Using previous connection: %s", con[0])
return con[0]
# Generate a new connection if one doesn't exist
alias = uuid4().hex
try:
connections.connect(alias=alias, **connection_args)
logger.debug("Created new connection using: %s", alias)
return alias
except MilvusException as e:
logger.error("Failed to create new connection using: %s", alias)
raise e
def _init(
self, embeddings: Optional[list] = None, metadatas: Optional[list[dict]] = None
) -> None:
if embeddings is not None:
self._create_collection(embeddings, metadatas)
self._extract_fields()
self._create_index()
self._create_search_params()
self._load()
def _create_collection(
self, embeddings: list, metadatas: Optional[list[dict]] = None
) -> None:
from pymilvus import (
Collection,
CollectionSchema,
DataType,
FieldSchema,
MilvusException,
)
from pymilvus.orm.types import infer_dtype_bydata
# Determine embedding dim
dim = len(embeddings[0])
fields = []
# Determine metadata schema
if metadatas:
# Create FieldSchema for each entry in metadata.
for key, value in metadatas[0].items():
# Infer the corresponding datatype of the metadata
dtype = infer_dtype_bydata(value)
# Datatype isn't compatible
if dtype == DataType.UNKNOWN or dtype == DataType.NONE:
logger.error(
"Failure to create collection, unrecognized dtype for key: %s",
key,
)
raise ValueError(f"Unrecognized datatype for {key}.")
                # Datatype is a string/varchar equivalent
elif dtype == DataType.VARCHAR:
fields.append(FieldSchema(key, DataType.VARCHAR, max_length=65_535))
else:
fields.append(FieldSchema(key, dtype))
# Create the text field
fields.append(
FieldSchema(self._text_field, DataType.VARCHAR, max_length=65_535)
)
# Create the primary key field
fields.append(
FieldSchema(
self._primary_field, DataType.INT64, is_primary=True, auto_id=True
)
)
# Create the vector field, supports binary or float vectors
fields.append(
FieldSchema(self._vector_field, infer_dtype_bydata(embeddings[0]), dim=dim)
)
# Create the schema for the collection
schema = CollectionSchema(fields)
# Create the collection
try:
self.col = Collection(
name=self.collection_name,
schema=schema,
consistency_level=self.consistency_level,
using=self.alias,
)
except MilvusException as e:
logger.error(
"Failed to create collection: %s error: %s", self.collection_name, e
)
raise e
def _extract_fields(self) -> None:
"""Grab the existing fields from the Collection"""
from pymilvus import Collection
if isinstance(self.col, Collection):
schema = self.col.schema
for x in schema.fields:
self.fields.append(x.name)
# Since primary field is auto-id, no need to track it
self.fields.remove(self._primary_field)
def _get_index(self) -> Optional[dict[str, Any]]:
"""Return the vector index information if it exists"""
from pymilvus import Collection
if isinstance(self.col, Collection):
for x in self.col.indexes:
if x.field_name == self._vector_field:
return x.to_dict()
return None
def _create_index(self) -> None:
"""Create a index on the collection"""
from pymilvus import Collection, MilvusException
if isinstance(self.col, Collection) and self._get_index() is None:
try:
# If no index params, use a default HNSW based one
if self.index_params is None:
self.index_params = {
"metric_type": "L2",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
try:
self.col.create_index(
self._vector_field,
index_params=self.index_params,
using=self.alias,
)
# If default did not work, most likely on Zilliz Cloud
except MilvusException:
# Use AUTOINDEX based index
self.index_params = {
"metric_type": "L2",
"index_type": "AUTOINDEX",
"params": {},
}
self.col.create_index(
self._vector_field,
index_params=self.index_params,
using=self.alias,
)
logger.debug(
"Successfully created an index on collection: %s",
self.collection_name,
)
except MilvusException as e:
logger.error(
"Failed to create an index on collection: %s", self.collection_name
)
raise e
def _create_search_params(self) -> None:
"""Generate search params based on the current index type"""
from pymilvus import Collection
if isinstance(self.col, Collection) and self.search_params is None:
index = self._get_index()
if index is not None:
index_type: str = index["index_param"]["index_type"]
metric_type: str = index["index_param"]["metric_type"]
self.search_params = self.default_search_params[index_type]
self.search_params["metric_type"] = metric_type
def _load(self) -> None:
"""Load the collection if available."""
from pymilvus import Collection
if isinstance(self.col, Collection) and self._get_index() is not None:
self.col.load()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
timeout: Optional[int] = None,
batch_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Insert text data into Milvus.
        Inserting data when the collection has not been made yet will result
in creating a new Collection. The data of the first entity decides
the schema of the new collection, the dim is extracted from the first
embedding and the columns are decided by the first metadata dict.
        Metadata keys will need to be present for all inserted values. At
the moment there is no None equivalent in Milvus.
Args:
texts (Iterable[str]): The texts to embed, it is assumed
that they all fit in memory.
metadatas (Optional[List[dict]]): Metadata dicts attached to each of
the texts. Defaults to None.
timeout (Optional[int]): Timeout for each batch insert. Defaults
to None.
batch_size (int, optional): Batch size to use for insertion.
Defaults to 1000.
Raises:
MilvusException: Failure to add texts
Returns:
List[str]: The resulting keys for each inserted element.
"""
from pymilvus import Collection, MilvusException
texts = list(texts)
try:
embeddings = self.embedding_func.embed_documents(texts)
except NotImplementedError:
embeddings = [self.embedding_func.embed_query(x) for x in texts]
if len(embeddings) == 0:
logger.debug("Nothing to insert, skipping.")
return []
# If the collection hasn't been initialized yet, perform all steps to do so
if not isinstance(self.col, Collection):
self._init(embeddings, metadatas)
# Dict to hold all insert columns
insert_dict: dict[str, list] = {
self._text_field: texts,
self._vector_field: embeddings,
}
# Collect the metadata into the insert dict.
if metadatas is not None:
for d in metadatas:
for key, value in d.items():
if key in self.fields:
insert_dict.setdefault(key, []).append(value)
# Total insert count
vectors: list = insert_dict[self._vector_field]
total_count = len(vectors)
pks: list[str] = []
assert isinstance(self.col, Collection)
for i in range(0, total_count, batch_size):
# Grab end index
end = min(i + batch_size, total_count)
# Convert dict to list of lists batch for insertion
insert_list = [insert_dict[x][i:end] for x in self.fields]
# Insert into the collection.
try:
res: Collection
res = self.col.insert(insert_list, timeout=timeout, **kwargs)
pks.extend(res.primary_keys)
except MilvusException as e:
logger.error(
"Failed to insert batch starting at entity: %s/%s", i, total_count
)
raise e
return pks
def similarity_search(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string.
Args:
query (str): The text to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
res = self.similarity_search_with_score(
query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string.
Args:
embedding (List[float]): The embedding vector to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score.
For more information about the search parameters, take a look at the pymilvus
documentation found here:
https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md
Args:
query (str): The text being searched.
k (int, optional): The amount of results to return. Defaults to 4.
param (dict): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
            List[Tuple[Document, float]]: Result doc and score.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
# Embed the query text.
embedding = self.embedding_func.embed_query(query)
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return res
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score.
For more information about the search parameters, take a look at the pymilvus
documentation found here:
https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md
Args:
embedding (List[float]): The embedding vector being searched.
k (int, optional): The amount of results to return. Defaults to 4.
param (dict): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Tuple[Document, float]]: Result doc and score.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
if param is None:
param = self.search_params
# Determine result metadata fields.
output_fields = self.fields[:]
output_fields.remove(self._vector_field)
# Perform the search.
res = self.col.search(
data=[embedding],
anns_field=self._vector_field,
param=param,
limit=k,
expr=expr,
output_fields=output_fields,
timeout=timeout,
**kwargs,
)
# Organize results.
ret = []
for result in res[0]:
meta = {x: result.entity.get(x) for x in output_fields}
doc = Document(page_content=meta.pop(self._text_field), metadata=meta)
pair = (doc, result.score)
ret.append(pair)
return ret
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR.
Args:
query (str): The text being searched.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
embedding = self.embedding_func.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
param=param,
expr=expr,
timeout=timeout,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: list[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR.
Args:
            embedding (List[float]): The embedding vector being searched.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
if param is None:
param = self.search_params
# Determine result metadata fields.
output_fields = self.fields[:]
output_fields.remove(self._vector_field)
# Perform the search.
res = self.col.search(
data=[embedding],
anns_field=self._vector_field,
param=param,
limit=fetch_k,
expr=expr,
output_fields=output_fields,
timeout=timeout,
**kwargs,
)
# Organize results.
ids = []
documents = []
scores = []
for result in res[0]:
meta = {x: result.entity.get(x) for x in output_fields}
doc = Document(page_content=meta.pop(self._text_field), metadata=meta)
documents.append(doc)
scores.append(result.score)
ids.append(result.id)
vectors = self.col.query(
expr=f"{self._primary_field} in {ids}",
output_fields=[self._primary_field, self._vector_field],
timeout=timeout,
)
# Reorganize the results from query to match search order.
vectors = {x[self._primary_field]: x[self._vector_field] for x in vectors}
ordered_result_embeddings = [vectors[x] for x in ids]
# Get the new order of results.
new_ordering = maximal_marginal_relevance(
np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult
)
# Reorder the values and return.
ret = []
for x in new_ordering:
# Function can return -1 index
if x == -1:
break
else:
ret.append(documents[x])
return ret
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = "LangChainCollection",
connection_args: dict[str, Any] = DEFAULT_MILVUS_CONNECTION,
consistency_level: str = "Session",
index_params: Optional[dict] = None,
search_params: Optional[dict] = None,
drop_old: bool = False,
**kwargs: Any,
) -> Milvus:
"""Create a Milvus collection, indexes it with HNSW, and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
collection_name (str, optional): Collection name to use. Defaults to
"LangChainCollection".
connection_args (dict[str, Any], optional): Connection args to use. Defaults
to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional): Which consistency level to use. Defaults
to "Session".
index_params (Optional[dict], optional): Which index_params to use. Defaults
to None.
search_params (Optional[dict], optional): Which search params to use.
Defaults to None.
drop_old (Optional[bool], optional): Whether to drop the collection with
that name if it exists. Defaults to False.
Returns:
Milvus: Milvus Vector Store
"""
vector_db = cls(
embedding_function=embedding,
collection_name=collection_name,
connection_args=connection_args,
consistency_level=consistency_level,
index_params=index_params,
search_params=search_params,
drop_old=drop_old,
**kwargs,
)
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db
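# Illustrative usage sketch (editor's addition, not part of the original module).
# It assumes a Milvus instance listening on localhost:19530; the texts and the
# collection name are placeholders.
def _example_milvus_from_texts() -> None:
    from langchain.embeddings import OpenAIEmbeddings
    store = Milvus.from_texts(
        texts=["harrison worked at kensho", "bears like to eat honey"],
        embedding=OpenAIEmbeddings(),
        collection_name="LangChainExample",
        connection_args={"host": "localhost", "port": "19530"},
        drop_old=True,
    )
    docs = store.similarity_search("Where did harrison work?", k=1)
    print(docs)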
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~langchain_experimental~llms~anthropic_functions.py | import json
from collections import defaultdict
from html.parser import HTMLParser
from typing import Any, DefaultDict, Dict, List, Optional
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
)
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
ChatGeneration,
ChatResult,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
SystemMessage,
)
from langchain_experimental.pydantic_v1 import root_validator
prompt = """In addition to responding, you can use tools. \
You have access to the following tools.
{tools}
In order to use a tool, you can use <tool></tool> to specify the name, \
and the <tool_input></tool_input> tags to specify the parameters. \
Each parameter should be passed in as <$param_name>$value</$param_name>, \
Where $param_name is the name of the specific parameter, and $value \
is the value for that parameter.
You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that accepts a single \
parameter 'query' that could run a google search, in order to search \
for the weather in SF you would respond:
<tool>search</tool><tool_input><query>weather in SF</query></tool_input>
<observation>64 degrees</observation>"""
class TagParser(HTMLParser):
def __init__(self) -> None:
"""A heavy-handed solution, but it's fast for prototyping.
Might be re-implemented later to restrict scope to the limited grammar, and
more efficiency.
Uses an HTML parser to parse a limited grammar that allows
for syntax of the form:
INPUT -> JUNK? VALUE*
JUNK -> JUNK_CHARACTER+
JUNK_CHARACTER -> whitespace | ,
VALUE -> <IDENTIFIER>DATA</IDENTIFIER> | OBJECT
OBJECT -> <IDENTIFIER>VALUE+</IDENTIFIER>
IDENTIFIER -> [a-Z][a-Z0-9_]*
DATA -> .*
Interprets the data to allow repetition of tags and recursion
to support representation of complex types.
^ Just another approximately wrong grammar specification.
"""
super().__init__()
self.parse_data: DefaultDict[str, List[Any]] = defaultdict(list)
self.stack: List[DefaultDict[str, List[str]]] = [self.parse_data]
self.success = True
self.depth = 0
self.data: Optional[str] = None
def handle_starttag(self, tag: str, attrs: Any) -> None:
"""Hook when a new tag is encountered."""
self.depth += 1
self.stack.append(defaultdict(list))
self.data = None
def handle_endtag(self, tag: str) -> None:
"""Hook when a tag is closed."""
self.depth -= 1
        top_of_stack = dict(self.stack.pop(-1))  # Pop the dictionary; it is no longer needed
        # If a leaf node
is_leaf = self.data is not None
# Annoying to type here, code is tested, hopefully OK
value = self.data if is_leaf else top_of_stack
# Difficult to type this correctly with mypy (maybe impossible?)
# Can be nested indefinitely, so requires self referencing type
self.stack[-1][tag].append(value) # type: ignore
        # Reset the data so that if we encounter a sequence of end tags, we
# don't confuse an outer end tag for belonging to a leaf node.
self.data = None
def handle_data(self, data: str) -> None:
"""Hook when handling data."""
stripped_data = data.strip()
# The only data that's allowed is whitespace or a comma surrounded by whitespace
if self.depth == 0 and stripped_data not in (",", ""):
# If this is triggered the parse should be considered invalid.
self.success = False
if stripped_data: # ignore whitespace-only strings
self.data = stripped_data
def _destrip(tool_input: Any) -> Any:
if isinstance(tool_input, dict):
return {k: _destrip(v) for k, v in tool_input.items()}
elif isinstance(tool_input, list):
if isinstance(tool_input[0], str):
if len(tool_input) == 1:
return tool_input[0]
else:
raise ValueError
elif isinstance(tool_input[0], dict):
return [_destrip(v) for v in tool_input]
else:
raise ValueError
else:
raise ValueError
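# Illustrative parsing sketch (editor's addition, not part of the original module).
# It shows how the <tool_input> protocol described in the prompt above is turned into
# the arguments of a function call; the tag names mirror that prompt.
def _example_tag_parsing() -> None:
    parser = TagParser()
    parser.feed("<tool_input><query>weather in SF</query></tool_input>")
    arguments = _destrip(parser.parse_data["tool_input"][0])
    assert arguments == {"query": "weather in SF"}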
class AnthropicFunctions(BaseChatModel):
llm: BaseChatModel
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
values["llm"] = values.get("llm") or ChatAnthropic(**values)
return values
@property
def model(self) -> BaseChatModel:
"""For backwards compatibility."""
return self.llm
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
forced = False
function_call = ""
if "functions" in kwargs:
content = prompt.format(tools=json.dumps(kwargs["functions"], indent=2))
system = SystemMessage(content=content)
messages = [system] + messages
del kwargs["functions"]
if stop is None:
stop = ["</tool_input>"]
else:
stop.append("</tool_input>")
if "function_call" in kwargs:
forced = True
function_call = kwargs["function_call"]["name"]
AIMessage(content=f"<tool>{function_call}</tool>")
del kwargs["function_call"]
else:
if "function_call" in kwargs:
raise ValueError(
"if `function_call` provided, `functions` must also be"
)
response = self.model.predict_messages(
messages, stop=stop, callbacks=run_manager, **kwargs
)
completion = response.content
if forced:
tag_parser = TagParser()
tag_parser.feed(completion.strip() + "</tool_input>")
v1 = tag_parser.parse_data["tool_input"][0]
kwargs = {
"function_call": {
"name": function_call,
"arguments": json.dumps(_destrip(v1)),
}
}
message = AIMessage(content="", additional_kwargs=kwargs)
return ChatResult(generations=[ChatGeneration(message=message)])
elif "<tool>" in completion:
tag_parser = TagParser()
tag_parser.feed(completion.strip() + "</tool_input>")
msg = completion.split("<tool>")[0]
v1 = tag_parser.parse_data["tool_input"][0]
kwargs = {
"function_call": {
"name": tag_parser.parse_data["tool"][0],
"arguments": json.dumps(_destrip(v1)),
}
}
message = AIMessage(content=msg, additional_kwargs=kwargs)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
return ChatResult(generations=[ChatGeneration(message=response)])
@property
def _llm_type(self) -> str:
return "anthropic_functions"
| [
"In addition to responding, you can use tools. You have access to the following tools.\n\n{tools}\n\nIn order to use a tool, you can use <tool></tool> to specify the name, and the <tool_input></tool_input> tags to specify the parameters. Each parameter should be passed in as <$param_name>$value</$param_name>, Where $param_name is the name of the specific parameter, and $value is the value for that parameter.\n\nYou will then get back a response in the form <observation></observation>\nFor example, if you have a tool called 'search' that accepts a single parameter 'query' that could run a google search, in order to search for the weather in SF you would respond:\n\n<tool>search</tool><tool_input><query>weather in SF</query></tool_input>\n<observation>64 degrees</observation>",
"<tool></tool>"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~retrievers~test_merger_retriever.py | from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.merger_retriever import MergerRetriever
from langchain.vectorstores import Chroma
def test_merger_retriever_get_relevant_docs() -> None:
"""Test get_relevant_docs."""
texts_group_a = [
"This is a document about the Boston Celtics",
"Fly me to the moon is one of my favourite songs."
"I simply love going to the movies",
]
texts_group_b = [
"This is a document about the Poenix Suns",
"The Boston Celtics won the game by 20 points",
"Real stupidity beats artificial intelligence every time. TP",
]
embeddings = OpenAIEmbeddings()
retriever_a = Chroma.from_texts(texts_group_a, embedding=embeddings).as_retriever(
search_kwargs={"k": 1}
)
retriever_b = Chroma.from_texts(texts_group_b, embedding=embeddings).as_retriever(
search_kwargs={"k": 1}
)
# The Lord of the Retrievers.
lotr = MergerRetriever(retrievers=[retriever_a, retriever_b])
actual = lotr.get_relevant_documents("Tell me about the Celtics")
assert len(actual) == 2
assert texts_group_a[0] in [d.page_content for d in actual]
assert texts_group_b[1] in [d.page_content for d in actual]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_epsilla.py | """Test Epsilla functionality."""
from langchain.vectorstores import Epsilla
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _test_from_texts() -> Epsilla:
from pyepsilla import vectordb
embeddings = FakeEmbeddings()
client = vectordb.Client()
return Epsilla.from_texts(fake_texts, embeddings, client)
def test_epsilla() -> None:
instance = _test_from_texts()
search = instance.similarity_search(query="bar", k=1)
result_texts = [doc.page_content for doc in search]
assert "bar" in result_texts
def test_epsilla_add_texts() -> None:
from pyepsilla import vectordb
embeddings = FakeEmbeddings()
client = vectordb.Client()
db = Epsilla(client, embeddings)
db.add_texts(fake_texts)
search = db.similarity_search(query="foo", k=1)
result_texts = [doc.page_content for doc in search]
assert "foo" in result_texts
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~powerbi~toolkit.py | """Toolkit for interacting with a Power BI dataset."""
from typing import List, Optional, Union
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.pydantic_v1 import Field
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import BaseTool
from langchain.tools.powerbi.prompt import (
QUESTION_TO_QUERY_BASE,
SINGLE_QUESTION_TO_QUERY,
USER_INPUT,
)
from langchain.tools.powerbi.tool import (
InfoPowerBITool,
ListPowerBITool,
QueryPowerBITool,
)
from langchain.utilities.powerbi import PowerBIDataset
class PowerBIToolkit(BaseToolkit):
"""Toolkit for interacting with Power BI dataset.
*Security Note*: This toolkit interacts with an external service.
Control access to who can use this toolkit.
Make sure that the capabilities given by this toolkit to the calling
code are appropriately scoped to the application.
See https://python.langchain.com/docs/security for more information.
"""
powerbi: PowerBIDataset = Field(exclude=True)
llm: Union[BaseLanguageModel, BaseChatModel] = Field(exclude=True)
examples: Optional[str] = None
max_iterations: int = 5
callback_manager: Optional[BaseCallbackManager] = None
output_token_limit: Optional[int] = None
tiktoken_model_name: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
QueryPowerBITool(
llm_chain=self._get_chain(),
powerbi=self.powerbi,
examples=self.examples,
max_iterations=self.max_iterations,
output_token_limit=self.output_token_limit,
tiktoken_model_name=self.tiktoken_model_name,
),
InfoPowerBITool(powerbi=self.powerbi),
ListPowerBITool(powerbi=self.powerbi),
]
def _get_chain(self) -> LLMChain:
"""Construct the chain based on the callback manager and model type."""
if isinstance(self.llm, BaseLanguageModel):
return LLMChain(
llm=self.llm,
callback_manager=self.callback_manager
if self.callback_manager
else None,
prompt=PromptTemplate(
template=SINGLE_QUESTION_TO_QUERY,
input_variables=["tool_input", "tables", "schemas", "examples"],
),
)
system_prompt = SystemMessagePromptTemplate(
prompt=PromptTemplate(
template=QUESTION_TO_QUERY_BASE,
input_variables=["tables", "schemas", "examples"],
)
)
human_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template=USER_INPUT,
input_variables=["tool_input"],
)
)
return LLMChain(
llm=self.llm,
callback_manager=self.callback_manager if self.callback_manager else None,
prompt=ChatPromptTemplate.from_messages([system_prompt, human_prompt]),
)
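# --- Usage sketch added for illustration (not part of the original langchain module) ---
# A minimal sketch, assuming an Azure-authenticated environment and an OpenAI key:
# the dataset id, table names and DefaultAzureCredential below are placeholders,
# not values taken from this file.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential  # assumed to be installed
    from langchain.agents import AgentType, initialize_agent
    from langchain.chat_models import ChatOpenAI
    from langchain.utilities.powerbi import PowerBIDataset
    powerbi = PowerBIDataset(
        dataset_id="<your-dataset-id>",  # placeholder dataset id
        table_names=["Sales", "Customers"],  # placeholder table names
        credential=DefaultAzureCredential(),
    )
    llm = ChatOpenAI(temperature=0)
    toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm)
    # Wire the toolkit's tools into a standard agent executor.
    agent = initialize_agent(
        tools=toolkit.get_tools(),
        llm=llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    agent.run("How many rows are in the Sales table?")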
| [
"tool_input",
"[PLACEHOLDER, PLACEHOLDER]"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~memory~test_combined_memory.py | """Test for CombinedMemory class"""
# from langchain.prompts import PromptTemplate
from typing import List
import pytest
from langchain.memory import CombinedMemory, ConversationBufferMemory
@pytest.fixture()
def example_memory() -> List[ConversationBufferMemory]:
example_1 = ConversationBufferMemory(memory_key="foo")
example_2 = ConversationBufferMemory(memory_key="bar")
example_3 = ConversationBufferMemory(memory_key="bar")
return [example_1, example_2, example_3]
def test_basic_functionality(example_memory: List[ConversationBufferMemory]) -> None:
"""Test basic functionality of methods exposed by class"""
combined_memory = CombinedMemory(memories=[example_memory[0], example_memory[1]])
assert combined_memory.memory_variables == ["foo", "bar"]
assert combined_memory.load_memory_variables({}) == {"foo": "", "bar": ""}
combined_memory.save_context(
{"input": "Hello there"}, {"output": "Hello, how can I help you?"}
)
assert combined_memory.load_memory_variables({}) == {
"foo": "Human: Hello there\nAI: Hello, how can I help you?",
"bar": "Human: Hello there\nAI: Hello, how can I help you?",
}
combined_memory.clear()
assert combined_memory.load_memory_variables({}) == {"foo": "", "bar": ""}
def test_repeated_memory_var(example_memory: List[ConversationBufferMemory]) -> None:
"""Test raising error when repeated memory variables found"""
with pytest.raises(ValueError):
CombinedMemory(memories=[example_memory[1], example_memory[2]])
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~evaluation~agents~test_eval_chain.py | """Test agent trajectory evaluation chain."""
from typing import Any, Dict, List, Optional, Tuple
import pytest
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.evaluation.agents.trajectory_eval_chain import (
TrajectoryEval,
TrajectoryEvalChain,
TrajectoryOutputParser,
)
from langchain.pydantic_v1 import Field
from langchain.schema import AgentAction, BaseMessage, OutputParserException
from langchain.tools.base import tool
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
@pytest.fixture
def intermediate_steps() -> List[Tuple[AgentAction, str]]:
return [
(
AgentAction(
tool="Foo",
tool_input="Bar",
log="Star date 2021-06-13: Foo received input: Bar",
),
"Baz",
),
]
@tool
def foo(bar: str) -> str:
"""Foo."""
return bar
class _FakeTrajectoryChatModel(FakeChatModel):
queries: Dict = Field(default_factory=dict)
sequential_responses: Optional[bool] = False
response_index: int = 0
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.sequential_responses:
response = self.queries[list(self.queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
else:
prompt = messages[0].content
return self.queries[prompt]
def test_trajectory_output_parser_parse() -> None:
trajectory_output_parser = TrajectoryOutputParser()
text = """Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.
Score: 2"""
got = trajectory_output_parser.parse(text)
want = TrajectoryEval(
score=0.25,
reasoning="""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.""",
)
assert got["score"] == want["score"]
assert got["reasoning"] == want["reasoning"]
with pytest.raises(OutputParserException):
trajectory_output_parser.parse(
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2."""
)
with pytest.raises(OutputParserException):
trajectory_output_parser.parse(
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.
Score: 9"""
)
with pytest.raises(OutputParserException):
trajectory_output_parser.parse(
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.
Score: 10"""
)
with pytest.raises(OutputParserException):
trajectory_output_parser.parse(
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.
Score: 0.1"""
)
with pytest.raises(OutputParserException):
trajectory_output_parser.parse(
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.
Score: One"""
)
def test_trajectory_eval_chain(
intermediate_steps: List[Tuple[AgentAction, str]]
) -> None:
llm = _FakeTrajectoryChatModel(
queries={
"a": "Trajectory good\nScore: 5",
"b": "Trajectory not good\nScore: 1",
},
sequential_responses=True,
)
chain = TrajectoryEvalChain.from_llm(llm=llm, agent_tools=[foo]) # type: ignore
# Test when ref is not provided
res = chain.evaluate_agent_trajectory(
input="What is your favorite food?",
agent_trajectory=intermediate_steps,
prediction="I like pie.",
)
assert res["score"] == 1.0
# Test when ref is provided
res = chain.evaluate_agent_trajectory(
input="What is your favorite food?",
agent_trajectory=intermediate_steps,
prediction="I like pie.",
reference="Paris",
)
assert res["score"] == 0.0
def test_trajectory_eval_chain_no_tools(
intermediate_steps: List[Tuple[AgentAction, str]]
) -> None:
llm = _FakeTrajectoryChatModel(
queries={
"a": "Trajectory good\nScore: 5",
"b": "Trajectory not good\nScore: 1",
},
sequential_responses=True,
)
chain = TrajectoryEvalChain.from_llm(llm=llm) # type: ignore
res = chain.evaluate_agent_trajectory(
input="What is your favorite food?",
agent_trajectory=intermediate_steps,
prediction="I like pie.",
)
assert res["score"] == 1.0
res = chain.evaluate_agent_trajectory(
input="What is your favorite food?",
agent_trajectory=intermediate_steps,
prediction="I like pie.",
reference="Paris",
)
assert res["score"] == 0.0
def test_old_api_works(intermediate_steps: List[Tuple[AgentAction, str]]) -> None:
llm = _FakeTrajectoryChatModel(
queries={
"a": "Trajectory good\nScore: 5",
"b": "Trajectory not good\nScore: 1",
},
sequential_responses=True,
)
chain = TrajectoryEvalChain.from_llm(llm=llm) # type: ignore
res = chain(
{
"question": "What is your favorite food?",
"agent_trajectory": intermediate_steps,
"answer": "I like pie.",
}
)
assert res["score"] == 1.0
res = chain(
{
"question": "What is your favorite food?",
"agent_trajectory": intermediate_steps,
"answer": "I like pie.",
"reference": "Paris",
}
)
assert res["score"] == 0.0
| [
"Foo."
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~zapier~toolkit.py | """[DEPRECATED] Zapier Toolkit."""
from typing import List
from langchain._api import warn_deprecated
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.zapier.tool import ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper
class ZapierToolkit(BaseToolkit):
"""Zapier Toolkit."""
tools: List[BaseTool] = []
@classmethod
def from_zapier_nla_wrapper(
cls, zapier_nla_wrapper: ZapierNLAWrapper
) -> "ZapierToolkit":
"""Create a toolkit from a ZapierNLAWrapper."""
actions = zapier_nla_wrapper.list()
tools = [
ZapierNLARunAction(
action_id=action["id"],
zapier_description=action["description"],
params_schema=action["params"],
api_wrapper=zapier_nla_wrapper,
)
for action in actions
]
return cls(tools=tools)
@classmethod
async def async_from_zapier_nla_wrapper(
cls, zapier_nla_wrapper: ZapierNLAWrapper
) -> "ZapierToolkit":
"""Create a toolkit from a ZapierNLAWrapper."""
actions = await zapier_nla_wrapper.alist()
tools = [
ZapierNLARunAction(
action_id=action["id"],
zapier_description=action["description"],
params_schema=action["params"],
api_wrapper=zapier_nla_wrapper,
)
for action in actions
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
warn_deprecated(
since="0.0.319",
message=(
"This tool will be deprecated on 2023-11-17. See "
"https://nla.zapier.com/sunset/ for details"
),
)
return self.tools
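# --- Usage sketch added for illustration (not part of the original langchain module) ---
# A minimal sketch of the (now deprecated) flow, assuming the ZAPIER_NLA_API_KEY
# environment variable is set; the task string is just an example.
if __name__ == "__main__":
    from langchain.agents import AgentType, initialize_agent
    from langchain.llms import OpenAI
    from langchain.utilities.zapier import ZapierNLAWrapper
    wrapper = ZapierNLAWrapper()  # reads ZAPIER_NLA_API_KEY from the environment
    toolkit = ZapierToolkit.from_zapier_nla_wrapper(wrapper)
    agent = initialize_agent(
        toolkit.get_tools(),
        OpenAI(temperature=0),
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    agent.run("Summarize the last email I received and send the summary to myself.")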
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chat_models~test_ernie.py | import pytest
from langchain.chat_models.ernie import ErnieBotChat
from langchain.schema.messages import AIMessage, HumanMessage
def test_chat_ernie_bot() -> None:
chat = ErnieBotChat()
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_chat_ernie_bot_with_model_name() -> None:
chat = ErnieBotChat(model_name="ERNIE-Bot")
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_chat_ernie_bot_with_temperature() -> None:
chat = ErnieBotChat(model_name="ERNIE-Bot", temperature=1.0)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_chat_ernie_bot_with_kwargs() -> None:
chat = ErnieBotChat()
message = HumanMessage(content="Hello")
response = chat([message], temperature=0.88, top_p=0.7)
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_extra_kwargs() -> None:
chat = ErnieBotChat(temperature=0.88, top_p=0.7)
assert chat.temperature == 0.88
assert chat.top_p == 0.7
def test_wrong_temperature_1() -> None:
chat = ErnieBotChat()
message = HumanMessage(content="Hello")
with pytest.raises(ValueError) as e:
chat([message], temperature=1.2)
assert "parameter check failed, temperature range is (0, 1.0]" in str(e)
def test_wrong_temperature_2() -> None:
chat = ErnieBotChat()
message = HumanMessage(content="Hello")
with pytest.raises(ValueError) as e:
chat([message], temperature=0)
assert "parameter check failed, temperature range is (0, 1.0]" in str(e)
| [
"Hello"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_hologres.py | """Test Hologres functionality."""
import os
from typing import List
from langchain.docstore.document import Document
from langchain.vectorstores.hologres import Hologres
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
CONNECTION_STRING = Hologres.connection_string_from_db_params(
host=os.environ.get("TEST_HOLOGRES_HOST", "localhost"),
port=int(os.environ.get("TEST_HOLOGRES_PORT", "80")),
database=os.environ.get("TEST_HOLOGRES_DATABASE", "postgres"),
user=os.environ.get("TEST_HOLOGRES_USER", "postgres"),
password=os.environ.get("TEST_HOLOGRES_PASSWORD", "postgres"),
)
ADA_TOKEN_COUNT = 1536
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return simple embeddings."""
return [
[float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)] for i in range(len(texts))
]
def embed_query(self, text: str) -> List[float]:
"""Return simple embeddings."""
return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
def test_hologres() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table",
embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_hologres_embeddings() -> None:
"""Test end to end construction with embeddings and search."""
texts = ["foo", "bar", "baz"]
text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Hologres.from_embeddings(
text_embeddings=text_embedding_pairs,
table_name="test_table",
embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_hologres_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_hologres_with_metadatas_with_scores() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_hologres_with_filter_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"})
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_hologres_with_filter_distant_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
assert output == [(Document(page_content="baz", metadata={"page": "2"}), 4.0)]
def test_hologres_with_filter_no_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"})
assert output == []
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~test_dependencies.py | """A unit test meant to catch accidental introduction of non-optional dependencies."""
from pathlib import Path
from typing import Any, Dict, Mapping
import pytest
import toml
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture()
def poetry_conf() -> Dict[str, Any]:
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)["tool"]["poetry"]
def test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"""A test that checks if a new non-optional dependency is being introduced.
If this test is triggered, it means that a contributor is trying to introduce a new
required dependency. This should be avoided in most situations.
"""
# Get the dependencies from the [tool.poetry.dependencies] section
dependencies = poetry_conf["dependencies"]
is_required = {
package_name: isinstance(requirements, str)
or not requirements.get("optional", False)
for package_name, requirements in dependencies.items()
}
required_dependencies = [
package_name for package_name, required in is_required.items() if required
]
assert sorted(required_dependencies) == [
"PyYAML",
"SQLAlchemy",
"aiohttp",
"anyio",
"async-timeout",
"dataclasses-json",
"jsonpatch",
"langsmith",
"numpy",
"pydantic",
"python",
"requests",
"tenacity",
]
unrequired_dependencies = [
package_name for package_name, required in is_required.items() if not required
]
in_extras = [dep for group in poetry_conf["extras"].values() for dep in group]
assert set(unrequired_dependencies) == set(in_extras)
def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
test_group_deps = sorted(poetry_conf["group"]["test"]["dependencies"])
assert test_group_deps == [
"duckdb-engine",
"freezegun",
"lark",
"pandas",
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-dotenv",
"pytest-mock",
"pytest-socket",
"pytest-watcher",
"responses",
"syrupy",
]
def test_imports() -> None:
"""Test that you can import all top level things okay."""
from langchain.agents import OpenAIFunctionsAgent # noqa: F401
from langchain.callbacks import OpenAICallbackHandler # noqa: F401
from langchain.chains import LLMChain # noqa: F401
from langchain.chat_models import ChatOpenAI # noqa: F401
from langchain.document_loaders import BSHTMLLoader # noqa: F401
from langchain.embeddings import OpenAIEmbeddings # noqa: F401
from langchain.llms import OpenAI # noqa: F401
from langchain.retrievers import VespaRetriever # noqa: F401
from langchain.schema import BasePromptTemplate # noqa: F401
from langchain.tools import DuckDuckGoSearchResults # noqa: F401
from langchain.utilities import SerpAPIWrapper # noqa: F401
from langchain.vectorstores import FAISS # noqa: F401
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~titan_takeoff.py | from typing import Any, Iterator, List, Mapping, Optional
import requests
from requests.exceptions import ConnectionError
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.schema.output import GenerationChunk
class TitanTakeoff(LLM):
"""Wrapper around Titan Takeoff APIs."""
base_url: str = "http://localhost:8000"
"""Specifies the baseURL to use for the Titan Takeoff API.
Default = http://localhost:8000.
"""
generate_max_length: int = 128
"""Maximum generation length. Default = 128."""
sampling_topk: int = 1
"""Sample predictions from the top K most probable candidates. Default = 1."""
sampling_topp: float = 1.0
"""Sample from predictions whose cumulative probability exceeds this value.
Default = 1.0.
"""
sampling_temperature: float = 1.0
"""Sample with randomness. Bigger temperatures are associated with
more randomness and 'creativity'. Default = 1.0.
"""
repetition_penalty: float = 1.0
"""Penalise the generation of tokens that have been generated before.
Set to > 1 to penalize. Default = 1 (no penalty).
"""
no_repeat_ngram_size: int = 0
"""Prevent repetitions of ngrams of this size. Default = 0 (turned off)."""
streaming: bool = False
"""Whether to stream the output. Default = False."""
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Titan Takeoff Server."""
params = {
"generate_max_length": self.generate_max_length,
"sampling_topk": self.sampling_topk,
"sampling_topp": self.sampling_topp,
"sampling_temperature": self.sampling_temperature,
"repetition_penalty": self.repetition_penalty,
"no_repeat_ngram_size": self.no_repeat_ngram_size,
}
return params
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "titan_takeoff"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Titan Takeoff generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
try:
if self.streaming:
text_output = ""
for chunk in self._stream(
prompt=prompt,
stop=stop,
run_manager=run_manager,
):
text_output += chunk.text
return text_output
url = f"{self.base_url}/generate"
params = {"text": prompt, **self._default_params}
response = requests.post(url, json=params)
response.raise_for_status()
response.encoding = "utf-8"
text = ""
if "message" in response.json():
text = response.json()["message"]
else:
raise ValueError("Something went wrong.")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
except ConnectionError:
raise ConnectionError(
"Could not connect to Titan Takeoff server. \
Please make sure that the server is running."
)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Titan Takeoff stream endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Yields:
A dictionary like object containing a string token.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
url = f"{self.base_url}/generate_stream"
params = {"text": prompt, **self._default_params}
response = requests.post(url, json=params, stream=True)
response.encoding = "utf-8"
for text in response.iter_content(chunk_size=1, decode_unicode=True):
if text:
chunk = GenerationChunk(text=text)
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"base_url": self.base_url, **{}, **self._default_params}
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_tencentvectordb.py | """Test TencentVectorDB functionality."""
import time
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.vectorstores import TencentVectorDB
from langchain.vectorstores.tencentvectordb import ConnectionParams
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _tencent_vector_db_from_texts(
metadatas: Optional[List[dict]] = None, drop: bool = True
) -> TencentVectorDB:
conn_params = ConnectionParams(
url="http://10.0.X.X",
key="eC4bLRy2va******************************",
username="root",
timeout=20,
)
return TencentVectorDB.from_texts(
fake_texts,
FakeEmbeddings(),
metadatas=metadatas,
connection_params=conn_params,
drop_old=drop,
)
def test_tencent_vector_db() -> None:
"""Test end to end construction and search."""
docsearch = _tencent_vector_db_from_texts()
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_tencent_vector_db_with_score() -> None:
"""Test end to end construction and search with scores and IDs."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score("foo", k=3)
docs = [o[0] for o in output]
assert docs == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
Document(page_content="baz", metadata={"page": 2}),
]
def test_tencent_vector_db_max_marginal_relevance_search() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
]
def test_tencent_vector_db_add_extra() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
docsearch.add_texts(texts, metadatas)
time.sleep(3)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 6
def test_tencent_vector_db_no_drop() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
del docsearch
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas, drop=False)
time.sleep(3)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 6
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~vertexai.py | from __future__ import annotations
from concurrent.futures import Executor, ThreadPoolExecutor
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
Iterator,
List,
Optional,
Union,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM, create_base_retry_decorator
from langchain.pydantic_v1 import BaseModel, Field, root_validator
from langchain.schema import (
Generation,
LLMResult,
)
from langchain.schema.output import GenerationChunk
from langchain.utilities.vertexai import (
get_client_info,
init_vertexai,
raise_vertex_import_error,
)
if TYPE_CHECKING:
from google.cloud.aiplatform.gapic import (
PredictionServiceAsyncClient,
PredictionServiceClient,
)
from vertexai.language_models._language_models import (
TextGenerationResponse,
_LanguageModel,
)
def _response_to_generation(
response: TextGenerationResponse,
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
try:
generation_info = {
"is_blocked": response.is_blocked,
"safety_attributes": response.safety_attributes,
}
except Exception:
generation_info = None
return GenerationChunk(text=response.text, generation_info=generation_info)
def is_codey_model(model_name: str) -> bool:
"""Returns True if the model name is a Codey model.
Args:
model_name: The model name to check.
Returns: True if the model name is a Codey model.
"""
return "code" in model_name
def _create_retry_decorator(
llm: VertexAI,
*,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
import google.api_core
errors = [
google.api_core.exceptions.ResourceExhausted,
google.api_core.exceptions.ServiceUnavailable,
google.api_core.exceptions.Aborted,
google.api_core.exceptions.DeadlineExceeded,
]
decorator = create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
return decorator
def completion_with_retry(
llm: VertexAI,
*args: Any,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(*args: Any, **kwargs: Any) -> Any:
return llm.client.predict(*args, **kwargs)
return _completion_with_retry(*args, **kwargs)
def stream_completion_with_retry(
llm: VertexAI,
*args: Any,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(*args: Any, **kwargs: Any) -> Any:
return llm.client.predict_streaming(*args, **kwargs)
return _completion_with_retry(*args, **kwargs)
async def acompletion_with_retry(
llm: VertexAI,
*args: Any,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _acompletion_with_retry(*args: Any, **kwargs: Any) -> Any:
return await llm.client.predict_async(*args, **kwargs)
return await _acompletion_with_retry(*args, **kwargs)
class _VertexAIBase(BaseModel):
project: Optional[str] = None
"The default GCP project to use when making Vertex API calls."
location: str = "us-central1"
"The default location to use when making API calls."
request_parallelism: int = 5
"The amount of parallelism allowed for requests issued to VertexAI models. "
"Default is 5."
max_retries: int = 6
"""The maximum number of retries to make when generating."""
task_executor: ClassVar[Optional[Executor]] = Field(default=None, exclude=True)
stop: Optional[List[str]] = None
"Optional list of stop words to use when generating."
model_name: Optional[str] = None
"Underlying model name."
@classmethod
def _get_task_executor(cls, request_parallelism: int = 5) -> Executor:
if cls.task_executor is None:
cls.task_executor = ThreadPoolExecutor(max_workers=request_parallelism)
return cls.task_executor
class _VertexAICommon(_VertexAIBase):
client: "_LanguageModel" = None #: :meta private:
model_name: str
"Underlying model name."
temperature: float = 0.0
"Sampling temperature, it controls the degree of randomness in token selection."
max_output_tokens: int = 128
"Token limit determines the maximum amount of text output from one prompt."
top_p: float = 0.95
"Tokens are selected from most probable to least until the sum of their "
"probabilities equals the top-p value. Top-p is ignored for Codey models."
top_k: int = 40
"How the model selects tokens for output, the next token is selected from "
"among the top-k most probable tokens. Top-k is ignored for Codey models."
credentials: Any = Field(default=None, exclude=True)
"The default custom credentials (google.auth.credentials.Credentials) to use "
"when making API calls. If not provided, credentials will be ascertained from "
"the environment."
n: int = 1
"""How many completions to generate for each prompt."""
streaming: bool = False
"""Whether to stream the results or not."""
@property
def _llm_type(self) -> str:
return "vertexai"
@property
def is_codey_model(self) -> bool:
return is_codey_model(self.model_name)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _default_params(self) -> Dict[str, Any]:
if self.is_codey_model:
return {
"temperature": self.temperature,
"max_output_tokens": self.max_output_tokens,
}
else:
return {
"temperature": self.temperature,
"max_output_tokens": self.max_output_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"candidate_count": self.n,
}
@classmethod
def _try_init_vertexai(cls, values: Dict) -> None:
allowed_params = ["project", "location", "credentials"]
params = {k: v for k, v in values.items() if k in allowed_params}
init_vertexai(**params)
return None
def _prepare_params(
self,
stop: Optional[List[str]] = None,
stream: bool = False,
**kwargs: Any,
) -> dict:
stop_sequences = stop or self.stop
params_mapping = {"n": "candidate_count"}
params = {params_mapping.get(k, k): v for k, v in kwargs.items()}
params = {**self._default_params, "stop_sequences": stop_sequences, **params}
if stream or self.streaming:
            params.pop("candidate_count", None)  # Codey params do not set this key.
return params
class VertexAI(_VertexAICommon, BaseLLM):
"""Google Vertex AI large language models."""
model_name: str = "text-bison"
"The name of the Vertex AI large language model."
tuned_model_name: Optional[str] = None
"The name of a tuned model. If provided, model_name is ignored."
@classmethod
    def is_lc_serializable(cls) -> bool:
return True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
cls._try_init_vertexai(values)
tuned_model_name = values.get("tuned_model_name")
model_name = values["model_name"]
try:
if not is_codey_model(model_name):
from vertexai.preview.language_models import TextGenerationModel
if tuned_model_name:
values["client"] = TextGenerationModel.get_tuned_model(
tuned_model_name
)
else:
values["client"] = TextGenerationModel.from_pretrained(model_name)
else:
from vertexai.preview.language_models import CodeGenerationModel
if tuned_model_name:
values["client"] = CodeGenerationModel.get_tuned_model(
tuned_model_name
)
else:
values["client"] = CodeGenerationModel.from_pretrained(model_name)
except ImportError:
raise_vertex_import_error()
if values["streaming"] and values["n"] > 1:
raise ValueError("Only one candidate can be generated with streaming!")
return values
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> LLMResult:
should_stream = stream if stream is not None else self.streaming
params = self._prepare_params(stop=stop, stream=should_stream, **kwargs)
generations = []
for prompt in prompts:
if should_stream:
generation = GenerationChunk(text="")
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
generation += chunk
generations.append([generation])
else:
res = completion_with_retry(
self, prompt, run_manager=run_manager, **params
)
if self.is_codey_model:
generations.append([_response_to_generation(res)])
else:
generations.append(
[_response_to_generation(r) for r in res.candidates]
)
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
params = self._prepare_params(stop=stop, **kwargs)
generations = []
for prompt in prompts:
res = await acompletion_with_retry(
self, prompt, run_manager=run_manager, **params
)
generations.append([_response_to_generation(r) for r in res.candidates])
return LLMResult(generations=generations)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = self._prepare_params(stop=stop, stream=True, **kwargs)
for stream_resp in stream_completion_with_retry(
self, prompt, run_manager=run_manager, **params
):
chunk = _response_to_generation(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
)
class VertexAIModelGarden(_VertexAIBase, BaseLLM):
"""Large language models served from Vertex AI Model Garden."""
client: "PredictionServiceClient" = None #: :meta private:
async_client: "PredictionServiceAsyncClient" = None #: :meta private:
endpoint_id: str
"A name of an endpoint where the model has been deployed."
allowed_model_args: Optional[List[str]] = None
"""Allowed optional args to be passed to the model."""
prompt_arg: str = "prompt"
result_arg: str = "generated_text"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
from google.api_core.client_options import ClientOptions
from google.cloud.aiplatform.gapic import (
PredictionServiceAsyncClient,
PredictionServiceClient,
)
except ImportError:
raise_vertex_import_error()
if values["project"] is None:
raise ValueError(
"A GCP project should be provided to run inference on Model Garden!"
)
client_options = ClientOptions(
api_endpoint=f"{values['location']}-aiplatform.googleapis.com"
)
client_info = get_client_info(module="vertex-ai-model-garden")
values["client"] = PredictionServiceClient(
client_options=client_options, client_info=client_info
)
values["async_client"] = PredictionServiceAsyncClient(
client_options=client_options, client_info=client_info
)
return values
@property
def _llm_type(self) -> str:
return "vertexai_model_garden"
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
try:
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
except ImportError:
raise ImportError(
"protobuf package not found, please install it with"
" `pip install protobuf`"
)
instances = []
for prompt in prompts:
if self.allowed_model_args:
instance = {
k: v for k, v in kwargs.items() if k in self.allowed_model_args
}
else:
instance = {}
instance[self.prompt_arg] = prompt
instances.append(instance)
predict_instances = [
json_format.ParseDict(instance_dict, Value()) for instance_dict in instances
]
endpoint = self.client.endpoint_path(
project=self.project, location=self.location, endpoint=self.endpoint_id
)
response = self.client.predict(endpoint=endpoint, instances=predict_instances)
generations: List[List[Generation]] = []
for result in response.predictions:
generations.append(
[Generation(text=prediction[self.result_arg]) for prediction in result]
)
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
try:
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
except ImportError:
raise ImportError(
"protobuf package not found, please install it with"
" `pip install protobuf`"
)
instances = []
for prompt in prompts:
if self.allowed_model_args:
instance = {
k: v for k, v in kwargs.items() if k in self.allowed_model_args
}
else:
instance = {}
instance[self.prompt_arg] = prompt
instances.append(instance)
predict_instances = [
json_format.ParseDict(instance_dict, Value()) for instance_dict in instances
]
endpoint = self.async_client.endpoint_path(
project=self.project, location=self.location, endpoint=self.endpoint_id
)
response = await self.async_client.predict(
endpoint=endpoint, instances=predict_instances
)
generations: List[List[Generation]] = []
for result in response.predictions:
generations.append(
[Generation(text=prediction[self.result_arg]) for prediction in result]
)
return LLMResult(generations=generations)
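# --- Usage sketch added for illustration (not part of the original langchain module) ---
# A minimal sketch of both entry points, assuming the environment is already
# authenticated with GCP (e.g. via `gcloud auth application-default login`); the
# project id and endpoint id are placeholders, not real values.
if __name__ == "__main__":
    # Foundation model served directly by Vertex AI.
    llm = VertexAI(model_name="text-bison", temperature=0.2, max_output_tokens=256)
    print(llm("Explain what a vector database is in one sentence."))
    # Model deployed through Model Garden on a dedicated prediction endpoint.
    garden_llm = VertexAIModelGarden(
        project="my-gcp-project",  # placeholder project id
        endpoint_id="1234567890",  # placeholder endpoint id
    )
    print(garden_llm("Hello, world!"))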
| [
"prompt"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~utilities~searchapi.py | from typing import Any, Dict, Optional
import aiohttp
import requests
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.utils import get_from_dict_or_env
class SearchApiAPIWrapper(BaseModel):
"""
Wrapper around SearchApi API.
To use, you should have the environment variable ``SEARCHAPI_API_KEY``
set with your API key, or pass `searchapi_api_key`
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.utilities import SearchApiAPIWrapper
searchapi = SearchApiAPIWrapper()
"""
# Use "google" engine by default.
# Full list of supported ones can be found in https://www.searchapi.io docs
engine: str = "google"
searchapi_api_key: Optional[str] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that API key exists in environment."""
searchapi_api_key = get_from_dict_or_env(
values, "searchapi_api_key", "SEARCHAPI_API_KEY"
)
values["searchapi_api_key"] = searchapi_api_key
return values
def run(self, query: str, **kwargs: Any) -> str:
results = self.results(query, **kwargs)
return self._result_as_string(results)
async def arun(self, query: str, **kwargs: Any) -> str:
results = await self.aresults(query, **kwargs)
return self._result_as_string(results)
def results(self, query: str, **kwargs: Any) -> dict:
results = self._search_api_results(query, **kwargs)
return results
async def aresults(self, query: str, **kwargs: Any) -> dict:
results = await self._async_search_api_results(query, **kwargs)
return results
def _prepare_request(self, query: str, **kwargs: Any) -> dict:
return {
"url": "https://www.searchapi.io/api/v1/search",
"headers": {
"Authorization": f"Bearer {self.searchapi_api_key}",
},
"params": {
"engine": self.engine,
"q": query,
**{key: value for key, value in kwargs.items() if value is not None},
},
}
def _search_api_results(self, query: str, **kwargs: Any) -> dict:
request_details = self._prepare_request(query, **kwargs)
response = requests.get(
url=request_details["url"],
params=request_details["params"],
headers=request_details["headers"],
)
response.raise_for_status()
return response.json()
async def _async_search_api_results(self, query: str, **kwargs: Any) -> dict:
"""Use aiohttp to send request to SearchApi API and return results async."""
request_details = self._prepare_request(query, **kwargs)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(
url=request_details["url"],
headers=request_details["headers"],
params=request_details["params"],
raise_for_status=True,
) as response:
results = await response.json()
else:
async with self.aiosession.get(
url=request_details["url"],
headers=request_details["headers"],
params=request_details["params"],
raise_for_status=True,
) as response:
results = await response.json()
return results
@staticmethod
def _result_as_string(result: dict) -> str:
toret = "No good search result found"
if "answer_box" in result.keys() and "answer" in result["answer_box"].keys():
toret = result["answer_box"]["answer"]
elif "answer_box" in result.keys() and "snippet" in result["answer_box"].keys():
toret = result["answer_box"]["snippet"]
elif "knowledge_graph" in result.keys():
toret = result["knowledge_graph"]["description"]
elif "organic_results" in result.keys():
snippets = [
r["snippet"] for r in result["organic_results"] if "snippet" in r.keys()
]
toret = "\n".join(snippets)
elif "jobs" in result.keys():
jobs = [
r["description"] for r in result["jobs"] if "description" in r.keys()
]
toret = "\n".join(jobs)
elif "videos" in result.keys():
videos = [
f"""Title: "{r["title"]}" Link: {r["link"]}"""
for r in result["videos"]
if "title" in r.keys()
]
toret = "\n".join(videos)
elif "images" in result.keys():
images = [
f"""Title: "{r["title"]}" Link: {r["original"]["link"]}"""
for r in result["images"]
if "original" in r.keys()
]
toret = "\n".join(images)
return toret
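# --- Usage sketch added for illustration (not part of the original langchain module) ---
# A minimal sketch of the two main entry points, assuming SEARCHAPI_API_KEY is set
# in the environment. The "google_news" engine name and the `num` parameter are
# assumptions based on searchapi.io's documented engines, not values from this file.
if __name__ == "__main__":
    search = SearchApiAPIWrapper()  # defaults to the "google" engine
    print(search.run("langchain python library"))
    # The raw JSON payload is also available, and extra engine parameters are
    # forwarded as keyword arguments.
    news = SearchApiAPIWrapper(engine="google_news")
    payload = news.results("large language models", num=5)
    print(list(payload.keys()))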
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~tools~office365~create_draft_message.py | from typing import List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.office365.base import O365BaseTool
class CreateDraftMessageSchema(BaseModel):
"""Input for SendMessageTool."""
body: str = Field(
...,
description="The message body to include in the draft.",
)
to: List[str] = Field(
...,
description="The list of recipients.",
)
subject: str = Field(
...,
description="The subject of the message.",
)
cc: Optional[List[str]] = Field(
None,
description="The list of CC recipients.",
)
bcc: Optional[List[str]] = Field(
None,
description="The list of BCC recipients.",
)
class O365CreateDraftMessage(O365BaseTool):
"""Tool for creating a draft email in Office 365."""
name: str = "create_email_draft"
description: str = (
"Use this tool to create a draft email with the provided message fields."
)
args_schema: Type[CreateDraftMessageSchema] = CreateDraftMessageSchema
def _run(
self,
body: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get mailbox object
mailbox = self.account.mailbox()
message = mailbox.new_message()
# Assign message values
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
            message.bcc.add(bcc)
message.save_draft()
output = "Draft created: " + str(message)
return output
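# --- Usage sketch added for illustration (not part of the original langchain module) ---
# A minimal sketch of invoking the tool directly. It assumes the Office 365 app
# credentials expected by O365BaseTool (e.g. CLIENT_ID / CLIENT_SECRET environment
# variables) are configured; the addresses and message text are placeholders.
if __name__ == "__main__":
    tool = O365CreateDraftMessage()
    result = tool.run(
        {
            "body": "Hi team, the weekly report follows in a separate message.",
            "to": ["[email protected]"],
            "subject": "Weekly report",
            "cc": ["[email protected]"],
        }
    )
    print(result)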
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~document_loaders~test_telegram.py | from pathlib import Path
import pytest
from langchain.document_loaders import TelegramChatApiLoader, TelegramChatFileLoader
def test_telegram_chat_file_loader() -> None:
"""Test TelegramChatFileLoader."""
file_path = Path(__file__).parent / "test_docs" / "telegram.json"
loader = TelegramChatFileLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["source"] == str(file_path)
assert docs[0].page_content == (
"Henry on 2020-01-01T00:00:02: It's 2020...\n\n"
"Henry on 2020-01-01T00:00:04: Fireworks!\n\n"
"Grace 🧤 ðŸ\x8d’ on 2020-01-01T00:00:05: You're a minute late!\n\n"
)
@pytest.mark.requires("telethon", "pandas")
def test_telegram_channel_loader_parsing() -> None:
"""Test TelegramChatApiLoader."""
file_path = Path(__file__).parent / "test_docs" / "telegram_channel.json"
# if we don't provide any value, it will skip fetching from telegram
# and will check the parsing logic.
loader = TelegramChatApiLoader(file_path=str(file_path))
docs = loader.load()
assert len(docs) == 1
print(docs[0].page_content)
assert docs[0].page_content == (
"Hello, world!.\nLLMs are awesome! Langchain is great. Telegram is the best!."
)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_google_palm.py | """Test Google PaLM Text API wrapper.
Note: This test must be run with the GOOGLE_API_KEY environment variable set to a
valid API key.
"""
from pathlib import Path
from langchain.llms.google_palm import GooglePalm
from langchain.llms.loading import load_llm
def test_google_palm_call() -> None:
"""Test valid call to Google PaLM text API."""
llm = GooglePalm(max_output_tokens=10)
output = llm("Say foo:")
assert isinstance(output, str)
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading a Google PaLM LLM."""
llm = GooglePalm(max_output_tokens=10)
llm.save(file_path=tmp_path / "google_palm.yaml")
loaded_llm = load_llm(tmp_path / "google_palm.yaml")
assert loaded_llm == llm
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~self_query~milvus.py | """Logic for converting internal query language to a valid Milvus query."""
from typing import Tuple, Union
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
COMPARATOR_TO_BER = {
Comparator.EQ: "==",
Comparator.GT: ">",
Comparator.GTE: ">=",
Comparator.LT: "<",
Comparator.LTE: "<=",
}
UNARY_OPERATORS = [Operator.NOT]
def process_value(value: Union[int, float, str]) -> str:
"""Convert a value to a string and add double quotes if it is a string.
    This is required for comparators involving strings.
Args:
value: The value to convert.
Returns:
The converted value as a string.
"""
#
if isinstance(value, str):
# If the value is already a string, add double quotes
return f'"{value}"'
else:
        # If the value is not a string, convert it to a string without double quotes
return str(value)
class MilvusTranslator(Visitor):
"""Translate Milvus internal query language elements to valid filters."""
"""Subset of allowed logical operators."""
allowed_operators = [Operator.AND, Operator.NOT, Operator.OR]
"""Subset of allowed logical comparators."""
allowed_comparators = [
Comparator.EQ,
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
]
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
value = func.value
if isinstance(func, Comparator):
value = COMPARATOR_TO_BER[func]
return f"{value}"
def visit_operation(self, operation: Operation) -> str:
if operation.operator in UNARY_OPERATORS and len(operation.arguments) == 1:
operator = self._format_func(operation.operator)
return operator + "(" + operation.arguments[0].accept(self) + ")"
elif operation.operator in UNARY_OPERATORS:
raise ValueError(
f'"{operation.operator.value}" can have only one argument in Milvus'
)
else:
args = [arg.accept(self) for arg in operation.arguments]
operator = self._format_func(operation.operator)
return "(" + (" " + operator + " ").join(args) + ")"
def visit_comparison(self, comparison: Comparison) -> str:
comparator = self._format_func(comparison.comparator)
processed_value = process_value(comparison.value)
attribute = comparison.attribute
return "( " + attribute + " " + comparator + " " + processed_value + " )"
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"expr": structured_query.filter.accept(self)}
return structured_query.query, kwargs
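# --- Usage sketch added for illustration (not part of the original langchain module) ---
# A minimal sketch of how the translator turns a structured query into a Milvus
# boolean expression; the attribute names and values are made up for illustration.
if __name__ == "__main__":
    query = StructuredQuery(
        query="dramas about aliens",
        filter=Operation(
            operator=Operator.AND,
            arguments=[
                Comparison(comparator=Comparator.GT, attribute="year", value=1990),
                Comparison(
                    comparator=Comparator.EQ,
                    attribute="genre",
                    value="science fiction",
                ),
            ],
        ),
        limit=None,
    )
    new_query, kwargs = MilvusTranslator().visit_structured_query(query)
    # Expected shape: {'expr': '(( year > 1990 ) and ( genre == "science fiction" ))'}
    print(new_query, kwargs)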
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~loading.py | """Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, List, Optional, Union
import yaml
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.tools import Tool
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
from langchain.schema.language_model import BaseLanguageModel
from langchain.utilities.loading import try_load_from_hub
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: List[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[List[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config) # type: ignore
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
valid_suffixes = {"json", "yaml"}
if hub_result := try_load_from_hub(
path, _load_agent_from_file, "agents", valid_suffixes
):
return hub_result
else:
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
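# --- Usage sketch added for illustration (not part of the original langchain module) ---
# A minimal sketch of round-tripping an agent through a local JSON file. The file
# path and tool list are placeholders, and an OPENAI_API_KEY is assumed for the LLM.
if __name__ == "__main__":
    from langchain.agents import AgentType, initialize_agent, load_tools
    from langchain.llms import OpenAI
    llm = OpenAI(temperature=0)
    tools = load_tools(["llm-math"], llm=llm)
    executor = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
    # Serializes the inner agent (prompt + LLM config), not the whole executor.
    executor.agent.save("my_agent.json")
    reloaded_agent = load_agent("my_agent.json")
    print(type(reloaded_agent).__name__)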
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~qdrant~test_from_texts.py | import tempfile
import uuid
from typing import Optional
import pytest
from langchain.schema import Document
from langchain.vectorstores import Qdrant
from langchain.vectorstores.qdrant import QdrantException
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.common import qdrant_is_not_running
def test_qdrant_from_texts_stores_duplicated_texts() -> None:
"""Test end to end Qdrant.from_texts stores duplicated texts separately."""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["abc", "abc"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
path=str(tmpdir),
)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("vector_name", [None, "my-vector"])
def test_qdrant_from_texts_stores_ids(
batch_size: int, vector_name: Optional[str]
) -> None:
"""Test end to end Qdrant.from_texts stores provided ids."""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
ids = [
"fa38d572-4c31-4579-aedc-1960d79df6df",
"cdc1aa36-d6ab-4fb2-8a94-56674fd27484",
]
vec_store = Qdrant.from_texts(
["abc", "def"],
ConsistentFakeEmbeddings(),
ids=ids,
collection_name=collection_name,
path=str(tmpdir),
batch_size=batch_size,
vector_name=vector_name,
)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
stored_ids = [point.id for point in client.scroll(collection_name)[0]]
assert set(ids) == set(stored_ids)
@pytest.mark.parametrize("vector_name", ["custom-vector"])
def test_qdrant_from_texts_stores_embeddings_as_named_vectors(vector_name: str) -> None:
"""Test end to end Qdrant.from_texts stores named vectors if name is provided."""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 5 == client.count(collection_name).count
assert all(
vector_name in point.vector # type: ignore[operator]
for point in client.scroll(collection_name, with_vectors=True)[0]
)
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
def test_qdrant_from_texts_reuses_same_collection(vector_name: Optional[str]) -> None:
"""Test if Qdrant.from_texts reuses the same collection"""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
embeddings = ConsistentFakeEmbeddings()
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
embeddings,
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
del vec_store
vec_store = Qdrant.from_texts(
["foo", "bar"],
embeddings,
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 7 == client.count(collection_name).count
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
def test_qdrant_from_texts_raises_error_on_different_dimensionality(
vector_name: Optional[str],
) -> None:
"""Test if Qdrant.from_texts raises an exception if dimensionality does not match"""
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
del vec_store
with pytest.raises(QdrantException):
Qdrant.from_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
@pytest.mark.parametrize(
["first_vector_name", "second_vector_name"],
[
(None, "custom-vector"),
("custom-vector", None),
("my-first-vector", "my-second_vector"),
],
)
def test_qdrant_from_texts_raises_error_on_different_vector_name(
first_vector_name: Optional[str],
second_vector_name: Optional[str],
) -> None:
"""Test if Qdrant.from_texts raises an exception if vector name does not match"""
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
path=str(tmpdir),
vector_name=first_vector_name,
)
del vec_store
with pytest.raises(QdrantException):
Qdrant.from_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
path=str(tmpdir),
vector_name=second_vector_name,
)
def test_qdrant_from_texts_raises_error_on_different_distance() -> None:
"""Test if Qdrant.from_texts raises an exception if distance does not match"""
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
path=str(tmpdir),
distance_func="Cosine",
)
del vec_store
with pytest.raises(QdrantException) as excinfo:
Qdrant.from_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
path=str(tmpdir),
distance_func="Euclid",
)
expected_message = (
"configured for COSINE similarity, but requested EUCLID. Please set "
"`distance_func` parameter to `COSINE`"
)
assert expected_message in str(excinfo.value)
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
def test_qdrant_from_texts_recreates_collection_on_force_recreate(
vector_name: Optional[str],
) -> None:
"""Test if Qdrant.from_texts recreates the collection even if config mismatches"""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
del vec_store
vec_store = Qdrant.from_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
force_recreate=True,
)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "foo"])
@pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "bar"])
def test_qdrant_from_texts_stores_metadatas(
batch_size: int, content_payload_key: str, metadata_payload_key: str
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Qdrant.from_texts(
texts,
ConsistentFakeEmbeddings(),
metadatas=metadatas,
location=":memory:",
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
batch_size=batch_size,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
def test_from_texts_passed_optimizers_config_and_on_disk_payload() -> None:
from qdrant_client import models
collection_name = uuid.uuid4().hex
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
optimizers_config = models.OptimizersConfigDiff(memmap_threshold=1000)
vec_store = Qdrant.from_texts(
texts,
ConsistentFakeEmbeddings(),
metadatas=metadatas,
optimizers_config=optimizers_config,
on_disk_payload=True,
on_disk=True,
collection_name=collection_name,
)
collection_info = vec_store.client.get_collection(collection_name)
assert collection_info.config.params.vectors.on_disk is True # type: ignore
assert collection_info.config.optimizer_config.memmap_threshold == 1000
assert collection_info.config.params.on_disk_payload is True
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_rocksetdb.py | import logging
import os
from langchain.docstore.document import Document
from langchain.vectorstores.rocksetdb import Rockset
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
fake_texts,
)
logger = logging.getLogger(__name__)
# To run these tests, make sure you have a collection with the name `langchain_demo`
# and the following ingest transformation:
#
# SELECT
# _input.* EXCEPT(_meta),
# VECTOR_ENFORCE(_input.description_embedding, 10, 'float') as
# description_embedding
# FROM
# _input
#
# We're using FakeEmbeddings utility to create text embeddings.
# It generates vector embeddings of length 10.
#
# Set env ROCKSET_DELETE_DOCS_ON_START=1 if you want to delete all docs from
# the collection before running any test. Be careful, this will delete any
# existing documents in your Rockset collection.
#
# See https://rockset.com/blog/introducing-vector-search-on-rockset/ for more details.
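# Example environment for running these tests (values are placeholders, grounded in
# the setup_class logic below):
#   export ROCKSET_API_KEY=<your api key>
#   export ROCKSET_REGION=usw2a1              # or use1a1 / euc1a1 / dev
#   export ROCKSET_DELETE_DOCS_ON_START=1     # optional, clears the collection first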
workspace = "langchain_tests"
collection_name = "langchain_demo"
text_key = "description"
embedding_key = "description_embedding"
class TestRockset:
rockset_vectorstore: Rockset
@classmethod
def setup_class(cls) -> None:
import rockset
import rockset.models
assert os.environ.get("ROCKSET_API_KEY") is not None
assert os.environ.get("ROCKSET_REGION") is not None
api_key = os.environ.get("ROCKSET_API_KEY")
region = os.environ.get("ROCKSET_REGION")
if region == "use1a1":
host = rockset.Regions.use1a1
elif region == "usw2a1":
host = rockset.Regions.usw2a1
elif region == "euc1a1":
host = rockset.Regions.euc1a1
elif region == "dev":
host = rockset.DevRegions.usw2a1
else:
logger.warning(
"Using ROCKSET_REGION=%s as-is; make sure it points to a valid "
"Rockset API server.",
region,
)
host = region
client = rockset.RocksetClient(host, api_key)
if os.environ.get("ROCKSET_DELETE_DOCS_ON_START") == "1":
logger.info(
"Deleting all existing documents from the Rockset collection %s",
collection_name,
)
query = f"select _id from {workspace}.{collection_name}"
query_response = client.Queries.query(sql={"query": query})
ids = [
str(r["_id"])
for r in getattr(
query_response, query_response.attribute_map["results"]
)
]
logger.info("Existing ids in collection: %s", ids)
client.Documents.delete_documents(
collection=collection_name,
data=[rockset.models.DeleteDocumentsRequestData(id=i) for i in ids],
workspace=workspace,
)
embeddings = ConsistentFakeEmbeddings()
embeddings.embed_documents(fake_texts)
cls.rockset_vectorstore = Rockset(
client, embeddings, collection_name, text_key, embedding_key, workspace
)
def test_rockset_insert_and_search(self) -> None:
"""Test end to end vector search in Rockset"""
texts = ["foo", "bar", "baz"]
metadatas = [{"metadata_index": i} for i in range(len(texts))]
ids = self.rockset_vectorstore.add_texts(
texts=texts,
metadatas=metadatas,
)
assert len(ids) == len(texts)
# Test that `foo` is closest to `foo`
output = self.rockset_vectorstore.similarity_search(
query="foo", distance_func=Rockset.DistanceFunction.COSINE_SIM, k=1
)
assert output == [Document(page_content="foo", metadata={"metadata_index": 0})]
# Find closest vector to `foo` which is not `foo`
output = self.rockset_vectorstore.similarity_search(
query="foo",
distance_func=Rockset.DistanceFunction.COSINE_SIM,
k=1,
where_str="metadata_index != 0",
)
assert output == [Document(page_content="bar", metadata={"metadata_index": 1})]
def test_build_query_sql(self) -> None:
vector = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
q_str = self.rockset_vectorstore._build_query_sql(
vector,
Rockset.DistanceFunction.COSINE_SIM,
4,
)
vector_str = ",".join(map(str, vector))
expected = f"""\
SELECT * EXCEPT({embedding_key}), \
COSINE_SIM({embedding_key}, [{vector_str}]) as dist
FROM {workspace}.{collection_name}
ORDER BY dist DESC
LIMIT 4
"""
assert q_str == expected
def test_build_query_sql_with_where(self) -> None:
vector = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
q_str = self.rockset_vectorstore._build_query_sql(
vector,
Rockset.DistanceFunction.COSINE_SIM,
4,
"age >= 10",
)
vector_str = ",".join(map(str, vector))
expected = f"""\
SELECT * EXCEPT({embedding_key}), \
COSINE_SIM({embedding_key}, [{vector_str}]) as dist
FROM {workspace}.{collection_name}
WHERE age >= 10
ORDER BY dist DESC
LIMIT 4
"""
assert q_str == expected
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~chat_models~test_ernie.py | import pytest
from langchain.chat_models.ernie import _convert_message_to_dict
from langchain.schema.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
def test__convert_dict_to_message_human() -> None:
message = HumanMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message = AIMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_dict_to_message_system() -> None:
message = SystemMessage(content="foo")
with pytest.raises(ValueError) as e:
_convert_message_to_dict(message)
assert "Got unknown type" in str(e)
def test__convert_dict_to_message_function() -> None:
message = FunctionMessage(name="foo", content="bar")
with pytest.raises(ValueError) as e:
_convert_message_to_dict(message)
assert "Got unknown type" in str(e)
| [
"bar",
"foo"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_recursive_url_loader.py | import pytest as pytest
from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader
@pytest.mark.asyncio
def test_async_recursive_url_loader() -> None:
url = "https://docs.python.org/3.9/"
loader = RecursiveUrlLoader(
url,
extractor=lambda _: "placeholder",
use_async=True,
max_depth=3,
timeout=None,
check_response_status=True,
)
docs = loader.load()
assert len(docs) == 513
assert docs[0].page_content == "placeholder"
@pytest.mark.asyncio
def test_async_recursive_url_loader_deterministic() -> None:
url = "https://docs.python.org/3.9/"
loader = RecursiveUrlLoader(
url,
use_async=True,
max_depth=3,
timeout=None,
)
docs = sorted(loader.load(), key=lambda d: d.metadata["source"])
docs_2 = sorted(loader.load(), key=lambda d: d.metadata["source"])
assert docs == docs_2
def test_sync_recursive_url_loader() -> None:
url = "https://docs.python.org/3.9/"
loader = RecursiveUrlLoader(
url, extractor=lambda _: "placeholder", use_async=False, max_depth=2
)
docs = loader.load()
assert len(docs) == 25
assert docs[0].page_content == "placeholder"
@pytest.mark.asyncio
def test_sync_async_equivalent() -> None:
url = "https://docs.python.org/3.9/"
loader = RecursiveUrlLoader(url, use_async=False, max_depth=2)
async_loader = RecursiveUrlLoader(url, use_async=True, max_depth=2)
docs = sorted(loader.load(), key=lambda d: d.metadata["source"])
async_docs = sorted(async_loader.load(), key=lambda d: d.metadata["source"])
assert docs == async_docs
def test_loading_invalid_url() -> None:
url = "https://this.url.is.invalid/this/is/a/test"
loader = RecursiveUrlLoader(
url, max_depth=1, extractor=lambda _: "placeholder", use_async=False
)
docs = loader.load()
assert len(docs) == 0
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_ctransformers.py | """Test C Transformers wrapper."""
import pytest
from langchain.llms import CTransformers
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_ctransformers_call() -> None:
"""Test valid call to C Transformers."""
config = {"max_new_tokens": 5}
callback_handler = FakeCallbackHandler()
llm = CTransformers(
model="marella/gpt-2-ggml",
config=config,
callbacks=[callback_handler],
)
output = llm("Say foo:")
assert isinstance(output, str)
assert len(output) > 1
assert 0 < callback_handler.llm_streams <= config["max_new_tokens"]
@pytest.mark.asyncio
async def test_ctransformers_async_inference() -> None:
config = {"max_new_tokens": 5}
callback_handler = FakeCallbackHandler()
llm = CTransformers(
model="marella/gpt-2-ggml",
config=config,
callbacks=[callback_handler],
)
output = await llm._acall(prompt="Say foo:")
assert isinstance(output, str)
assert len(output) > 1
assert 0 < callback_handler.llm_streams <= config["max_new_tokens"]
| [] |
2024-01-10 | RohanDey02/langchain | templates~rag-elasticsearch~ingest.py | from langchain.document_loaders import JSONLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores.elasticsearch import ElasticsearchStore
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID")
ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic")
ELASTIC_PASSWORD = os.getenv("ELASTIC_PASSWORD")
ES_URL = os.getenv("ES_URL", "http://localhost:9200")
if ELASTIC_CLOUD_ID and ELASTIC_USERNAME and ELASTIC_PASSWORD:
es_connection_details = {
"es_cloud_id": ELASTIC_CLOUD_ID,
"es_user": ELASTIC_USERNAME,
"es_password": ELASTIC_PASSWORD,
}
else:
es_connection_details = {"es_url": ES_URL}
# Metadata extraction function
def metadata_func(record: dict, metadata: dict) -> dict:
metadata["name"] = record.get("name")
metadata["summary"] = record.get("summary")
metadata["url"] = record.get("url")
metadata["category"] = record.get("category")
metadata["updated_at"] = record.get("updated_at")
return metadata
## Load Data
loader = JSONLoader(
file_path="./data/documents.json",
jq_schema=".[]",
content_key="content",
metadata_func=metadata_func,
)
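# Assumed shape of ./data/documents.json, inferred from the jq_schema, content_key
# and metadata_func above (illustrative, not an exact copy of the dataset):
# [
#   {
#     "content": "Full text of the workplace document ...",
#     "name": "...", "summary": "...", "url": "...",
#     "category": "...", "updated_at": "..."
#   }
# ]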
text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=250)
all_splits = text_splitter.split_documents(loader.load())
# Add to vectorDB
vectorstore = ElasticsearchStore.from_documents(
documents=all_splits,
embedding=HuggingFaceEmbeddings(
model_name="all-MiniLM-L6-v2", model_kwargs={"device": "cpu"}
),
**es_connection_details,
index_name="workplace-search-example",
)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~utilities~test_twilio.py | """Integration test for Sms."""
from langchain.utilities.twilio import TwilioAPIWrapper
def test_call() -> None:
"""Test that call runs."""
twilio = TwilioAPIWrapper()
output = twilio.run("Message", "+16162904619")
assert output
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~embeddings~elasticsearch.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain.utils import get_from_env
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
from langchain.schema.embeddings import Embeddings
class ElasticsearchEmbeddings(Embeddings):
"""Elasticsearch embedding models.
This class provides an interface to generate embeddings using a model deployed
in an Elasticsearch cluster. It requires an Elasticsearch connection object
and the model_id of the model deployed in the cluster.
In Elasticsearch you need to have an embedding model loaded and deployed.
- https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
- https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
""" # noqa: E501
def __init__(
self,
client: MlClient,
model_id: str,
*,
input_field: str = "text_field",
):
"""
Initialize the ElasticsearchEmbeddings instance.
Args:
client (MlClient): An Elasticsearch ML client object.
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
"""
self.client = client
self.model_id = model_id
self.input_field = input_field
@classmethod
def from_credentials(
cls,
model_id: str,
*,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""Instantiate embeddings from Elasticsearch credentials.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to.
es_user: (str, optional): Elasticsearch username.
es_password: (str, optional): Elasticsearch password.
Example:
.. code-block:: python
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Credentials can be passed in two ways. Either set the env vars
# ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
# pulled in, or pass them in directly as kwargs.
embeddings = ElasticsearchEmbeddings.from_credentials(
model_id,
input_field=input_field,
# es_cloud_id="foo",
# es_user="bar",
# es_password="baz",
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
"""
try:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
except ImportError:
raise ImportError(
"elasticsearch package not found, please install with 'pip install "
"elasticsearch'"
)
es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID")
es_user = es_user or get_from_env("es_user", "ES_USER")
es_password = es_password or get_from_env("es_password", "ES_PASSWORD")
# Connect to Elasticsearch
es_connection = Elasticsearch(
cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
)
client = MlClient(es_connection)
return cls(client, model_id, input_field=input_field)
@classmethod
def from_es_connection(
cls,
model_id: str,
es_connection: Elasticsearch,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""
Instantiate embeddings from an existing Elasticsearch connection.
This method provides a way to create an instance of the ElasticsearchEmbeddings
class using an existing Elasticsearch connection. The connection object is used
to create an MlClient, which is then used to initialize the
ElasticsearchEmbeddings instance.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch cluster.
es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch
connection object.
input_field (str, optional): The name of the key for the input text field
in the document. Defaults to 'text_field'.
Returns:
ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class.
Example:
.. code-block:: python
from elasticsearch import Elasticsearch
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
"""
# Importing MlClient from elasticsearch.client within the method to
# avoid unnecessary import if the method is not used
from elasticsearch.client import MlClient
# Create an MlClient from the given Elasticsearch connection
client = MlClient(es_connection)
# Return a new instance of the ElasticsearchEmbeddings class with
# the MlClient, model_id, and input_field
return cls(client, model_id, input_field=input_field)
def _embedding_func(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for the given texts using the Elasticsearch model.
Args:
texts (List[str]): A list of text strings to generate embeddings for.
Returns:
List[List[float]]: A list of embeddings, one for each text in the input
list.
"""
response = self.client.infer_trained_model(
model_id=self.model_id, docs=[{self.input_field: text} for text in texts]
)
embeddings = [doc["predicted_value"] for doc in response["inference_results"]]
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for a list of documents.
Args:
texts (List[str]): A list of document text strings to generate embeddings
for.
Returns:
List[List[float]]: A list of embeddings, one for each document in the input
list.
"""
return self._embedding_func(texts)
def embed_query(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
return self._embedding_func([text])[0]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_propmptlayer_openai_chat.py | """Test PromptLayer OpenAIChat API wrapper."""
from pathlib import Path
import pytest
from langchain.llms.loading import load_llm
from langchain.llms.promptlayer_openai import PromptLayerOpenAIChat
def test_promptlayer_openai_chat_call() -> None:
"""Test valid call to promptlayer openai."""
llm = PromptLayerOpenAIChat(max_tokens=10)
output = llm("Say foo:")
assert isinstance(output, str)
def test_promptlayer_openai_chat_stop_valid() -> None:
"""Test promptlayer openai stop logic on valid configuration."""
query = "write an ordered list of five items"
first_llm = PromptLayerOpenAIChat(stop="3", temperature=0)
first_output = first_llm(query)
second_llm = PromptLayerOpenAIChat(temperature=0)
second_output = second_llm(query, stop=["3"])
# Setting the stop sequence at init time or passing it at call time should give the same output
assert first_output == second_output
def test_promptlayer_openai_chat_stop_error() -> None:
"""Test promptlayer openai stop logic on bad configuration."""
llm = PromptLayerOpenAIChat(stop="3", temperature=0)
with pytest.raises(ValueError):
llm("write an ordered list of five items", stop=["\n"])
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an promptlayer OpenAPI LLM."""
llm = PromptLayerOpenAIChat(max_tokens=10)
llm.save(file_path=tmp_path / "openai.yaml")
loaded_llm = load_llm(tmp_path / "openai.yaml")
assert loaded_llm == llm
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~embeddings~test_huggingface.py | """Test huggingface embeddings."""
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
HuggingFaceInstructEmbeddings,
)
def test_huggingface_embedding_documents() -> None:
"""Test huggingface embeddings."""
documents = ["foo bar"]
embedding = HuggingFaceEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_huggingface_embedding_query() -> None:
"""Test huggingface embeddings."""
document = "foo bar"
embedding = HuggingFaceEmbeddings(encode_kwargs={"batch_size": 16})
output = embedding.embed_query(document)
assert len(output) == 768
def test_huggingface_instructor_embedding_documents() -> None:
"""Test huggingface embeddings."""
documents = ["foo bar"]
model_name = "hkunlp/instructor-base"
embedding = HuggingFaceInstructEmbeddings(model_name=model_name)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_huggingface_instructor_embedding_query() -> None:
"""Test huggingface embeddings."""
query = "foo bar"
model_name = "hkunlp/instructor-base"
embedding = HuggingFaceInstructEmbeddings(model_name=model_name)
output = embedding.embed_query(query)
assert len(output) == 768
def test_huggingface_instructor_embedding_normalize() -> None:
"""Test huggingface embeddings."""
query = "foo bar"
model_name = "hkunlp/instructor-base"
encode_kwargs = {"normalize_embeddings": True}
embedding = HuggingFaceInstructEmbeddings(
model_name=model_name, encode_kwargs=encode_kwargs
)
output = embedding.embed_query(query)
assert len(output) == 768
eps = 1e-5
norm = sum([o**2 for o in output])
assert abs(1 - norm) <= eps
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~graphs~test_hugegraph.py | import unittest
from typing import Any
from unittest.mock import MagicMock, patch
from langchain.graphs import HugeGraph
class TestHugeGraph(unittest.TestCase):
def setUp(self) -> None:
self.username = "test_user"
self.password = "test_password"
self.address = "test_address"
self.graph = "test_hugegraph"
self.port = 1234
self.session_pool_size = 10
@patch("hugegraph.connection.PyHugeGraph")
def test_init(self, mock_client: Any) -> None:
mock_client.return_value = MagicMock()
huge_graph = HugeGraph(
self.username, self.password, self.address, self.port, self.graph
)
self.assertEqual(huge_graph.username, self.username)
self.assertEqual(huge_graph.password, self.password)
self.assertEqual(huge_graph.address, self.address)
self.assertEqual(huge_graph.port, self.port)
self.assertEqual(huge_graph.graph, self.graph)
@patch("hugegraph.connection.PyHugeGraph")
def test_execute(self, mock_client: Any) -> None:
mock_client.return_value = MagicMock()
huge_graph = HugeGraph(
self.username, self.password, self.address, self.port, self.graph
)
query = "g.V().limit(10)"
result = huge_graph.query(query)
self.assertIsInstance(result, MagicMock)
@patch("hugegraph.connection.PyHugeGraph")
def test_refresh_schema(self, mock_client: Any) -> None:
mock_client.return_value = MagicMock()
huge_graph = HugeGraph(
self.username, self.password, self.address, self.port, self.graph
)
huge_graph.refresh_schema()
self.assertNotEqual(huge_graph.get_schema, "")
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_tensorflow_datasets.py | """Integration tests for the TensorFlow Dataset Loader."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from langchain.document_loaders.tensorflow_datasets import TensorflowDatasetLoader
from langchain.pydantic_v1 import ValidationError
from langchain.schema.document import Document
if TYPE_CHECKING:
import tensorflow as tf # noqa: E402
def decode_to_str(item: tf.Tensor) -> str:
return item.numpy().decode("utf-8")
def mlqaen_example_to_document(example: dict) -> Document:
return Document(
page_content=decode_to_str(example["context"]),
metadata={
"id": decode_to_str(example["id"]),
"title": decode_to_str(example["title"]),
"question": decode_to_str(example["question"]),
"answer": decode_to_str(example["answers"]["text"][0]),
},
)
MAX_DOCS = 10
@pytest.fixture
def tfds_client() -> TensorflowDatasetLoader:
return TensorflowDatasetLoader(
dataset_name="mlqa/en",
split_name="test",
load_max_docs=MAX_DOCS,
sample_to_document_function=mlqaen_example_to_document,
)
def test_load_success(tfds_client: TensorflowDatasetLoader) -> None:
"""Test that returns the correct answer"""
output = tfds_client.load()
assert isinstance(output, list)
assert len(output) == MAX_DOCS
assert isinstance(output[0], Document)
assert len(output[0].page_content) > 0
assert isinstance(output[0].page_content, str)
assert isinstance(output[0].metadata, dict)
def test_lazy_load_success(tfds_client: TensorflowDatasetLoader) -> None:
"""Test that returns the correct answer"""
output = list(tfds_client.lazy_load())
assert isinstance(output, list)
assert len(output) == MAX_DOCS
assert isinstance(output[0], Document)
assert len(output[0].page_content) > 0
assert isinstance(output[0].page_content, str)
assert isinstance(output[0].metadata, dict)
def test_load_fail_wrong_dataset_name() -> None:
"""Test that fails to load"""
with pytest.raises(ValidationError) as exc_info:
TensorflowDatasetLoader(
dataset_name="wrong_dataset_name",
split_name="test",
load_max_docs=MAX_DOCS,
sample_to_document_function=mlqaen_example_to_document,
)
assert "the dataset name is spelled correctly" in str(exc_info.value)
def test_load_fail_wrong_split_name() -> None:
"""Test that fails to load"""
with pytest.raises(ValidationError) as exc_info:
TensorflowDatasetLoader(
dataset_name="mlqa/en",
split_name="wrong_split_name",
load_max_docs=MAX_DOCS,
sample_to_document_function=mlqaen_example_to_document,
)
assert "Unknown split" in str(exc_info.value)
def test_load_fail_no_func() -> None:
"""Test that fails to load"""
with pytest.raises(ValidationError) as exc_info:
TensorflowDatasetLoader(
dataset_name="mlqa/en",
split_name="test",
load_max_docs=MAX_DOCS,
)
assert "Please provide a function" in str(exc_info.value)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~tools~file_management~test_toolkit.py | """Test the FileManagementToolkit."""
from tempfile import TemporaryDirectory
import pytest
from langchain.agents.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
from langchain.tools.base import BaseTool
def test_file_toolkit_get_tools() -> None:
"""Test the get_tools method of FileManagementToolkit."""
with TemporaryDirectory() as temp_dir:
toolkit = FileManagementToolkit(root_dir=temp_dir)
tools = toolkit.get_tools()
assert len(tools) > 0
assert all(isinstance(tool, BaseTool) for tool in tools)
def test_file_toolkit_get_tools_with_selection() -> None:
"""Test the get_tools method of FileManagementToolkit with selected_tools."""
with TemporaryDirectory() as temp_dir:
toolkit = FileManagementToolkit(
root_dir=temp_dir, selected_tools=["read_file", "write_file"]
)
tools = toolkit.get_tools()
assert len(tools) == 2
tool_names = [tool.name for tool in tools]
assert "read_file" in tool_names
assert "write_file" in tool_names
def test_file_toolkit_invalid_tool() -> None:
"""Test the FileManagementToolkit with an invalid tool."""
with TemporaryDirectory() as temp_dir:
with pytest.raises(ValueError):
FileManagementToolkit(root_dir=temp_dir, selected_tools=["invalid_tool"])
def test_file_toolkit_root_dir() -> None:
"""Test the FileManagementToolkit root_dir handling."""
with TemporaryDirectory() as temp_dir:
toolkit = FileManagementToolkit(root_dir=temp_dir)
tools = toolkit.get_tools()
root_dirs = [tool.root_dir for tool in tools if hasattr(tool, "root_dir")]
assert all(root_dir == temp_dir for root_dir in root_dirs)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_pubmed.py | """Integration test for PubMed API Wrapper."""
from typing import List
import pytest
from langchain.document_loaders import PubMedLoader
from langchain.schema import Document
xmltodict = pytest.importorskip("xmltodict")
def test_load_success() -> None:
"""Test that returns the correct answer"""
api_client = PubMedLoader(query="chatgpt")
docs = api_client.load()
print(docs)
assert len(docs) == api_client.load_max_docs == 3
assert_docs(docs)
def test_load_success_load_max_docs() -> None:
"""Test that returns the correct answer"""
api_client = PubMedLoader(query="chatgpt", load_max_docs=2)
docs = api_client.load()
print(docs)
assert len(docs) == api_client.load_max_docs == 2
assert_docs(docs)
def test_load_returns_no_result() -> None:
"""Test that gives no result."""
api_client = PubMedLoader(query="1605.08386WWW")
docs = api_client.load()
assert len(docs) == 0
def test_load_no_content() -> None:
"""Returns a Document without content."""
api_client = PubMedLoader(query="37548971")
docs = api_client.load()
print(docs)
assert len(docs) > 0
assert docs[0].page_content == ""
def assert_docs(docs: List[Document]) -> None:
for doc in docs:
assert doc.metadata
assert set(doc.metadata) == {
"Copyright Information",
"uid",
"Title",
"Published",
}
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~vectorstores~tigris.py | from __future__ import annotations
import itertools
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple
from langchain.schema import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
if TYPE_CHECKING:
from tigrisdb import TigrisClient
from tigrisdb import VectorStore as TigrisVectorStore
from tigrisdb.types.filters import Filter as TigrisFilter
from tigrisdb.types.vector import Document as TigrisDocument
class Tigris(VectorStore):
"""`Tigris` vector store."""
def __init__(self, client: TigrisClient, embeddings: Embeddings, index_name: str):
"""Initialize Tigris vector store."""
try:
import tigrisdb # noqa: F401
except ImportError:
raise ImportError(
"Could not import tigrisdb python package. "
"Please install it with `pip install tigrisdb`"
)
self._embed_fn = embeddings
self._vector_store = TigrisVectorStore(client.get_search(), index_name)
@property
def embeddings(self) -> Embeddings:
return self._embed_fn
@property
def search_index(self) -> TigrisVectorStore:
return self._vector_store
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids for documents.
Ids will be autogenerated if not provided.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
docs = self._prep_docs(texts, metadatas, ids)
result = self.search_index.add_documents(docs)
return [r.id for r in result]
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[TigrisFilter] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query."""
docs_with_scores = self.similarity_search_with_score(query, k, filter)
return [doc for doc, _ in docs_with_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[TigrisFilter] = None,
) -> List[Tuple[Document, float]]:
"""Run similarity search with Chroma with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[TigrisFilter]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
text with distance in float.
"""
vector = self._embed_fn.embed_query(query)
result = self.search_index.similarity_search(
vector=vector, k=k, filter_by=filter
)
docs: List[Tuple[Document, float]] = []
for r in result:
docs.append(
(
Document(
page_content=r.doc["text"], metadata=r.doc.get("metadata")
),
r.score,
)
)
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
client: Optional[TigrisClient] = None,
index_name: Optional[str] = None,
**kwargs: Any,
) -> Tigris:
"""Return VectorStore initialized from texts and embeddings."""
if not index_name:
raise ValueError("`index_name` is required")
if not client:
client = TigrisClient()
store = cls(client, embedding, index_name)
store.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return store
def _prep_docs(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]],
ids: Optional[List[str]],
) -> List[TigrisDocument]:
embeddings: List[List[float]] = self._embed_fn.embed_documents(list(texts))
docs: List[TigrisDocument] = []
for t, m, e, _id in itertools.zip_longest(
texts, metadatas or [], embeddings or [], ids or []
):
doc: TigrisDocument = {
"text": t,
"embeddings": e or [],
"metadata": m or {},
}
if _id:
doc["id"] = _id
docs.append(doc)
return docs
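# Illustrative usage (the embeddings object and index name are assumptions, not part of this module):
#   from tigrisdb import TigrisClient
#   store = Tigris.from_texts(
#       ["hello world"],
#       embedding=my_embeddings,        # any langchain Embeddings implementation
#       client=TigrisClient(),
#       index_name="my_index",
#   )
#   store.similarity_search("hello", k=1)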
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~wikipedia.py | from typing import List
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
from langchain.utilities.wikipedia import WikipediaAPIWrapper
class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper):
"""`Wikipedia API` retriever.
It wraps load() as get_relevant_documents().
It uses all WikipediaAPIWrapper arguments without any change.
"""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
return self.load(query=query)
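# Illustrative usage:
#   retriever = WikipediaRetriever()
#   docs = retriever.get_relevant_documents("Large language model")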
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~model_laboratory.py | """Experiment with different models."""
from __future__ import annotations
from typing import List, Optional, Sequence
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.prompts.prompt import PromptTemplate
from langchain.utils.input import get_color_mapping, print_text
class ModelLaboratory:
"""Experiment with different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[List[str]] = None):
"""Initialize with chains to experiment with.
Args:
chains: list of chains to experiment with.
"""
for chain in chains:
if not isinstance(chain, Chain):
raise ValueError(
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
if len(chain.input_keys) != 1:
raise ValueError(
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
if len(chain.output_keys) != 1:
raise ValueError(
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
)
if names is not None:
if len(names) != len(chains):
raise ValueError("Length of chains does not match length of names.")
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls, llms: List[BaseLLM], prompt: Optional[PromptTemplate] = None
) -> ModelLaboratory:
"""Initialize with LLMs to experiment with and optional prompt.
Args:
llms: list of LLMs to experiment with
prompt: Optional prompt to use to prompt the LLMs. Defaults to None.
If a prompt was provided, it should only have one input variable.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
If a prompt was provided when initializing the laboratory, then this text will be
fed into the prompt. If no prompt was provided, then the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n")
for i, chain in enumerate(self.chains):
if self.names is not None:
name = self.names[i]
else:
name = str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
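# Illustrative usage (llm_a and llm_b are assumed, already-configured LLMs):
#   lab = ModelLaboratory.from_llms([llm_a, llm_b])
#   lab.compare("What color is a ripe banana?")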
| [
"_input",
"{_input}"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~bedrock.py | import json
import warnings
from abc import ABC
from typing import Any, Dict, Iterator, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utilities.anthropic import (
get_num_tokens_anthropic,
get_token_ids_anthropic,
)
HUMAN_PROMPT = "\n\nHuman:"
ASSISTANT_PROMPT = "\n\nAssistant:"
ALTERNATION_ERROR = (
"Error: Prompt must alternate between '\n\nHuman:' and '\n\nAssistant:'."
)
def _add_newlines_before_ha(input_text: str) -> str:
new_text = input_text
for word in ["Human:", "Assistant:"]:
new_text = new_text.replace(word, "\n\n" + word)
for i in range(2):
new_text = new_text.replace("\n\n\n" + word, "\n\n" + word)
return new_text
def _human_assistant_format(input_text: str) -> str:
if input_text.count("Human:") == 0 or (
input_text.find("Human:") > input_text.find("Assistant:")
and "Assistant:" in input_text
):
input_text = HUMAN_PROMPT + " " + input_text # SILENT CORRECTION
if input_text.count("Assistant:") == 0:
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION
if input_text[: len("Human:")] == "Human:":
input_text = "\n\n" + input_text
input_text = _add_newlines_before_ha(input_text)
count = 0
# track alternation
for i in range(len(input_text)):
if input_text[i : i + len(HUMAN_PROMPT)] == HUMAN_PROMPT:
if count % 2 == 0:
count += 1
else:
warnings.warn(ALTERNATION_ERROR + f" Received {input_text}")
if input_text[i : i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT:
if count % 2 == 1:
count += 1
else:
warnings.warn(ALTERNATION_ERROR + f" Received {input_text}")
if count % 2 == 1: # Only saw Human, no Assistant
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION
return input_text
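# Illustrative behaviour of the helper above (a trace of the code, not part of the module API):
#   _human_assistant_format("Hello")
#   -> "\n\nHuman: Hello\n\nAssistant:"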
class LLMInputOutputAdapter:
"""Adapter class to prepare the inputs from Langchain to a format
that LLM model expects.
It also provides helper function to extract
the generated text from the model response."""
provider_to_output_key_map = {
"anthropic": "completion",
"amazon": "outputText",
"cohere": "text",
}
@classmethod
def prepare_input(
cls, provider: str, prompt: str, model_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
input_body = {**model_kwargs}
if provider == "anthropic":
input_body["prompt"] = _human_assistant_format(prompt)
elif provider == "ai21" or provider == "cohere":
input_body["prompt"] = prompt
elif provider == "amazon":
input_body = dict()
input_body["inputText"] = prompt
input_body["textGenerationConfig"] = {**model_kwargs}
else:
input_body["inputText"] = prompt
if provider == "anthropic" and "max_tokens_to_sample" not in input_body:
input_body["max_tokens_to_sample"] = 256
return input_body
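# Illustrative result of prepare_input for the anthropic provider (a trace of the code above):
#   LLMInputOutputAdapter.prepare_input("anthropic", "Hi", {})
#   -> {"prompt": "\n\nHuman: Hi\n\nAssistant:", "max_tokens_to_sample": 256}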
@classmethod
def prepare_output(cls, provider: str, response: Any) -> str:
if provider == "anthropic":
response_body = json.loads(response.get("body").read().decode())
return response_body.get("completion")
else:
response_body = json.loads(response.get("body").read())
if provider == "ai21":
return response_body.get("completions")[0].get("data").get("text")
elif provider == "cohere":
return response_body.get("generations")[0].get("text")
else:
return response_body.get("results")[0].get("outputText")
@classmethod
def prepare_output_stream(
cls, provider: str, response: Any, stop: Optional[List[str]] = None
) -> Iterator[GenerationChunk]:
stream = response.get("body")
if not stream:
return
if provider not in cls.provider_to_output_key_map:
raise ValueError(
f"Unknown streaming response output key for provider: {provider}"
)
for event in stream:
chunk = event.get("chunk")
if chunk:
chunk_obj = json.loads(chunk.get("bytes").decode())
if provider == "cohere" and (
chunk_obj["is_finished"]
or chunk_obj[cls.provider_to_output_key_map[provider]]
== "<EOS_TOKEN>"
):
return
# chunk obj format varies with provider
yield GenerationChunk(
text=chunk_obj[cls.provider_to_output_key_map[provider]]
)
class BedrockBase(BaseModel, ABC):
"""Base class for Bedrock models."""
client: Any #: :meta private:
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str
"""Id of the model to call, e.g., amazon.titan-text-express-v1, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_url: Optional[str] = None
"""Needed if you don't want to default to us-east-1 endpoint"""
streaming: bool = False
"""Whether to stream the results."""
provider_stop_sequence_key_name_map: Mapping[str, str] = {
"anthropic": "stop_sequences",
"amazon": "stopSequences",
"ai21": "stop_sequences",
"cohere": "stop_sequences",
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
# Skip creating new client if passed in constructor
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
if values["endpoint_url"]:
client_params["endpoint_url"] = values["endpoint_url"]
values["client"] = session.client("bedrock-runtime", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
def _get_provider(self) -> str:
return self.model_id.split(".")[0]
@property
def _model_is_anthropic(self) -> bool:
return self._get_provider() == "anthropic"
def _prepare_input_and_invoke(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
accept = "application/json"
contentType = "application/json"
try:
response = self.client.invoke_model(
body=body, modelId=self.model_id, accept=accept, contentType=contentType
)
text = LLMInputOutputAdapter.prepare_output(provider, response)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _prepare_input_and_invoke_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
if stop:
if provider not in self.provider_stop_sequence_key_name_map:
raise ValueError(
f"Stop sequence key name for {provider} is not supported."
)
# stop sequence from _generate() overrides
# stop sequences in the class attribute
_model_kwargs[self.provider_stop_sequence_key_name_map.get(provider)] = stop
if provider == "cohere":
_model_kwargs["stream"] = True
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
try:
response = self.client.invoke_model_with_response_stream(
body=body,
modelId=self.model_id,
accept="application/json",
contentType="application/json",
)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
for chunk in LLMInputOutputAdapter.prepare_output_stream(
provider, response, stop
):
yield chunk
if run_manager is not None:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
class Bedrock(LLM, BedrockBase):
"""Bedrock models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from langchain.llms import Bedrock
llm = Bedrock(
credentials_profile_name="default",
model_id="amazon.titan-text-express-v1",
streaming=True
)
"""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "amazon_bedrock"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Bedrock service with streaming.
Args:
prompt (str): The prompt to pass into the model
stop (Optional[List[str]], optional): Stop sequences. These will
override any stop sequences in the `model_kwargs` attribute.
Defaults to None.
run_manager (Optional[CallbackManagerForLLMRun], optional): Callback
run managers used to process the output. Defaults to None.
Returns:
Iterator[GenerationChunk]: Generator that yields the streamed responses.
Yields:
Iterator[GenerationChunk]: Responses from the model.
"""
return self._prepare_input_and_invoke_stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
return self._prepare_input_and_invoke(prompt=prompt, stop=stop, **kwargs)
def get_num_tokens(self, text: str) -> int:
if self._model_is_anthropic:
return get_num_tokens_anthropic(text)
else:
return super().get_num_tokens(text)
def get_token_ids(self, text: str) -> List[int]:
if self._model_is_anthropic:
return get_token_ids_anthropic(text)
else:
return super().get_token_ids(text)
| [
"\n\nAssistant:",
"\n\nHuman:"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~ainetwork~toolkit.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Literal, Optional
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.pydantic_v1 import root_validator
from langchain.tools import BaseTool
from langchain.tools.ainetwork.app import AINAppOps
from langchain.tools.ainetwork.owner import AINOwnerOps
from langchain.tools.ainetwork.rule import AINRuleOps
from langchain.tools.ainetwork.transfer import AINTransfer
from langchain.tools.ainetwork.utils import authenticate
from langchain.tools.ainetwork.value import AINValueOps
if TYPE_CHECKING:
from ain.ain import Ain
class AINetworkToolkit(BaseToolkit):
"""Toolkit for interacting with AINetwork Blockchain.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by reading, creating, updating, deleting
data associated with this service.
See https://python.langchain.com/docs/security for more information.
"""
network: Optional[Literal["mainnet", "testnet"]] = "testnet"
interface: Optional[Ain] = None
@root_validator(pre=True)
def set_interface(cls, values: dict) -> dict:
if not values.get("interface"):
values["interface"] = authenticate(network=values.get("network", "testnet"))
return values
class Config:
"""Pydantic config."""
validate_all = True
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
AINAppOps(),
AINOwnerOps(),
AINRuleOps(),
AINTransfer(),
AINValueOps(),
]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~chains~router~multi_retrieval_qa.py | """Use a single chain to route an input to one of multiple retrieval qa chains."""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_retrieval_prompt import (
MULTI_RETRIEVAL_ROUTER_TEMPLATE,
)
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import BaseRetriever
from langchain.schema.language_model import BaseLanguageModel
class MultiRetrievalQAChain(MultiRouteChain):
"""A multi-route chain that uses an LLM router chain to choose amongst retrieval
qa chains."""
router_chain: LLMRouterChain
"""Chain for deciding a destination chain and the input to it."""
destination_chains: Mapping[str, BaseRetrievalQA]
"""Map of name to candidate chains that inputs can be routed to."""
default_chain: Chain
"""Default chain to use when router doesn't map input to one of the destinations."""
@property
def output_keys(self) -> List[str]:
return ["result"]
@classmethod
def from_retrievers(
cls,
llm: BaseLanguageModel,
retriever_infos: List[Dict[str, Any]],
default_retriever: Optional[BaseRetriever] = None,
default_prompt: Optional[PromptTemplate] = None,
default_chain: Optional[Chain] = None,
**kwargs: Any,
) -> MultiRetrievalQAChain:
if default_prompt and not default_retriever:
raise ValueError(
"`default_retriever` must be specified if `default_prompt` is "
"provided. Received only `default_prompt`."
)
destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos]
destinations_str = "\n".join(destinations)
router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(
destinations=destinations_str
)
router_prompt = PromptTemplate(
template=router_template,
input_variables=["input"],
output_parser=RouterOutputParser(next_inputs_inner_key="query"),
)
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
destination_chains = {}
for r_info in retriever_infos:
prompt = r_info.get("prompt")
retriever = r_info["retriever"]
chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever)
name = r_info["name"]
destination_chains[name] = chain
if default_chain:
_default_chain = default_chain
elif default_retriever:
_default_chain = RetrievalQA.from_llm(
llm, prompt=default_prompt, retriever=default_retriever
)
else:
prompt_template = DEFAULT_TEMPLATE.replace("input", "query")
prompt = PromptTemplate(
template=prompt_template, input_variables=["history", "query"]
)
_default_chain = ConversationChain(
llm=ChatOpenAI(), prompt=prompt, input_key="query", output_key="result"
)
return cls(
router_chain=router_chain,
destination_chains=destination_chains,
default_chain=_default_chain,
**kwargs,
)
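# Illustrative usage sketch (hypothetical, hedged): `physics_retriever` and
# `history_retriever` are assumed `BaseRetriever` instances built elsewhere (for example
# from vector stores), and an OpenAI API key is assumed for the default ChatOpenAI chain.
#   chain = MultiRetrievalQAChain.from_retrievers(
#       llm=ChatOpenAI(),
#       retriever_infos=[
#           {
#               "name": "physics",
#               "description": "Good for answering physics questions",
#               "retriever": physics_retriever,
#           },
#           {
#               "name": "history",
#               "description": "Good for answering history questions",
#               "retriever": history_retriever,
#           },
#       ],
#       verbose=True,
#   )
#   chain.run("Who discovered the electron?")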
| [
"input"
] |
2024-01-10 | RohanDey02/langchain | libs~experimental~tests~unit_tests~test_pal.py | """Test LLM PAL functionality."""
import pytest
from langchain_experimental.pal_chain.base import PALChain, PALValidation
from langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain_experimental.pal_chain.math_prompt import MATH_PROMPT
from tests.unit_tests.fake_llm import FakeLLM
_MATH_SOLUTION_1 = """
def solution():
\"\"\"Olivia has $23. She bought five bagels for $3 each.
How much money does she have left?\"\"\"
money_initial = 23
bagels = 5
bagel_cost = 3
money_spent = bagels * bagel_cost
money_left = money_initial - money_spent
result = money_left
return result
"""
_MATH_SOLUTION_2 = """
def solution():
\"\"\"Michael had 58 golf balls. On tuesday, he lost 23 golf balls.
On wednesday, he lost 2 more.
How many golf balls did he have at the end of wednesday?\"\"\"
golf_balls_initial = 58
golf_balls_lost_tuesday = 23
golf_balls_lost_wednesday = 2
golf_balls_left = golf_balls_initial \
- golf_balls_lost_tuesday - golf_balls_lost_wednesday
result = golf_balls_left
return result
"""
_MATH_SOLUTION_3 = """
def solution():
\"\"\"first, do `import os`, second, do `os.system('ls')`,
calculate the result of 1+1\"\"\"
import os
os.system('ls')
result = 1 + 1
return result
"""
_MATH_SOLUTION_INFINITE_LOOP = """
def solution():
\"\"\"Michael had 58 golf balls. On tuesday, he lost 23 golf balls.
On wednesday, he lost 2 more.
How many golf balls did he have at the end of wednesday?\"\"\"
golf_balls_initial = 58
golf_balls_lost_tuesday = 23
golf_balls_lost_wednesday = 2
golf_balls_left = golf_balls_initial \
- golf_balls_lost_tuesday - golf_balls_lost_wednesday
result = golf_balls_left
while True:
pass
return result
"""
_COLORED_OBJECT_SOLUTION_1 = """
# Put objects into a list to record ordering
objects = []
objects += [('plate', 'teal')] * 1
objects += [('keychain', 'burgundy')] * 1
objects += [('scrunchiephone charger', 'yellow')] * 1
objects += [('mug', 'orange')] * 1
objects += [('notebook', 'pink')] * 1
objects += [('cup', 'grey')] * 1
# Find the index of the teal item
teal_idx = None
for i, object in enumerate(objects):
if object[1] == 'teal':
teal_idx = i
break
# Find non-orange items to the left of the teal item
non_orange = [object for object in objects[:i] if object[1] != 'orange']
# Count number of non-orange objects
num_non_orange = len(non_orange)
answer = num_non_orange
"""
_COLORED_OBJECT_SOLUTION_2 = """
# Put objects into a list to record ordering
objects = []
objects += [('paperclip', 'purple')] * 1
objects += [('stress ball', 'pink')] * 1
objects += [('keychain', 'brown')] * 1
objects += [('scrunchiephone charger', 'green')] * 1
objects += [('fidget spinner', 'mauve')] * 1
objects += [('pen', 'burgundy')] * 1
# Find the index of the stress ball
stress_ball_idx = None
for i, object in enumerate(objects):
if object[0] == 'stress ball':
stress_ball_idx = i
break
# Find the directly right object
direct_right = objects[i+1]
# Check the directly right object's color
direct_right_color = direct_right[1]
answer = direct_right_color
"""
_SAMPLE_CODE_1 = """
def solution():
\"\"\"Olivia has $23. She bought five bagels for $3 each.
How much money does she have left?\"\"\"
money_initial = 23
bagels = 5
bagel_cost = 3
money_spent = bagels * bagel_cost
money_left = money_initial - money_spent
result = money_left
return result
"""
_SAMPLE_CODE_2 = """
def solution2():
\"\"\"Olivia has $23. She bought five bagels for $3 each.
How much money does she have left?\"\"\"
money_initial = 23
bagels = 5
bagel_cost = 3
money_spent = bagels * bagel_cost
money_left = money_initial - money_spent
result = money_left
return result
"""
_SAMPLE_CODE_3 = """
def solution():
\"\"\"Olivia has $23. She bought five bagels for $3 each.
How much money does she have left?\"\"\"
money_initial = 23
bagels = 5
bagel_cost = 3
money_spent = bagels * bagel_cost
money_left = money_initial - money_spent
result = money_left
exec("evil")
return result
"""
_SAMPLE_CODE_4 = """
import random
def solution():
return random.choice()
"""
_FULL_CODE_VALIDATIONS = PALValidation(
solution_expression_name="solution",
solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
allow_imports=False,
allow_command_exec=False,
)
_ILLEGAL_COMMAND_EXEC_VALIDATIONS = PALValidation(
solution_expression_name="solution",
solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
allow_imports=True,
allow_command_exec=False,
)
_MINIMAL_VALIDATIONS = PALValidation(
solution_expression_name="solution",
solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
allow_imports=True,
allow_command_exec=True,
)
_NO_IMPORTS_VALIDATIONS = PALValidation(
solution_expression_name="solution",
solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
allow_imports=False,
allow_command_exec=True,
)
def test_math_question_1() -> None:
"""Test simple question."""
question = """Olivia has $23. She bought five bagels for $3 each.
How much money does she have left?"""
prompt = MATH_PROMPT.format(question=question)
queries = {prompt: _MATH_SOLUTION_1}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=None)
output = fake_pal_chain.run(question)
assert output == "8"
def test_math_question_2() -> None:
"""Test simple question."""
question = """Michael had 58 golf balls. On tuesday, he lost 23 golf balls.
On wednesday, he lost 2 more. How many golf balls did he have
at the end of wednesday?"""
prompt = MATH_PROMPT.format(question=question)
queries = {prompt: _MATH_SOLUTION_2}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=None)
output = fake_pal_chain.run(question)
assert output == "33"
def test_math_question_3() -> None:
"""Test simple question."""
question = """first, do `import os`, second, do `os.system('ls')`,
calculate the result of 1+1"""
prompt = MATH_PROMPT.format(question=question)
queries = {prompt: _MATH_SOLUTION_3}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=None)
with pytest.raises(ValueError) as exc_info:
fake_pal_chain.run(question)
assert (
str(exc_info.value)
== f"Generated code has disallowed imports: {_MATH_SOLUTION_3}"
)
def test_math_question_infinite_loop() -> None:
"""Test simple question."""
question = """Michael had 58 golf balls. On tuesday, he lost 23 golf balls.
On wednesday, he lost 2 more. How many golf balls did he have
at the end of wednesday?"""
prompt = MATH_PROMPT.format(question=question)
queries = {prompt: _MATH_SOLUTION_INFINITE_LOOP}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=1)
output = fake_pal_chain.run(question)
assert output == "Execution timed out"
def test_color_question_1() -> None:
"""Test simple question."""
question = """On the nightstand, you see the following items arranged in a row:
a teal plate, a burgundy keychain, a yellow scrunchiephone charger,
an orange mug, a pink notebook, and a grey cup. How many non-orange
items do you see to the left of the teal item?"""
prompt = COLORED_OBJECT_PROMPT.format(question=question)
queries = {prompt: _COLORED_OBJECT_SOLUTION_1}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_colored_object_prompt(fake_llm, timeout=None)
output = fake_pal_chain.run(question)
assert output == "0"
def test_color_question_2() -> None:
"""Test simple question."""
question = """On the table, you see a bunch of objects arranged in a row: a purple
paperclip, a pink stress ball, a brown keychain, a green
scrunchiephone charger, a mauve fidget spinner, and a burgundy pen.
What is the color of the object directly to the right of
the stress ball?"""
prompt = COLORED_OBJECT_PROMPT.format(question=question)
queries = {prompt: _COLORED_OBJECT_SOLUTION_2}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_colored_object_prompt(fake_llm, timeout=None)
output = fake_pal_chain.run(question)
assert output == "brown"
def test_valid_code_validation() -> None:
"""Test the validator."""
PALChain.validate_code(_SAMPLE_CODE_1, _FULL_CODE_VALIDATIONS)
def test_different_solution_expr_code_validation() -> None:
"""Test the validator."""
with pytest.raises(ValueError):
PALChain.validate_code(_SAMPLE_CODE_2, _FULL_CODE_VALIDATIONS)
def test_illegal_command_exec_disallowed_code_validation() -> None:
"""Test the validator."""
with pytest.raises(ValueError):
PALChain.validate_code(_SAMPLE_CODE_3, _ILLEGAL_COMMAND_EXEC_VALIDATIONS)
def test_illegal_command_exec_allowed_code_validation() -> None:
"""Test the validator."""
PALChain.validate_code(_SAMPLE_CODE_3, _MINIMAL_VALIDATIONS)
def test_no_imports_code_validation() -> None:
"""Test the validator."""
PALChain.validate_code(_SAMPLE_CODE_4, _MINIMAL_VALIDATIONS)
def test_no_imports_disallowed_code_validation() -> None:
"""Test the validator."""
with pytest.raises(ValueError):
PALChain.validate_code(_SAMPLE_CODE_4, _NO_IMPORTS_VALIDATIONS)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~self_query~qdrant.py | from __future__ import annotations
from typing import TYPE_CHECKING, Tuple
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
if TYPE_CHECKING:
from qdrant_client.http import models as rest
class QdrantTranslator(Visitor):
"""Translate `Qdrant` internal query language elements to valid filters."""
allowed_operators = (
Operator.AND,
Operator.OR,
Operator.NOT,
)
"""Subset of allowed logical operators."""
allowed_comparators = (
Comparator.EQ,
Comparator.LT,
Comparator.LTE,
Comparator.GT,
Comparator.GTE,
)
"""Subset of allowed logical comparators."""
def __init__(self, metadata_key: str):
self.metadata_key = metadata_key
def visit_operation(self, operation: Operation) -> rest.Filter:
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
"Cannot import qdrant_client. Please install with `pip install "
"qdrant-client`."
) from e
args = [arg.accept(self) for arg in operation.arguments]
operator = {
Operator.AND: "must",
Operator.OR: "should",
Operator.NOT: "must_not",
}[operation.operator]
return rest.Filter(**{operator: args})
def visit_comparison(self, comparison: Comparison) -> rest.FieldCondition:
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
"Cannot import qdrant_client. Please install with `pip install "
"qdrant-client`."
) from e
self._validate_func(comparison.comparator)
attribute = self.metadata_key + "." + comparison.attribute
if comparison.comparator == Comparator.EQ:
return rest.FieldCondition(
key=attribute, match=rest.MatchValue(value=comparison.value)
)
kwargs = {comparison.comparator.value: comparison.value}
return rest.FieldCondition(key=attribute, range=rest.Range(**kwargs))
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
"Cannot import qdrant_client. Please install with `pip install "
"qdrant-client`."
) from e
if structured_query.filter is None:
kwargs = {}
else:
filter = structured_query.filter.accept(self)
if isinstance(filter, rest.FieldCondition):
filter = rest.Filter(must=[filter])
kwargs = {"filter": filter}
return structured_query.query, kwargs
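if __name__ == "__main__":
    # Illustrative, hedged sketch of how the translator converts a structured query into
    # Qdrant filter kwargs. It assumes `qdrant-client` is installed and that document
    # metadata was stored under the payload key "metadata"; the query values are examples.
    translator = QdrantTranslator(metadata_key="metadata")
    example_query = StructuredQuery(
        query="dinosaur movies",
        filter=Comparison(comparator=Comparator.GT, attribute="year", value=1990),
        limit=None,
    )
    new_query, search_kwargs = translator.visit_structured_query(example_query)
    # search_kwargs["filter"] is a rest.Filter wrapping a FieldCondition on "metadata.year".
    print(new_query, search_kwargs)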
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~utils~test_html.py | from langchain.utils.html import (
PREFIXES_TO_IGNORE,
SUFFIXES_TO_IGNORE,
extract_sub_links,
find_all_links,
)
def test_find_all_links_none() -> None:
html = "<span>Hello world</span>"
actual = find_all_links(html)
assert actual == []
def test_find_all_links_single() -> None:
htmls = [
"href='foobar.com'",
'href="foobar.com"',
'<div><a class="blah" href="foobar.com">hullo</a></div>',
]
actual = [find_all_links(html) for html in htmls]
assert actual == [["foobar.com"]] * 3
def test_find_all_links_multiple() -> None:
html = (
'<div><a class="blah" href="https://foobar.com">hullo</a></div>'
'<div><a class="bleh" href="/baz/cool">buhbye</a></div>'
)
actual = find_all_links(html)
assert sorted(actual) == [
"/baz/cool",
"https://foobar.com",
]
def test_find_all_links_ignore_suffix() -> None:
html = 'href="foobar{suffix}"'
for suffix in SUFFIXES_TO_IGNORE:
actual = find_all_links(html.format(suffix=suffix))
assert actual == []
# Don't ignore if pattern doesn't occur at end of link.
html = 'href="foobar{suffix}more"'
for suffix in SUFFIXES_TO_IGNORE:
actual = find_all_links(html.format(suffix=suffix))
assert actual == [f"foobar{suffix}more"]
def test_find_all_links_ignore_prefix() -> None:
html = 'href="{prefix}foobar"'
for prefix in PREFIXES_TO_IGNORE:
actual = find_all_links(html.format(prefix=prefix))
assert actual == []
# Don't ignore if pattern doesn't occur at beginning of link.
html = 'href="foobar{prefix}more"'
for prefix in PREFIXES_TO_IGNORE:
# Pound signs are split on when not prefixes.
if prefix == "#":
continue
actual = find_all_links(html.format(prefix=prefix))
assert actual == [f"foobar{prefix}more"]
def test_find_all_links_drop_fragment() -> None:
html = 'href="foobar.com/woah#section_one"'
actual = find_all_links(html)
assert actual == ["foobar.com/woah"]
def test_extract_sub_links() -> None:
html = (
'<a href="https://foobar.com">one</a>'
'<a href="http://baz.net">two</a>'
'<a href="//foobar.com/hello">three</a>'
'<a href="/how/are/you/doing">four</a>'
)
expected = sorted(
[
"https://foobar.com",
"https://foobar.com/hello",
"https://foobar.com/how/are/you/doing",
]
)
actual = sorted(extract_sub_links(html, "https://foobar.com"))
assert actual == expected
actual = extract_sub_links(html, "https://foobar.com/hello")
expected = ["https://foobar.com/hello"]
assert actual == expected
actual = sorted(
extract_sub_links(html, "https://foobar.com/hello", prevent_outside=False)
)
expected = sorted(
[
"https://foobar.com",
"http://baz.net",
"https://foobar.com/hello",
"https://foobar.com/how/are/you/doing",
]
)
assert actual == expected
def test_extract_sub_links_base() -> None:
html = (
'<a href="https://foobar.com">one</a>'
'<a href="http://baz.net">two</a>'
'<a href="//foobar.com/hello">three</a>'
'<a href="/how/are/you/doing">four</a>'
'<a href="alexis.html"</a>'
)
expected = sorted(
[
"https://foobar.com",
"https://foobar.com/hello",
"https://foobar.com/how/are/you/doing",
"https://foobar.com/hello/alexis.html",
]
)
actual = sorted(
extract_sub_links(
html, "https://foobar.com/hello/bill.html", base_url="https://foobar.com"
)
)
assert actual == expected
def test_extract_sub_links_exclude() -> None:
html = (
'<a href="https://foobar.com">one</a>'
'<a href="http://baz.net">two</a>'
'<a href="//foobar.com/hello">three</a>'
'<a href="/how/are/you/doing">four</a>'
'<a href="alexis.html"</a>'
)
expected = sorted(
[
"http://baz.net",
"https://foobar.com",
"https://foobar.com/hello",
"https://foobar.com/hello/alexis.html",
]
)
actual = sorted(
extract_sub_links(
html,
"https://foobar.com/hello/bill.html",
base_url="https://foobar.com",
prevent_outside=False,
exclude_prefixes=("https://foobar.com/how", "http://baz.org"),
)
)
assert actual == expected
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~agent~test_powerbi_agent.py | import pytest
from langchain.agents.agent_toolkits import PowerBIToolkit, create_pbi_agent
from langchain.chat_models import ChatOpenAI
from langchain.utilities.powerbi import PowerBIDataset
from langchain.utils import get_from_env
def azure_installed() -> bool:
try:
from azure.core.credentials import TokenCredential # noqa: F401
from azure.identity import DefaultAzureCredential # noqa: F401
return True
except Exception as e:
print(f"azure not installed, skipping test {e}")
return False
@pytest.mark.skipif(not azure_installed(), reason="requires azure package")
def test_daxquery() -> None:
from azure.identity import DefaultAzureCredential
DATASET_ID = get_from_env("", "POWERBI_DATASET_ID")
TABLE_NAME = get_from_env("", "POWERBI_TABLE_NAME")
NUM_ROWS = get_from_env("", "POWERBI_NUMROWS")
fast_llm = ChatOpenAI(
temperature=0.5, max_tokens=1000, model_name="gpt-3.5-turbo", verbose=True
)
smart_llm = ChatOpenAI(
temperature=0, max_tokens=100, model_name="gpt-4", verbose=True
)
toolkit = PowerBIToolkit(
powerbi=PowerBIDataset(
dataset_id=DATASET_ID,
table_names=[TABLE_NAME],
credential=DefaultAzureCredential(),
),
llm=smart_llm,
)
agent_executor = create_pbi_agent(llm=fast_llm, toolkit=toolkit, verbose=True)
output = agent_executor.run(f"How many rows are in the table, {TABLE_NAME}")
assert NUM_ROWS in output
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~utilities~test_github.py | """Integration test for Github Wrapper."""
import pytest
from langchain.utilities.github import GitHubAPIWrapper
# Make sure you have set the following env variables:
# GITHUB_REPOSITORY
# GITHUB_BRANCH
# GITHUB_APP_ID
# GITHUB_PRIVATE_KEY
@pytest.fixture
def api_client() -> GitHubAPIWrapper:
return GitHubAPIWrapper()
def test_get_open_issues(api_client: GitHubAPIWrapper) -> None:
"""Basic test to fetch issues"""
issues = api_client.get_issues()
assert len(issues) != 0
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~youtube.py | """Loads YouTube transcript."""
from __future__ import annotations
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from urllib.parse import parse_qs, urlparse
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.pydantic_v1 import root_validator
from langchain.pydantic_v1.dataclasses import dataclass
logger = logging.getLogger(__name__)
SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"]
@dataclass
class GoogleApiClient:
"""Generic Google API Client.
    To use, you should have the ``google_auth_oauthlib``, ``youtube_transcript_api`` and
    ``google`` python packages installed.
    As the Google API expects credentials, you need to set up a Google account and
    register your service: "https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
"""
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
service_account_path: Path = Path.home() / ".credentials" / "credentials.json"
token_path: Path = Path.home() / ".credentials" / "token.json"
def __post_init__(self) -> None:
self.creds = self._load_credentials()
    @root_validator
    def validate_credentials_path_or_service_account_path_is_set(
        cls, values: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Validate that either credentials_path or service_account_path is set."""
        if not values.get("credentials_path") and not values.get(
            "service_account_path"
        ):
            raise ValueError(
                "Must specify either credentials_path or service_account_path"
            )
        return values
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib "
"youtube-transcript-api` "
"to use the Google Drive loader"
)
creds = None
if self.service_account_path.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_path)
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
ALLOWED_SCHEMAS = {"http", "https"}
ALLOWED_NETLOCK = {
"youtu.be",
"m.youtube.com",
"youtube.com",
"www.youtube.com",
"www.youtube-nocookie.com",
"vid.plus",
}
def _parse_video_id(url: str) -> Optional[str]:
"""Parse a youtube url and return the video id if valid, otherwise None."""
parsed_url = urlparse(url)
if parsed_url.scheme not in ALLOWED_SCHEMAS:
return None
if parsed_url.netloc not in ALLOWED_NETLOCK:
return None
path = parsed_url.path
if path.endswith("/watch"):
query = parsed_url.query
parsed_query = parse_qs(query)
if "v" in parsed_query:
ids = parsed_query["v"]
video_id = ids if isinstance(ids, str) else ids[0]
else:
return None
else:
path = parsed_url.path.lstrip("/")
video_id = path.split("/")[-1]
if len(video_id) != 11: # Video IDs are 11 characters long
return None
return video_id
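# For illustration, a few expected input/output pairs (hypothetical URLs):
#   _parse_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ")  -> "dQw4w9WgXcQ"
#   _parse_video_id("https://youtu.be/dQw4w9WgXcQ")                 -> "dQw4w9WgXcQ"
#   _parse_video_id("https://example.com/watch?v=dQw4w9WgXcQ")      -> None (netloc not allowed)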
class YoutubeLoader(BaseLoader):
"""Load `YouTube` transcripts."""
def __init__(
self,
video_id: str,
add_video_info: bool = False,
language: Union[str, Sequence[str]] = "en",
translation: str = "en",
continue_on_failure: bool = False,
):
"""Initialize with YouTube video ID."""
self.video_id = video_id
self.add_video_info = add_video_info
        if isinstance(language, str):
            self.language = [language]
        else:
            self.language = language
self.translation = translation
self.continue_on_failure = continue_on_failure
@staticmethod
def extract_video_id(youtube_url: str) -> str:
"""Extract video id from common YT urls."""
video_id = _parse_video_id(youtube_url)
if not video_id:
raise ValueError(
f"Could not determine the video ID for the URL {youtube_url}"
)
return video_id
@classmethod
def from_youtube_url(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader:
"""Given youtube URL, load video."""
video_id = cls.extract_video_id(youtube_url)
return cls(video_id, **kwargs)
def load(self) -> List[Document]:
"""Load documents."""
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
YouTubeTranscriptApi,
)
except ImportError:
raise ImportError(
"Could not import youtube_transcript_api python package. "
"Please install it with `pip install youtube-transcript-api`."
)
metadata = {"source": self.video_id}
if self.add_video_info:
# Get more video meta info
# Such as title, description, thumbnail url, publish_date
video_info = self._get_video_info()
metadata.update(video_info)
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
except TranscriptsDisabled:
return []
try:
transcript = transcript_list.find_transcript(self.language)
except NoTranscriptFound:
en_transcript = transcript_list.find_transcript(["en"])
transcript = en_transcript.translate(self.translation)
transcript_pieces = transcript.fetch()
transcript = " ".join([t["text"].strip(" ") for t in transcript_pieces])
return [Document(page_content=transcript, metadata=metadata)]
def _get_video_info(self) -> dict:
"""Get important video information.
Components are:
            - title
            - description
            - view_count
            - thumbnail_url
            - publish_date
            - length
            - author
"""
try:
from pytube import YouTube
except ImportError:
raise ImportError(
"Could not import pytube python package. "
"Please install it with `pip install pytube`."
)
yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}")
video_info = {
"title": yt.title or "Unknown",
"description": yt.description or "Unknown",
"view_count": yt.views or 0,
"thumbnail_url": yt.thumbnail_url or "Unknown",
"publish_date": yt.publish_date.strftime("%Y-%m-%d %H:%M:%S")
if yt.publish_date
else "Unknown",
"length": yt.length or 0,
"author": yt.author or "Unknown",
}
return video_info
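# Illustrative usage sketch (hypothetical, hedged): assuming `youtube-transcript-api` is
# installed (and `pytube` when add_video_info=True), and using an example video URL:
#   loader = YoutubeLoader.from_youtube_url(
#       "https://www.youtube.com/watch?v=QsYGlZkevEg", add_video_info=False, language="en"
#   )
#   docs = loader.load()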
@dataclass
class GoogleApiYoutubeLoader(BaseLoader):
"""Load all Videos from a `YouTube` Channel.
    To use, you should have the ``googleapiclient`` and ``youtube_transcript_api``
    python packages installed.
    As the service needs a google_api_client, you first have to initialize
    the GoogleApiClient.
    Additionally you have to either provide a channel name or a list of video ids;
    see "https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
from langchain.document_loaders import GoogleApiYoutubeLoader
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
loader = GoogleApiYoutubeLoader(
google_api_client=google_api_client,
channel_name = "CodeAesthetic"
)
            loader.load()
"""
google_api_client: GoogleApiClient
channel_name: Optional[str] = None
video_ids: Optional[List[str]] = None
add_video_info: bool = True
captions_language: str = "en"
continue_on_failure: bool = False
def __post_init__(self) -> None:
self.youtube_client = self._build_youtube_client(self.google_api_client.creds)
def _build_youtube_client(self, creds: Any) -> Any:
try:
from googleapiclient.discovery import build
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib "
"youtube-transcript-api` "
"to use the Google Drive loader"
)
return build("youtube", "v3", credentials=creds)
@root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("channel_name") and not values.get("video_ids"):
raise ValueError("Must specify either channel_name or video_ids")
return values
    def _get_transcript_for_video_id(self, video_id: str) -> str:
from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi
transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
try:
transcript = transcript_list.find_transcript([self.captions_language])
except NoTranscriptFound:
            for available_transcript in transcript_list:
                # No transcript in the requested language; translate an available one.
                transcript = available_transcript.translate(self.captions_language)
transcript_pieces = transcript.fetch()
return " ".join([t["text"].strip(" ") for t in transcript_pieces])
def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document:
        captions = self._get_transcript_for_video_id(video_id)
video_response = (
self.youtube_client.videos()
.list(
part="id,snippet",
id=video_id,
)
.execute()
)
return Document(
page_content=captions,
metadata=video_response.get("items")[0],
)
def _get_channel_id(self, channel_name: str) -> str:
request = self.youtube_client.search().list(
part="id",
q=channel_name,
type="channel",
maxResults=1, # we only need one result since channel names are unique
)
response = request.execute()
channel_id = response["items"][0]["id"]["channelId"]
return channel_id
def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]:
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
)
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"youtube-transcript-api` "
"to use the youtube loader"
)
channel_id = self._get_channel_id(channel)
request = self.youtube_client.search().list(
part="id,snippet",
channelId=channel_id,
maxResults=50, # adjust this value to retrieve more or fewer videos
)
video_ids = []
while request is not None:
response = request.execute()
# Add each video ID to the list
for item in response["items"]:
if not item["id"].get("videoId"):
continue
meta_data = {"videoId": item["id"]["videoId"]}
if self.add_video_info:
item["snippet"].pop("thumbnails")
meta_data.update(item["snippet"])
try:
                    page_content = self._get_transcript_for_video_id(
item["id"]["videoId"]
)
video_ids.append(
Document(
page_content=page_content,
metadata=meta_data,
)
)
except (TranscriptsDisabled, NoTranscriptFound) as e:
if self.continue_on_failure:
logger.error(
"Error fetching transscript "
+ f" {item['id']['videoId']}, exception: {e}"
)
else:
raise e
request = self.youtube_client.search().list_next(request, response)
return video_ids
def load(self) -> List[Document]:
"""Load documents."""
document_list = []
if self.channel_name:
document_list.extend(self._get_document_for_channel(self.channel_name))
elif self.video_ids:
document_list.extend(
[
self._get_document_for_video_id(video_id)
for video_id in self.video_ids
]
)
else:
raise ValueError("Must specify either channel_name or video_ids")
return document_list
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~evaluation~qa~eval_chain.py | """LLM Chains for evaluating question answering."""
from __future__ import annotations
import re
import string
from typing import Any, List, Optional, Sequence, Tuple
from langchain.callbacks.manager import Callbacks
from langchain.chains.llm import LLMChain
from langchain.evaluation.qa.eval_prompt import CONTEXT_PROMPT, COT_PROMPT, PROMPT
from langchain.evaluation.schema import LLMEvalChain, StringEvaluator
from langchain.prompts import PromptTemplate
from langchain.pydantic_v1 import Extra
from langchain.schema import RUN_KEY
from langchain.schema.language_model import BaseLanguageModel
def _get_score(text: str) -> Optional[Tuple[str, int]]:
match = re.search(r"grade:\s*(correct|incorrect)", text.strip(), re.IGNORECASE)
if match:
if match.group(1).upper() == "CORRECT":
return "CORRECT", 1
elif match.group(1).upper() == "INCORRECT":
return "INCORRECT", 0
try:
first_word = (
text.strip().split()[0].translate(str.maketrans("", "", string.punctuation))
)
if first_word.upper() == "CORRECT":
return "CORRECT", 1
elif first_word.upper() == "INCORRECT":
return "INCORRECT", 0
last_word = (
text.strip()
.split()[-1]
.translate(str.maketrans("", "", string.punctuation))
)
if last_word.upper() == "CORRECT":
return "CORRECT", 1
elif last_word.upper() == "INCORRECT":
return "INCORRECT", 0
except IndexError:
pass
return None
def _parse_string_eval_output(text: str) -> dict:
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Any: The parsed output.
"""
reasoning = text.strip()
parsed_scores = _get_score(reasoning)
if parsed_scores is None:
value, score = None, None
else:
value, score = parsed_scores
return {
"reasoning": reasoning,
"value": value,
"score": score,
}
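# For illustration, how the parser behaves on two example grader outputs:
#   _parse_string_eval_output("GRADE: CORRECT")
#       -> {"reasoning": "GRADE: CORRECT", "value": "CORRECT", "score": 1}
#   _parse_string_eval_output("The answer misses the key fact. INCORRECT")
#       -> value "INCORRECT", score 0 (matched via the trailing word)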
class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
"""LLM Chain for evaluating question answering."""
output_key: str = "results" #: :meta private:
class Config:
"""Configuration for the QAEvalChain."""
extra = Extra.ignore
@property
def evaluation_name(self) -> str:
return "correctness"
@property
def requires_reference(self) -> bool:
return True
@property
def requires_input(self) -> bool:
return True
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
**kwargs: Any,
) -> QAEvalChain:
"""Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'input', 'answer' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
QAEvalChain: the loaded QA eval chain.
"""
prompt = prompt or PROMPT
expected_input_vars = {"query", "answer", "result"}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt.input_variables}"
)
return cls(llm=llm, prompt=prompt, **kwargs)
def evaluate(
self,
examples: Sequence[dict],
predictions: Sequence[dict],
question_key: str = "query",
answer_key: str = "answer",
prediction_key: str = "result",
*,
callbacks: Callbacks = None,
) -> List[dict]:
"""Evaluate question answering examples and predictions."""
inputs = [
{
"query": example[question_key],
"answer": example[answer_key],
"result": predictions[i][prediction_key],
}
for i, example in enumerate(examples)
]
return self.apply(inputs, callbacks=callbacks)
def _prepare_output(self, result: dict) -> dict:
parsed_result = _parse_string_eval_output(result[self.output_key])
if RUN_KEY in result:
parsed_result[RUN_KEY] = result[RUN_KEY]
return parsed_result
def _evaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Evaluate Chain or LLM output, based on optional input and label.
Args:
prediction (str): the LLM or chain prediction to evaluate.
reference (Optional[str], optional): the reference label
to evaluate against.
input (Optional[str], optional): the input to consider during evaluation
callbacks (Callbacks, optional): the callbacks to use for tracing.
include_run_info (bool, optional): whether to include run info in the
returned results.
**kwargs: additional keyword arguments, including callbacks, tags, etc.
Returns:
dict: The evaluation results containing the score or value.
"""
result = self(
{
"query": input,
"answer": reference,
"result": prediction,
},
callbacks=callbacks,
include_run_info=include_run_info,
)
return self._prepare_output(result)
async def _aevaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
result = await self.acall(
inputs={"query": input, "answer": reference, "result": prediction},
callbacks=callbacks,
include_run_info=include_run_info,
)
return self._prepare_output(result)
class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
"""LLM Chain for evaluating QA w/o GT based on context"""
@property
def requires_reference(self) -> bool:
"""Whether the chain requires a reference string."""
return True
@property
def requires_input(self) -> bool:
"""Whether the chain requires an input string."""
return True
class Config:
"""Configuration for the QAEvalChain."""
extra = Extra.ignore
@classmethod
def _validate_input_vars(cls, prompt: PromptTemplate) -> None:
expected_input_vars = {"query", "context", "result"}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt.input_variables}"
)
@property
def evaluation_name(self) -> str:
return "Contextual Accuracy"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
**kwargs: Any,
) -> ContextQAEvalChain:
"""Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'query', 'context' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
ContextQAEvalChain: the loaded QA eval chain.
"""
prompt = prompt or CONTEXT_PROMPT
cls._validate_input_vars(prompt)
return cls(llm=llm, prompt=prompt, **kwargs)
def evaluate(
self,
examples: List[dict],
predictions: List[dict],
question_key: str = "query",
context_key: str = "context",
prediction_key: str = "result",
*,
callbacks: Callbacks = None,
) -> List[dict]:
"""Evaluate question answering examples and predictions."""
inputs = [
{
"query": example[question_key],
"context": example[context_key],
"result": predictions[i][prediction_key],
}
for i, example in enumerate(examples)
]
return self.apply(inputs, callbacks=callbacks)
def _prepare_output(self, result: dict) -> dict:
parsed_result = _parse_string_eval_output(result[self.output_key])
if RUN_KEY in result:
parsed_result[RUN_KEY] = result[RUN_KEY]
return parsed_result
def _evaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
result = self(
{
"query": input,
"context": reference,
"result": prediction,
},
callbacks=callbacks,
include_run_info=include_run_info,
)
return self._prepare_output(result)
async def _aevaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
result = await self.acall(
inputs={"query": input, "context": reference, "result": prediction},
callbacks=callbacks,
include_run_info=include_run_info,
)
return self._prepare_output(result)
class CotQAEvalChain(ContextQAEvalChain):
"""LLM Chain for evaluating QA using chain of thought reasoning."""
@property
def evaluation_name(self) -> str:
return "COT Contextual Accuracy"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
**kwargs: Any,
) -> CotQAEvalChain:
"""Load QA Eval Chain from LLM."""
prompt = prompt or COT_PROMPT
cls._validate_input_vars(prompt)
return cls(llm=llm, prompt=prompt, **kwargs)
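# Illustrative usage sketch (hypothetical, hedged): `llm` is assumed to be any
# BaseLanguageModel instance (for example an OpenAI chat model) configured elsewhere.
#   eval_chain = QAEvalChain.from_llm(llm)
#   graded = eval_chain.evaluate(
#       examples=[{"query": "What is 2 + 2?", "answer": "4"}],
#       predictions=[{"result": "2 + 2 is 4"}],
#   )
#   graded[0]["results"] holds the raw grade text; _parse_string_eval_output maps it to
#   a value/score pair when the evaluator is used through the string-evaluator interface.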
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~azure_cognitive_services.py | from __future__ import annotations
import sys
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools.azure_cognitive_services import (
AzureCogsFormRecognizerTool,
AzureCogsImageAnalysisTool,
AzureCogsSpeech2TextTool,
AzureCogsText2SpeechTool,
)
from langchain.tools.base import BaseTool
class AzureCognitiveServicesToolkit(BaseToolkit):
"""Toolkit for Azure Cognitive Services."""
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = [
AzureCogsFormRecognizerTool(),
AzureCogsSpeech2TextTool(),
AzureCogsText2SpeechTool(),
]
# TODO: Remove check once azure-ai-vision supports MacOS.
if sys.platform.startswith("linux") or sys.platform.startswith("win"):
tools.append(AzureCogsImageAnalysisTool())
return tools
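# Illustrative usage sketch (hypothetical, hedged): assuming the Azure Cognitive Services
# credentials each tool expects (key/endpoint settings) are configured and the
# corresponding Azure SDK packages are installed:
#   toolkit = AzureCognitiveServicesToolkit()
#   print([tool.name for tool in toolkit.get_tools()])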
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~agent~test_pandas_agent.py | import re
import numpy as np
import pytest
from pandas import DataFrame
from langchain.agents import create_pandas_dataframe_agent
from langchain.agents.agent import AgentExecutor
from langchain.llms import OpenAI
@pytest.fixture(scope="module")
def df() -> DataFrame:
random_data = np.random.rand(4, 4)
df = DataFrame(random_data, columns=["name", "age", "food", "sport"])
return df
# Figure out type hint here
@pytest.fixture(scope="module")
def df_list() -> list:
random_data = np.random.rand(4, 4)
df1 = DataFrame(random_data, columns=["name", "age", "food", "sport"])
random_data = np.random.rand(2, 2)
df2 = DataFrame(random_data, columns=["name", "height"])
df_list = [df1, df2]
return df_list
def test_pandas_agent_creation(df: DataFrame) -> None:
agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df)
assert isinstance(agent, AgentExecutor)
def test_data_reading(df: DataFrame) -> None:
agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df)
assert isinstance(agent, AgentExecutor)
response = agent.run("how many rows in df? Give me a number.")
result = re.search(rf".*({df.shape[0]}).*", response)
assert result is not None
assert result.group(1) is not None
def test_data_reading_no_df_in_prompt(df: DataFrame) -> None:
agent = create_pandas_dataframe_agent(
OpenAI(temperature=0), df, include_df_in_prompt=False
)
assert isinstance(agent, AgentExecutor)
response = agent.run("how many rows in df? Give me a number.")
result = re.search(rf".*({df.shape[0]}).*", response)
assert result is not None
assert result.group(1) is not None
def test_multi_df(df_list: list) -> None:
agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df_list, verbose=True)
response = agent.run("how many total rows in the two dataframes? Give me a number.")
result = re.search(r".*(6).*", response)
assert result is not None
assert result.group(1) is not None
def test_multi_df_no_df_in_prompt(df_list: list) -> None:
agent = create_pandas_dataframe_agent(
OpenAI(temperature=0), df_list, include_df_in_prompt=False
)
response = agent.run("how many total rows in the two dataframes? Give me a number.")
result = re.search(r".*(6).*", response)
assert result is not None
assert result.group(1) is not None
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~retrievers~document_compressors~test_base.py | """Integration test for compression pipelines."""
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import (
DocumentCompressorPipeline,
EmbeddingsFilter,
)
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter
def test_document_compressor_pipeline() -> None:
embeddings = OpenAIEmbeddings()
splitter = CharacterTextSplitter(chunk_size=20, chunk_overlap=0, separator=". ")
redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.8)
pipeline_filter = DocumentCompressorPipeline(
transformers=[splitter, redundant_filter, relevant_filter]
)
texts = [
"This sentence is about cows",
"This sentence was about cows",
"foo bar baz",
]
docs = [Document(page_content=". ".join(texts))]
actual = pipeline_filter.compress_documents(docs, "Tell me about farm animals")
assert len(actual) == 1
assert actual[0].page_content in texts[:2]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_vald.py | """Test Vald functionality."""
import time
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.vectorstores import Vald
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
"""
To run, you should have a Vald cluster.
https://github.com/vdaas/vald/blob/main/docs/tutorial/get-started.md
"""
WAIT_TIME = 90
def _vald_from_texts(
metadatas: Optional[List[dict]] = None,
host: str = "localhost",
port: int = 8080,
skip_strict_exist_check: bool = True,
) -> Vald:
return Vald.from_texts(
fake_texts,
FakeEmbeddings(),
metadatas=metadatas,
host=host,
port=port,
skip_strict_exist_check=skip_strict_exist_check,
)
def test_vald_add_texts() -> None:
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME) # Wait for CreateIndex
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 3
texts = ["a", "b", "c"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch.add_texts(texts, metadatas)
time.sleep(WAIT_TIME) # Wait for CreateIndex
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 6
def test_vald_delete() -> None:
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 3
docsearch.delete(["foo"])
time.sleep(WAIT_TIME)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 2
def test_vald_search() -> None:
"""Test end to end construction and search."""
docsearch = _vald_from_texts()
time.sleep(WAIT_TIME)
output = docsearch.similarity_search("foo", k=3)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
def test_vald_search_with_score() -> None:
"""Test end to end construction and search with scores."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search_with_score("foo", k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
assert scores[0] < scores[1] < scores[2]
def test_vald_search_by_vector() -> None:
"""Test end to end construction and search by vector."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
embedding = FakeEmbeddings().embed_query("foo")
output = docsearch.similarity_search_by_vector(embedding, k=3)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
def test_vald_search_with_score_by_vector() -> None:
"""Test end to end construction and search with scores by vector."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
embedding = FakeEmbeddings().embed_query("foo")
output = docsearch.similarity_search_with_score_by_vector(embedding, k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
assert scores[0] < scores[1] < scores[2]
def test_vald_max_marginal_relevance_search() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
]
def test_vald_max_marginal_relevance_search_by_vector() -> None:
"""Test end to end construction and MRR search by vector."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
embedding = FakeEmbeddings().embed_query("foo")
output = docsearch.max_marginal_relevance_search_by_vector(
embedding, k=2, fetch_k=3
)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~tools~file_management~test_file_search.py | """Test the FileSearch tool."""
from pathlib import Path
from tempfile import TemporaryDirectory
from langchain.tools.file_management.file_search import FileSearchTool
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
)
def test_file_search_with_root_dir() -> None:
"""Test the FileSearch tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = FileSearchTool(root_dir=temp_dir)
file_1 = Path(temp_dir) / "file1.txt"
file_2 = Path(temp_dir) / "file2.log"
file_1.write_text("File 1 content")
file_2.write_text("File 2 content")
matches = tool.run({"dir_path": ".", "pattern": "*.txt"}).split("\n")
assert len(matches) == 1
assert Path(matches[0]).name == "file1.txt"
def test_file_search_errs_outside_root_dir() -> None:
"""Test the FileSearch tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = FileSearchTool(root_dir=temp_dir)
result = tool.run({"dir_path": "..", "pattern": "*.txt"})
assert result == INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value="..")
def test_file_search() -> None:
"""Test the FileSearch tool."""
with TemporaryDirectory() as temp_dir:
tool = FileSearchTool()
file_1 = Path(temp_dir) / "file1.txt"
file_2 = Path(temp_dir) / "file2.log"
file_1.write_text("File 1 content")
file_2.write_text("File 2 content")
matches = tool.run({"dir_path": temp_dir, "pattern": "*.txt"}).split("\n")
assert len(matches) == 1
assert Path(matches[0]).name == "file1.txt"
| [] |